D31657.id94855.diff

diff --git a/sys/dev/cxgbe/tom/t4_cpl_io.c b/sys/dev/cxgbe/tom/t4_cpl_io.c
--- a/sys/dev/cxgbe/tom/t4_cpl_io.c
+++ b/sys/dev/cxgbe/tom/t4_cpl_io.c
@@ -2203,14 +2203,14 @@
/* Inline sosend_generic(). */
- error = sblock(sb, SBL_WAIT);
+ error = SOCK_IO_SEND_LOCK(so, SBL_WAIT);
MPASS(error == 0);
sendanother:
SOCKBUF_LOCK(sb);
if (so->so_snd.sb_state & SBS_CANTSENDMORE) {
SOCKBUF_UNLOCK(sb);
- sbunlock(sb);
+ SOCK_IO_SEND_UNLOCK(so);
if ((so->so_options & SO_NOSIGPIPE) == 0) {
PROC_LOCK(job->userproc);
kern_psignal(job->userproc, SIGPIPE);
@@ -2223,12 +2223,12 @@
error = so->so_error;
so->so_error = 0;
SOCKBUF_UNLOCK(sb);
- sbunlock(sb);
+ SOCK_IO_SEND_UNLOCK(so);
goto out;
}
if ((so->so_state & SS_ISCONNECTED) == 0) {
SOCKBUF_UNLOCK(sb);
- sbunlock(sb);
+ SOCK_IO_SEND_UNLOCK(so);
error = ENOTCONN;
goto out;
}
@@ -2241,13 +2241,13 @@
*/
if (!aio_set_cancel_function(job, t4_aiotx_cancel)) {
SOCKBUF_UNLOCK(sb);
- sbunlock(sb);
+ SOCK_IO_SEND_UNLOCK(so);
error = ECANCELED;
goto out;
}
TAILQ_INSERT_HEAD(&toep->aiotx_jobq, job, list);
SOCKBUF_UNLOCK(sb);
- sbunlock(sb);
+ SOCK_IO_SEND_UNLOCK(so);
goto out;
}
@@ -2274,7 +2274,7 @@
m = alloc_aiotx_mbuf(job, len);
if (m == NULL) {
- sbunlock(sb);
+ SOCK_IO_SEND_UNLOCK(so);
error = EFAULT;
goto out;
}
@@ -2285,7 +2285,7 @@
INP_WLOCK(inp);
if (inp->inp_flags & (INP_TIMEWAIT | INP_DROPPED)) {
INP_WUNLOCK(inp);
- sbunlock(sb);
+ SOCK_IO_SEND_UNLOCK(so);
error = ECONNRESET;
goto out;
}
@@ -2307,7 +2307,7 @@
INP_WUNLOCK(inp);
if (sendmore)
goto sendanother;
- sbunlock(sb);
+ SOCK_IO_SEND_UNLOCK(so);
if (error)
goto out;
diff --git a/sys/dev/hyperv/hvsock/hv_sock.c b/sys/dev/hyperv/hvsock/hv_sock.c
--- a/sys/dev/hyperv/hvsock/hv_sock.c
+++ b/sys/dev/hyperv/hvsock/hv_sock.c
@@ -664,18 +664,17 @@
if (uio->uio_resid == 0 || uio->uio_rw != UIO_READ)
return (EINVAL);
- sb = &so->so_rcv;
-
orig_resid = uio->uio_resid;
/* Prevent other readers from entering the socket. */
- error = sblock(sb, SBLOCKWAIT(flags));
+ error = SOCK_IO_RECV_LOCK(so, SBLOCKWAIT(flags));
if (error) {
HVSOCK_DBG(HVSOCK_DBG_ERR,
- "%s: sblock returned error = %d\n", __func__, error);
+ "%s: soiolock returned error = %d\n", __func__, error);
return (error);
}
+ sb = &so->so_rcv;
SOCKBUF_LOCK(sb);
cbarg.uio = uio;
@@ -779,8 +778,7 @@
out:
SOCKBUF_UNLOCK(sb);
-
- sbunlock(sb);
+ SOCK_IO_RECV_UNLOCK(so);
/* We recieved a FIN in this call */
if (so->so_error == ESHUTDOWN) {
@@ -823,18 +821,17 @@
if (uio->uio_resid == 0 || uio->uio_rw != UIO_WRITE)
return (EINVAL);
- sb = &so->so_snd;
-
orig_resid = uio->uio_resid;
/* Prevent other writers from entering the socket. */
- error = sblock(sb, SBLOCKWAIT(flags));
+ error = SOCK_IO_SEND_LOCK(so, SBLOCKWAIT(flags));
if (error) {
HVSOCK_DBG(HVSOCK_DBG_ERR,
- "%s: sblock returned error = %d\n", __func__, error);
+ "%s: soiolocak returned error = %d\n", __func__, error);
return (error);
}
+ sb = &so->so_snd;
SOCKBUF_LOCK(sb);
if ((sb->sb_state & SBS_CANTSENDMORE) ||
@@ -893,7 +890,7 @@
out:
SOCKBUF_UNLOCK(sb);
- sbunlock(sb);
+ SOCK_IO_SEND_UNLOCK(so);
return (error);
}
@@ -1674,7 +1671,7 @@
{
struct hvsock_sc *sc = (struct hvsock_sc *)device_get_softc(dev);
struct socket *so;
- int error, retry;
+ int retry;
if (bootverbose)
device_printf(dev, "hvsock_detach called.\n");
@@ -1703,8 +1700,7 @@
*/
if (so) {
retry = 0;
- while ((error = sblock(&so->so_rcv, 0)) ==
- EWOULDBLOCK) {
+ while (SOCK_IO_RECV_LOCK(so, 0) == EWOULDBLOCK) {
/*
* Someone is reading, rx br is busy
*/
@@ -1715,8 +1711,7 @@
"retry = %d\n", retry++);
}
retry = 0;
- while ((error = sblock(&so->so_snd, 0)) ==
- EWOULDBLOCK) {
+ while (SOCK_IO_SEND_LOCK(so, 0) == EWOULDBLOCK) {
/*
* Someone is sending, tx br is busy
*/
@@ -1734,8 +1729,8 @@
sc->pcb = NULL;
if (so) {
- sbunlock(&so->so_rcv);
- sbunlock(&so->so_snd);
+ SOCK_IO_RECV_UNLOCK(so);
+ SOCK_IO_SEND_UNLOCK(so);
so->so_pcb = NULL;
}
diff --git a/sys/kern/kern_sendfile.c b/sys/kern/kern_sendfile.c
--- a/sys/kern/kern_sendfile.c
+++ b/sys/kern/kern_sendfile.c
@@ -741,7 +741,9 @@
* XXXRW: Historically this has assumed non-interruptibility, so now
* we implement that, but possibly shouldn't.
*/
- (void)sblock(&so->so_snd, SBL_WAIT | SBL_NOINTR);
+ error = SOCK_IO_SEND_LOCK(so, SBL_WAIT | SBL_NOINTR);
+ if (error != 0)
+ goto out;
#ifdef KERN_TLS
tls = ktls_hold(so->so_snd.sb_tls_info);
#endif
@@ -1211,7 +1213,7 @@
* Send trailers. Wimp out and use writev(2).
*/
if (trl_uio != NULL) {
- sbunlock(&so->so_snd);
+ SOCK_IO_SEND_UNLOCK(so);
error = kern_writev(td, sockfd, trl_uio);
if (error == 0)
sbytes += td->td_retval[0];
@@ -1219,7 +1221,7 @@
}
done:
- sbunlock(&so->so_snd);
+ SOCK_IO_SEND_UNLOCK(so);
out:
/*
* If there was no error we have to clear td->td_retval[0]
diff --git a/sys/kern/uipc_ktls.c b/sys/kern/uipc_ktls.c
--- a/sys/kern/uipc_ktls.c
+++ b/sys/kern/uipc_ktls.c
@@ -1171,7 +1171,7 @@
return (error);
}
- error = sblock(&so->so_snd, SBL_WAIT);
+ error = SOCK_IO_SEND_LOCK(so, SBL_WAIT);
if (error) {
ktls_cleanup(tls);
return (error);
@@ -1191,7 +1191,7 @@
so->so_snd.sb_flags |= SB_TLS_IFNET;
SOCKBUF_UNLOCK(&so->so_snd);
INP_WUNLOCK(inp);
- sbunlock(&so->so_snd);
+ SOCK_IO_SEND_UNLOCK(so);
counter_u64_add(ktls_offload_total, 1);
@@ -1292,7 +1292,7 @@
return (error);
}
- error = sblock(&so->so_snd, SBL_WAIT);
+ error = SOCK_IO_SEND_LOCK(so, SBL_WAIT);
if (error) {
counter_u64_add(ktls_switch_failed, 1);
ktls_free(tls_new);
@@ -1307,7 +1307,7 @@
*/
if (tls != so->so_snd.sb_tls_info) {
counter_u64_add(ktls_switch_failed, 1);
- sbunlock(&so->so_snd);
+ SOCK_IO_SEND_UNLOCK(so);
ktls_free(tls_new);
ktls_free(tls);
INP_WLOCK(inp);
@@ -1319,7 +1319,7 @@
if (tls_new->mode != TCP_TLS_MODE_SW)
so->so_snd.sb_flags |= SB_TLS_IFNET;
SOCKBUF_UNLOCK(&so->so_snd);
- sbunlock(&so->so_snd);
+ SOCK_IO_SEND_UNLOCK(so);
/*
* Drop two references on 'tls'. The first is for the
diff --git a/sys/kern/uipc_sockbuf.c b/sys/kern/uipc_sockbuf.c
--- a/sys/kern/uipc_sockbuf.c
+++ b/sys/kern/uipc_sockbuf.c
@@ -475,34 +475,6 @@
sb->sb_timeo, 0, 0));
}
-int
-sblock(struct sockbuf *sb, int flags)
-{
-
- KASSERT((flags & SBL_VALID) == flags,
- ("sblock: flags invalid (0x%x)", flags));
-
- if (flags & SBL_WAIT) {
- if ((sb->sb_flags & SB_NOINTR) ||
- (flags & SBL_NOINTR)) {
- sx_xlock(&sb->sb_sx);
- return (0);
- }
- return (sx_xlock_sig(&sb->sb_sx));
- } else {
- if (sx_try_xlock(&sb->sb_sx) == 0)
- return (EWOULDBLOCK);
- return (0);
- }
-}
-
-void
-sbunlock(struct sockbuf *sb)
-{
-
- sx_xunlock(&sb->sb_sx);
-}
-
/*
* Wakeup processes waiting on a socket buffer. Do asynchronous notification
* via SIGIO if the socket has the SS_ASYNC flag set.
diff --git a/sys/kern/uipc_socket.c b/sys/kern/uipc_socket.c
--- a/sys/kern/uipc_socket.c
+++ b/sys/kern/uipc_socket.c
@@ -1587,7 +1587,7 @@
if (control != NULL)
clen = control->m_len;
- error = sblock(&so->so_snd, SBLOCKWAIT(flags));
+ error = SOCK_IO_SEND_LOCK(so, SBLOCKWAIT(flags));
if (error)
goto out;
@@ -1785,7 +1785,7 @@
} while (resid);
release:
- sbunlock(&so->so_snd);
+ SOCK_IO_SEND_UNLOCK(so);
out:
#ifdef KERN_TLS
if (tls != NULL)
@@ -1932,7 +1932,7 @@
(*pr->pr_usrreqs->pru_rcvd)(so, 0);
}
- error = sblock(&so->so_rcv, SBLOCKWAIT(flags));
+ error = SOCK_IO_RECV_LOCK(so, SBLOCKWAIT(flags));
if (error)
return (error);
@@ -2387,7 +2387,7 @@
if (flagsp != NULL)
*flagsp |= flags;
release:
- sbunlock(&so->so_rcv);
+ SOCK_IO_RECV_UNLOCK(so);
return (error);
}
@@ -2434,7 +2434,7 @@
#endif
/* Prevent other readers from entering the socket. */
- error = sblock(sb, SBLOCKWAIT(flags));
+ error = SOCK_IO_RECV_LOCK(so, SBLOCKWAIT(flags));
if (error)
return (error);
SOCKBUF_LOCK(sb);
@@ -2442,7 +2442,7 @@
#ifdef KERN_TLS
if (sb->sb_tls_info != NULL) {
SOCKBUF_UNLOCK(sb);
- sbunlock(sb);
+ SOCK_IO_RECV_UNLOCK(so);
return (soreceive_generic(so, psa, uio, mp0, controlp,
flagsp));
}
@@ -2605,11 +2605,10 @@
if ((flags & MSG_WAITALL) && uio->uio_resid > 0)
goto restart;
out:
- SOCKBUF_LOCK_ASSERT(sb);
SBLASTRECORDCHK(sb);
SBLASTMBUFCHK(sb);
SOCKBUF_UNLOCK(sb);
- sbunlock(sb);
+ SOCK_IO_RECV_UNLOCK(so);
return (error);
}
@@ -2876,6 +2875,7 @@
struct sockbuf *sb = &so->so_rcv;
struct protosw *pr = so->so_proto;
struct socket aso;
+ int error;
VNET_SO_ASSERT(so);
@@ -2893,7 +2893,9 @@
* despite any existing socket disposition on interruptable waiting.
*/
socantrcvmore(so);
- (void) sblock(sb, SBL_WAIT | SBL_NOINTR);
+ error = SOCK_IO_RECV_LOCK(so, SBL_WAIT | SBL_NOINTR);
+ KASSERT(error == 0, ("%s: cannot lock sock %p recv buffer",
+ __func__, so));
/*
* Invalidate/clear most of the sockbuf structure, but leave selinfo
@@ -2907,7 +2909,7 @@
bzero(&sb->sb_startzero,
sizeof(*sb) - offsetof(struct sockbuf, sb_startzero));
SOCKBUF_UNLOCK(sb);
- sbunlock(sb);
+ SOCK_IO_RECV_UNLOCK(so);
/*
* Dispose of special rights and flush the copied socket. Don't call
@@ -4100,6 +4102,39 @@
wakeup(&so->so_timeo);
}
+int
+soiolock(struct socket *so, struct sx *sx, int flags)
+{
+ int error;
+
+ KASSERT((flags & SBL_VALID) == flags,
+ ("soiolock: invalid flags %#x", flags));
+
+ if ((flags & SBL_WAIT) != 0) {
+ if ((flags & SBL_NOINTR) != 0) {
+ sx_xlock(sx);
+ } else {
+ error = sx_xlock_sig(sx);
+ if (error != 0)
+ return (error);
+ }
+ } else if (!sx_try_xlock(sx)) {
+ return (EWOULDBLOCK);
+ }
+
+ if (__predict_false(SOLISTENING(so))) {
+ sx_xunlock(sx);
+ return (ENOTCONN);
+ }
+ return (0);
+}
+
+void
+soiounlock(struct sx *sx)
+{
+ sx_xunlock(sx);
+}
+
/*
* Make a copy of a sockaddr in a malloced buffer of type M_SONAME.
*/
diff --git a/sys/netinet/sctputil.c b/sys/netinet/sctputil.c
--- a/sys/netinet/sctputil.c
+++ b/sys/netinet/sctputil.c
@@ -4796,10 +4796,10 @@
old_so = old_inp->sctp_socket;
new_so = new_inp->sctp_socket;
TAILQ_INIT(&tmp_queue);
- error = sblock(&old_so->so_rcv, waitflags);
+ error = SOCK_IO_RECV_LOCK(old_so, waitflags);
if (error) {
/*
- * Gak, can't get sblock, we have a problem. data will be
+ * Gak, can't get I/O lock, we have a problem. data will be
* left stranded.. and we don't dare look at it since the
* other thread may be reading something. Oh well, its a
* screwed up app that does a peeloff OR a accept while
@@ -4831,9 +4831,8 @@
}
}
SCTP_INP_READ_UNLOCK(old_inp);
- /* Remove the sb-lock on the old socket */
-
- sbunlock(&old_so->so_rcv);
+ /* Remove the recv-lock on the old socket */
+ SOCK_IO_RECV_UNLOCK(old_so);
/* Now we move them over to the new socket buffer */
SCTP_INP_READ_LOCK(new_inp);
TAILQ_FOREACH_SAFE(control, &tmp_queue, next, nctl) {
@@ -5586,7 +5585,7 @@
rwnd_req, block_allowed, so->so_rcv.sb_cc, (uint32_t)uio->uio_resid);
}
- error = sblock(&so->so_rcv, (block_allowed ? SBL_WAIT : 0));
+ error = SOCK_IO_RECV_LOCK(so, (block_allowed ? SBL_WAIT : 0));
if (error) {
goto release_unlocked;
}
@@ -6234,8 +6233,8 @@
}
/*
* We need to wait for more data a few things: - We don't
- * sbunlock() so we don't get someone else reading. - We
- * must be sure to account for the case where what is added
+ * release the I/O lock so we don't get someone else reading.
+ * - We must be sure to account for the case where what is added
* is NOT to our control when we wakeup.
*/
@@ -6383,7 +6382,7 @@
hold_sblock = 0;
}
- sbunlock(&so->so_rcv);
+ SOCK_IO_RECV_UNLOCK(so);
sockbuf_lock = 0;
release_unlocked:
@@ -6418,7 +6417,7 @@
SOCKBUF_UNLOCK(&so->so_rcv);
}
if (sockbuf_lock) {
- sbunlock(&so->so_rcv);
+ SOCK_IO_RECV_UNLOCK(so);
}
if (freecnt_applied) {
diff --git a/sys/ofed/drivers/infiniband/ulp/sdp/sdp_main.c b/sys/ofed/drivers/infiniband/ulp/sdp/sdp_main.c
--- a/sys/ofed/drivers/infiniband/ulp/sdp/sdp_main.c
+++ b/sys/ofed/drivers/infiniband/ulp/sdp/sdp_main.c
@@ -1105,7 +1105,7 @@
td->td_ru.ru_msgsnd++;
ssk = sdp_sk(so);
- error = sblock(&so->so_snd, SBLOCKWAIT(flags));
+ error = SOCK_IO_SEND_LOCK(so, SBLOCKWAIT(flags));
if (error)
goto out;
@@ -1196,7 +1196,7 @@
} while (resid);
release:
- sbunlock(&so->so_snd);
+ SOCK_IO_SEND_UNLOCK(so);
out:
if (top != NULL)
m_freem(top);
@@ -1267,9 +1267,9 @@
ssk = sdp_sk(so);
/* Prevent other readers from entering the socket. */
- error = sblock(sb, SBLOCKWAIT(flags));
+ error = SOCK_IO_RECV_LOCK(so, SBLOCKWAIT(flags));
if (error)
- goto out;
+ return (error);
SOCKBUF_LOCK(sb);
/* Easy one, no space to copyout anything. */
@@ -1423,11 +1423,10 @@
if ((flags & MSG_WAITALL) && uio->uio_resid > 0)
goto restart;
out:
- SOCKBUF_LOCK_ASSERT(sb);
SBLASTRECORDCHK(sb);
SBLASTMBUFCHK(sb);
SOCKBUF_UNLOCK(sb);
- sbunlock(sb);
+ SOCK_IO_RECV_UNLOCK(so);
return (error);
}
diff --git a/sys/sys/sockbuf.h b/sys/sys/sockbuf.h
--- a/sys/sys/sockbuf.h
+++ b/sys/sys/sockbuf.h
@@ -78,7 +78,6 @@
*
* Locking key to struct sockbuf:
* (a) locked by SOCKBUF_LOCK().
- * (b) locked by sblock()
*/
struct sockbuf {
struct mtx sb_mtx; /* sockbuf lock */
@@ -183,8 +182,6 @@
struct mbuf *
sbsndmbuf(struct sockbuf *sb, u_int off, u_int *moff);
int sbwait(struct sockbuf *sb);
-int sblock(struct sockbuf *sb, int flags);
-void sbunlock(struct sockbuf *sb);
void sballoc(struct sockbuf *, struct mbuf *);
void sbfree(struct sockbuf *, struct mbuf *);
void sballoc_ktls_rx(struct sockbuf *sb, struct mbuf *m);
diff --git a/sys/sys/socketvar.h b/sys/sys/socketvar.h
--- a/sys/sys/socketvar.h
+++ b/sys/sys/socketvar.h
@@ -249,12 +249,21 @@
*/
/*
- * Flags to sblock().
+ * Flags to soiolock().
*/
#define SBL_WAIT 0x00000001 /* Wait if not immediately available. */
#define SBL_NOINTR 0x00000002 /* Force non-interruptible sleep. */
#define SBL_VALID (SBL_WAIT | SBL_NOINTR)
+#define SOCK_IO_SEND_LOCK(so, flags) \
+ soiolock((so), &(so)->so_snd.sb_sx, (flags))
+#define SOCK_IO_SEND_UNLOCK(so) \
+ soiounlock(&(so)->so_snd.sb_sx)
+#define SOCK_IO_RECV_LOCK(so, flags) \
+ soiolock((so), &(so)->so_rcv.sb_sx, (flags))
+#define SOCK_IO_RECV_UNLOCK(so) \
+ soiounlock(&(so)->so_rcv.sb_sx)
+
/*
* Do we need to notify the other side when I/O is possible?
*/
@@ -484,6 +493,8 @@
void socantsendmore_locked(struct socket *so);
void soroverflow(struct socket *so);
void soroverflow_locked(struct socket *so);
+int soiolock(struct socket *so, struct sx *sx, int flags);
+void soiounlock(struct sx *sx);
/*
* Accept filter functions (duh).
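
For context, here is a minimal caller sketch (not part of the patch) showing how the SOCK_IO_RECV_LOCK()/SOCK_IO_RECV_UNLOCK() macros introduced above are meant to be used in place of sblock()/sbunlock(). The function name example_rx_locked_op() is a hypothetical placeholder; only the macros, SBLOCKWAIT(), SOCKBUF_LOCK()/SOCKBUF_UNLOCK(), and the EWOULDBLOCK/ENOTCONN error behaviour are taken from the diff itself.

/*
 * Hypothetical usage sketch, not part of D31657: serialize a receive
 * path with the per-socket I/O lock instead of the old sockbuf lock.
 */
static int
example_rx_locked_op(struct socket *so, int flags)
{
        int error;

        /*
         * Exclude other readers.  soiolock() can return EWOULDBLOCK
         * (when SBL_WAIT is not set), an interrupted-sleep error, or
         * ENOTCONN if the socket has become a listening socket.
         */
        error = SOCK_IO_RECV_LOCK(so, SBLOCKWAIT(flags));
        if (error != 0)
                return (error);

        SOCKBUF_LOCK(&so->so_rcv);
        /* ... examine or drain so->so_rcv here ... */
        SOCKBUF_UNLOCK(&so->so_rcv);

        SOCK_IO_RECV_UNLOCK(so);
        return (0);
}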
