Index: sys/kern/sys_socket.c
===================================================================
--- sys/kern/sys_socket.c
+++ sys/kern/sys_socket.c
@@ -751,10 +751,13 @@
 }
 
 void
-sowakeup_aio(struct socket *so, struct sockbuf *sb)
+sowakeup_aio(struct socket *so, sb_which which)
 {
+	struct sockbuf *sb = sobuf(so, which);
+	struct mtx *mtx = soeventmtx(so, which);
+
+	mtx_assert(mtx, MA_OWNED);
 
-	SOCKBUF_LOCK_ASSERT(sb);
 	sb->sb_flags &= ~SB_AIO;
 	if (sb->sb_flags & SB_AIO_RUNNING)
 		return;
@@ -799,6 +802,7 @@
 {
 	struct socket *so;
 	struct sockbuf *sb;
+	sb_which which;
 	int error;
 
 	so = fp->f_data;
@@ -809,12 +813,14 @@
 	/* Lock through the socket, since this may be a listening socket. */
 	switch (job->uaiocb.aio_lio_opcode & (LIO_WRITE | LIO_READ)) {
 	case LIO_READ:
-		sb = &so->so_rcv;
 		SOCK_RECVBUF_LOCK(so);
+		sb = &so->so_rcv;
+		which = SO_RCV;
 		break;
 	case LIO_WRITE:
-		sb = &so->so_snd;
 		SOCK_SENDBUF_LOCK(so);
+		sb = &so->so_snd;
+		which = SO_SND;
 		break;
 	default:
 		return (EINVAL);
@@ -833,7 +839,7 @@
 	TAILQ_INSERT_TAIL(&sb->sb_aiojobq, job, list);
 	if (!(sb->sb_flags & SB_AIO_RUNNING)) {
 		if (soaio_ready(so, sb))
-			sowakeup_aio(so, sb);
+			sowakeup_aio(so, which);
 		else
 			sb->sb_flags |= SB_AIO;
 	}
Index: sys/kern/uipc_sockbuf.c
===================================================================
--- sys/kern/uipc_sockbuf.c
+++ sys/kern/uipc_sockbuf.c
@@ -488,11 +488,13 @@
  * correct.
  */
 void
-sowakeup(struct socket *so, struct sockbuf *sb)
+sowakeup(struct socket *so, sb_which which)
 {
+	struct sockbuf *sb = sobuf(so, which);
+	struct mtx *mtx = soeventmtx(so, which);
 	int ret;
 
-	SOCKBUF_LOCK_ASSERT(sb);
+	mtx_assert(mtx, MA_OWNED);
 
 	selwakeuppri(sb->sb_sel, PSOCK);
 	if (!SEL_WAITING(sb->sb_sel))
@@ -512,13 +514,13 @@
 	} else
 		ret = SU_OK;
 	if (sb->sb_flags & SB_AIO)
-		sowakeup_aio(so, sb);
-	SOCKBUF_UNLOCK(sb);
+		sowakeup_aio(so, which);
+	mtx_unlock(mtx);
 	if (ret == SU_ISCONNECTED)
 		soisconnected(so);
 	if ((so->so_state & SS_ASYNC) && so->so_sigio != NULL)
 		pgsigio(&so->so_sigio, SIGIO, 0);
-	mtx_assert(SOCKBUF_MTX(sb), MA_NOTOWNED);
+	mtx_assert(mtx, MA_NOTOWNED);
 }
 
 /*
Index: sys/sys/socketvar.h
===================================================================
--- sys/sys/socketvar.h
+++ sys/sys/socketvar.h
@@ -77,8 +77,8 @@
  * Locking key to struct socket:
  * (a) constant after allocation, no locking required.
  * (b) locked by SOCK_LOCK(so).
- * (cr) locked by SOCK_RECVBUF_LOCK(so)/SOCKBUF_LOCK(&so->so_rcv).
- * (cs) locked by SOCK_SENDBUF_LOCK(so)/SOCKBUF_LOCK(&so->so_snd).
+ * (cr) locked by SOCK_RECVBUF_LOCK(so)
+ * (cs) locked by SOCK_SENDBUF_LOCK(so)
  * (e) locked by SOLISTEN_LOCK() of corresponding listening socket.
  * (f) not locked since integer reads/writes are atomic.
  * (g) used only as a sleep/wakeup address, no value.
@@ -256,8 +256,8 @@
 } while (0)
 
 /*
- * Socket buffer locks. These manipulate the same mutexes as SOCKBUF_LOCK()
- * and related macros.
+ * Socket buffer locks. These are strongly preferred over SOCKBUF_LOCK(sb)
+ * macros, as we are moving towards protocol specific socket buffers.
  */
 #define	SOCK_RECVBUF_MTX(so)						\
 	(&(so)->so_rcv_mtx)
@@ -284,6 +284,20 @@
 /* 'which' values for socket buffer events and upcalls. */
 typedef enum { SO_RCV, SO_SND } sb_which;
 
+static inline struct sockbuf *
+sobuf(struct socket *so, sb_which which)
+{
+
+	return (which == SO_RCV ? &so->so_rcv : &so->so_snd);
+}
+
+static inline struct mtx *
+soeventmtx(struct socket *so, sb_which which)
+{
+
+	return (which == SO_RCV ? SOCK_RECVBUF_MTX(so) : SOCK_SENDBUF_MTX(so));
+}
+
 /*
  * Macros for sockets and socket buffering.
  */
@@ -358,28 +372,28 @@
  * maintain the same semantics.
  */
 #define	sorwakeup_locked(so) do {					\
-	SOCKBUF_LOCK_ASSERT(&(so)->so_rcv);				\
+	SOCK_RECVBUF_LOCK_ASSERT(so);					\
 	if (sb_notify(&(so)->so_rcv))					\
-		sowakeup((so), &(so)->so_rcv);				\
+		sowakeup((so), SO_RCV);					\
 	else								\
-		SOCKBUF_UNLOCK(&(so)->so_rcv);				\
+		SOCK_RECVBUF_UNLOCK(so);				\
 } while (0)
 
 #define	sorwakeup(so) do {						\
-	SOCKBUF_LOCK(&(so)->so_rcv);					\
+	SOCK_RECVBUF_LOCK(so);						\
 	sorwakeup_locked(so);						\
 } while (0)
 
 #define	sowwakeup_locked(so) do {					\
-	SOCKBUF_LOCK_ASSERT(&(so)->so_snd);				\
+	SOCK_SENDBUF_LOCK_ASSERT(so);					\
 	if (sb_notify(&(so)->so_snd))					\
-		sowakeup((so), &(so)->so_snd);				\
+		sowakeup((so), SO_SND);					\
 	else								\
-		SOCKBUF_UNLOCK(&(so)->so_snd);				\
+		SOCK_SENDBUF_UNLOCK(so);				\
 } while (0)
 
 #define	sowwakeup(so) do {						\
-	SOCKBUF_LOCK(&(so)->so_snd);					\
+	SOCK_SENDBUF_LOCK(so);						\
 	sowwakeup_locked(so);						\
 } while (0)
 
@@ -520,8 +534,8 @@
 void	soupcall_clear(struct socket *, sb_which);
 void	soupcall_set(struct socket *, sb_which, so_upcall_t, void *);
 void	solisten_upcall_set(struct socket *, so_upcall_t, void *);
-void	sowakeup(struct socket *so, struct sockbuf *sb);
-void	sowakeup_aio(struct socket *so, struct sockbuf *sb);
+void	sowakeup(struct socket *, sb_which);
+void	sowakeup_aio(struct socket *, sb_which);
 void	solisten_wakeup(struct socket *);
 int	selsocket(struct socket *so, int events, struct timeval *tv,
 	    struct thread *td);
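
For reference, a minimal sketch (not part of the patch) of how a caller is expected to use the new (socket, sb_which) pair instead of a struct sockbuf pointer. It simply mirrors the sorwakeup_locked()/sowwakeup_locked() macros above; the helper name sb_wakeup_if_ready() is hypothetical, while sobuf(), soeventmtx(), sb_notify() and sowakeup() are the routines added or touched by this diff.

/*
 * Hypothetical helper, shown only to illustrate the pattern this diff
 * converts callers to: both the buffer and its event mutex are derived
 * from the (socket, sb_which) pair, so no struct sockbuf pointer is
 * needed to pick the right lock.
 */
static void
sb_wakeup_if_ready(struct socket *so, sb_which which)
{
	struct sockbuf *sb = sobuf(so, which);		/* &so->so_rcv or &so->so_snd */
	struct mtx *mtx = soeventmtx(so, which);	/* SOCK_RECVBUF_MTX() or SOCK_SENDBUF_MTX() */

	mtx_lock(mtx);
	if (sb_notify(sb))
		sowakeup(so, which);	/* drops the buffer lock before returning */
	else
		mtx_unlock(mtx);
}

Callers that already hold the lock, e.g. via SOCK_RECVBUF_LOCK(so), skip the mtx_lock() and call sowakeup() or the *_locked macros directly, exactly as the aio path in sys_socket.c above does with sowakeup_aio(so, which).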