sys/kern/uipc_sockbuf.c
[First 390 lines elided]
  * received, and will normally be applied to the socket by a protocol when it
  * detects that the peer will send no more data.  Data queued for reading in
  * the socket may yet be read.
  */
 void
 socantsendmore_locked(struct socket *so)
 {
-	SOCKBUF_LOCK_ASSERT(&so->so_snd);
+	SOCK_SENDBUF_LOCK_ASSERT(so);
 	so->so_snd.sb_state |= SBS_CANTSENDMORE;
 	sowwakeup_locked(so);
-	mtx_assert(SOCKBUF_MTX(&so->so_snd), MA_NOTOWNED);
+	SOCK_SENDBUF_UNLOCK_ASSERT(so);
 }

 void
 socantsendmore(struct socket *so)
 {
-	SOCKBUF_LOCK(&so->so_snd);
+	SOCK_SENDBUF_LOCK(so);
 	socantsendmore_locked(so);
-	mtx_assert(SOCKBUF_MTX(&so->so_snd), MA_NOTOWNED);
+	SOCK_SENDBUF_UNLOCK_ASSERT(so);
 }

 void
 socantrcvmore_locked(struct socket *so)
 {
-	SOCKBUF_LOCK_ASSERT(&so->so_rcv);
+	SOCK_RECVBUF_LOCK_ASSERT(so);
 	so->so_rcv.sb_state |= SBS_CANTRCVMORE;
 #ifdef KERN_TLS
 	if (so->so_rcv.sb_flags & SB_TLS_RX)
 		ktls_check_rx(&so->so_rcv);
 #endif
 	sorwakeup_locked(so);
-	mtx_assert(SOCKBUF_MTX(&so->so_rcv), MA_NOTOWNED);
+	SOCK_RECVBUF_UNLOCK_ASSERT(so);
 }

 void
 socantrcvmore(struct socket *so)
 {
-	SOCKBUF_LOCK(&so->so_rcv);
+	SOCK_RECVBUF_LOCK(so);
 	socantrcvmore_locked(so);
-	mtx_assert(SOCKBUF_MTX(&so->so_rcv), MA_NOTOWNED);
+	SOCK_RECVBUF_UNLOCK_ASSERT(so);
 }

 void
 soroverflow_locked(struct socket *so)
 {
-	SOCKBUF_LOCK_ASSERT(&so->so_rcv);
+	SOCK_RECVBUF_LOCK_ASSERT(so);
 	if (so->so_options & SO_RERROR) {
 		so->so_rerror = ENOBUFS;
 		sorwakeup_locked(so);
 	} else
-		SOCKBUF_UNLOCK(&so->so_rcv);
+		SOCK_RECVBUF_UNLOCK(so);
-	mtx_assert(SOCKBUF_MTX(&so->so_rcv), MA_NOTOWNED);
+	SOCK_RECVBUF_UNLOCK_ASSERT(so);
 }

 void
 soroverflow(struct socket *so)
 {
-	SOCKBUF_LOCK(&so->so_rcv);
+	SOCK_RECVBUF_LOCK(so);
 	soroverflow_locked(so);
-	mtx_assert(SOCKBUF_MTX(&so->so_rcv), MA_NOTOWNED);
+	SOCK_RECVBUF_UNLOCK_ASSERT(so);
 }
 /*
  * Wait for data to arrive at/drain from a socket buffer.
  */
 int
-sbwait(struct sockbuf *sb)
+sbwait(struct socket *so, sb_which which)
 {
+	struct sockbuf *sb;
-	SOCKBUF_LOCK_ASSERT(sb);
+	SOCK_BUF_LOCK_ASSERT(so, which);
+	sb = sobuf(so, which);
 	sb->sb_flags |= SB_WAIT;
-	return (msleep_sbt(&sb->sb_acc, SOCKBUF_MTX(sb),
+	return (msleep_sbt(&sb->sb_acc, soeventmtx(so, which),
 	    (sb->sb_flags & SB_NOINTR) ? PSOCK : PSOCK | PCATCH, "sbwait",
 	    sb->sb_timeo, 0, 0));
 }
 /*
  * Wakeup processes waiting on a socket buffer.  Do asynchronous notification
  * via SIGIO if the socket has the SS_ASYNC flag set.
  *
  * Called with the socket buffer lock held; will release the lock by the end
  * of the function.  This allows the caller to acquire the socket buffer lock
  * while testing for the need for various sorts of wakeup and hold it through
  * to the point where it's no longer required.  We currently hold the lock
  * through calls out to other subsystems (with the exception of kqueue), and
  * then release it to avoid lock order issues.  It's not clear that's
  * correct.
  */
-void
-sowakeup(struct socket *so, struct sockbuf *sb)
+static __always_inline void
+sowakeup(struct socket *so, const sb_which which)
 {
+	struct sockbuf *sb;
 	int ret;

-	SOCKBUF_LOCK_ASSERT(sb);
+	SOCK_BUF_LOCK_ASSERT(so, which);
+	sb = sobuf(so, which);
 	selwakeuppri(sb->sb_sel, PSOCK);
 	if (!SEL_WAITING(sb->sb_sel))
 		sb->sb_flags &= ~SB_SEL;
 	if (sb->sb_flags & SB_WAIT) {
 		sb->sb_flags &= ~SB_WAIT;
 		wakeup(&sb->sb_acc);
 	}
 	KNOTE_LOCKED(&sb->sb_sel->si_note, 0);
 	if (sb->sb_upcall != NULL) {
 		ret = sb->sb_upcall(so, sb->sb_upcallarg, M_NOWAIT);
 		if (ret == SU_ISCONNECTED) {
 			KASSERT(sb == &so->so_rcv,
 			    ("SO_SND upcall returned SU_ISCONNECTED"));
 			soupcall_clear(so, SO_RCV);
 		}
 	} else
 		ret = SU_OK;
 	if (sb->sb_flags & SB_AIO)
-		sowakeup_aio(so, sb);
+		sowakeup_aio(so, which);
-	SOCKBUF_UNLOCK(sb);
+	SOCK_BUF_UNLOCK(so, which);
 	if (ret == SU_ISCONNECTED)
 		soisconnected(so);
 	if ((so->so_state & SS_ASYNC) && so->so_sigio != NULL)
 		pgsigio(&so->so_sigio, SIGIO, 0);
-	mtx_assert(SOCKBUF_MTX(sb), MA_NOTOWNED);
+	SOCK_BUF_UNLOCK_ASSERT(so, which);
 }
+void
+sorwakeup_locked(struct socket *so)
+{
+	SOCK_RECVBUF_LOCK_ASSERT(so);
+	if (sb_notify(&so->so_rcv))
+		sowakeup(so, SO_RCV);
+	else
+		SOCK_RECVBUF_UNLOCK(so);
+}
+
+void
+sowwakeup_locked(struct socket *so)
+{
+	SOCK_SENDBUF_LOCK_ASSERT(so);
+	if (sb_notify(&so->so_snd))
+		sowakeup(so, SO_SND);
+	else
+		SOCK_SENDBUF_UNLOCK(so);
+}
markj: For frequently called functions where which is fixed at compile time, it may be profitable to… (full comment at the end of this review)

glebius: Good idea. While implementing it, found out that sowakeup() is never used directly, so the patch is going to be even smaller.
 /*
  * Socket buffer (struct sockbuf) utility routines.
  *
  * Each socket contains two socket buffers: one for sending data and one for
  * receiving data.  Each buffer contains a queue of mbufs, information about
  * the number of mbufs and amount of data in the queue, and other fields
  * allowing select() statements and notification on data availability to be
  * implemented.
[20 lines elided]
  * socket (currently, it does nothing but enforce limits).  The space should
  * be released by calling sbrelease() when the socket is destroyed.
  */
 int
 soreserve(struct socket *so, u_long sndcc, u_long rcvcc)
 {
 	struct thread *td = curthread;

-	SOCKBUF_LOCK(&so->so_snd);
+	SOCK_SENDBUF_LOCK(so);
-	SOCKBUF_LOCK(&so->so_rcv);
+	SOCK_RECVBUF_LOCK(so);
-	if (sbreserve_locked(&so->so_snd, sndcc, so, td) == 0)
+	if (sbreserve_locked(so, SO_SND, sndcc, td) == 0)
 		goto bad;
-	if (sbreserve_locked(&so->so_rcv, rcvcc, so, td) == 0)
+	if (sbreserve_locked(so, SO_RCV, rcvcc, td) == 0)
 		goto bad2;
 	if (so->so_rcv.sb_lowat == 0)
 		so->so_rcv.sb_lowat = 1;
 	if (so->so_snd.sb_lowat == 0)
 		so->so_snd.sb_lowat = MCLBYTES;
 	if (so->so_snd.sb_lowat > so->so_snd.sb_hiwat)
 		so->so_snd.sb_lowat = so->so_snd.sb_hiwat;
-	SOCKBUF_UNLOCK(&so->so_rcv);
+	SOCK_RECVBUF_UNLOCK(so);
-	SOCKBUF_UNLOCK(&so->so_snd);
+	SOCK_SENDBUF_UNLOCK(so);
 	return (0);
 bad2:
-	sbrelease_locked(&so->so_snd, so);
+	sbrelease_locked(so, SO_SND);
 bad:
-	SOCKBUF_UNLOCK(&so->so_rcv);
+	SOCK_RECVBUF_UNLOCK(so);
-	SOCKBUF_UNLOCK(&so->so_snd);
+	SOCK_SENDBUF_UNLOCK(so);
 	return (ENOBUFS);
 }
 static int
 sysctl_handle_sb_max(SYSCTL_HANDLER_ARGS)
 {
 	int error = 0;
 	u_long tmp_sb_max = sb_max;

 	error = sysctl_handle_long(oidp, &tmp_sb_max, arg2, req);
 	if (error || !req->newptr)
 		return (error);
 	if (tmp_sb_max < MSIZE + MCLBYTES)
 		return (EINVAL);
 	sb_max = tmp_sb_max;
 	sb_max_adj = (u_quad_t)sb_max * MCLBYTES / (MSIZE + MCLBYTES);
 	return (0);
 }
 /*
  * Allot mbufs to a sockbuf.  Attempt to scale mbmax so that mbcnt doesn't
  * become limiting if buffering efficiency is near the normal case.
  */
-int
-sbreserve_locked(struct sockbuf *sb, u_long cc, struct socket *so,
+bool
+sbreserve_locked(struct socket *so, sb_which which, u_long cc,
     struct thread *td)
 {
+	struct sockbuf *sb = sobuf(so, which);
 	rlim_t sbsize_limit;

-	SOCKBUF_LOCK_ASSERT(sb);
+	SOCK_BUF_LOCK_ASSERT(so, which);

 	/*
 	 * When a thread is passed, we take into account the thread's socket
 	 * buffer size limit.  The caller will generally pass curthread, but
 	 * in the TCP input path, NULL will be passed to indicate that no
 	 * appropriate thread resource limits are available.  In that case,
 	 * we don't apply a process limit.
 	 */
 	if (cc > sb_max_adj)
-		return (0);
+		return (false);
 	if (td != NULL) {
 		sbsize_limit = lim_cur(td, RLIMIT_SBSIZE);
 	} else
 		sbsize_limit = RLIM_INFINITY;
 	if (!chgsbsize(so->so_cred->cr_uidinfo, &sb->sb_hiwat, cc,
 	    sbsize_limit))
-		return (0);
+		return (false);
 	sb->sb_mbmax = min(cc * sb_efficiency, sb_max);
 	if (sb->sb_lowat > sb->sb_hiwat)
 		sb->sb_lowat = sb->sb_hiwat;
-	return (1);
+	return (true);
 }
 int
 sbsetopt(struct socket *so, int cmd, u_long cc)
 {
 	struct sockbuf *sb;
+	sb_which wh;
 	short *flags;
 	u_int *hiwat, *lowat;
 	int error;

 	sb = NULL;
 	SOCK_LOCK(so);
 	if (SOLISTENING(so)) {
 		switch (cmd) {
[10 lines elided]
 			flags = &so->sol_sbrcv_flags;
 			break;
 		}
 	} else {
 		switch (cmd) {
 		case SO_SNDLOWAT:
 		case SO_SNDBUF:
 			sb = &so->so_snd;
+			wh = SO_SND;
 			break;
 		case SO_RCVLOWAT:
 		case SO_RCVBUF:
 			sb = &so->so_rcv;
+			wh = SO_RCV;
 			break;
 		}
 		flags = &sb->sb_flags;
 		hiwat = &sb->sb_hiwat;
 		lowat = &sb->sb_lowat;
-		SOCKBUF_LOCK(sb);
+		SOCK_BUF_LOCK(so, wh);
 	}

 	error = 0;
 	switch (cmd) {
 	case SO_SNDBUF:
 	case SO_RCVBUF:
 		if (SOLISTENING(so)) {
 			if (cc > sb_max_adj) {
 				error = ENOBUFS;
 				break;
 			}
 			*hiwat = cc;
 			if (*lowat > *hiwat)
 				*lowat = *hiwat;
 		} else {
-			if (!sbreserve_locked(sb, cc, so, curthread))
+			if (!sbreserve_locked(so, wh, cc, curthread))
 				error = ENOBUFS;
 		}
 		if (error == 0)
 			*flags &= ~SB_AUTOSIZE;
 		break;
 	case SO_SNDLOWAT:
 	case SO_RCVLOWAT:
 		/*
 		 * Make sure the low-water is never greater than the
 		 * high-water.
 		 */
 		*lowat = (cc > *hiwat) ? *hiwat : cc;
 		break;
 	}

 	if (!SOLISTENING(so))
-		SOCKBUF_UNLOCK(sb);
+		SOCK_BUF_UNLOCK(so, wh);
 	SOCK_UNLOCK(so);
 	return (error);
 }
 /*
  * Free mbufs held by a socket, and reserved mbuf space.
  */
 static void
-sbrelease_internal(struct sockbuf *sb, struct socket *so)
+sbrelease_internal(struct socket *so, sb_which which)
 {
+	struct sockbuf *sb = sobuf(so, which);

 	sbflush_internal(sb);
 	(void)chgsbsize(so->so_cred->cr_uidinfo, &sb->sb_hiwat, 0,
 	    RLIM_INFINITY);
 	sb->sb_mbmax = 0;
 }

 void
-sbrelease_locked(struct sockbuf *sb, struct socket *so)
+sbrelease_locked(struct socket *so, sb_which which)
 {
-	SOCKBUF_LOCK_ASSERT(sb);
+	SOCK_BUF_LOCK_ASSERT(so, which);

-	sbrelease_internal(sb, so);
+	sbrelease_internal(so, which);
 }

 void
-sbrelease(struct sockbuf *sb, struct socket *so)
+sbrelease(struct socket *so, sb_which which)
 {
-	SOCKBUF_LOCK(sb);
+	SOCK_BUF_LOCK(so, which);
-	sbrelease_locked(sb, so);
+	sbrelease_locked(so, which);
-	SOCKBUF_UNLOCK(sb);
+	SOCK_BUF_UNLOCK(so, which);
 }

 void
-sbdestroy(struct sockbuf *sb, struct socket *so)
+sbdestroy(struct socket *so, sb_which which)
 {
-	sbrelease_internal(sb, so);
 #ifdef KERN_TLS
+	struct sockbuf *sb = sobuf(so, which);

 	if (sb->sb_tls_info != NULL)
 		ktls_free(sb->sb_tls_info);
 	sb->sb_tls_info = NULL;
 #endif
+	sbrelease_internal(so, which);
 }
 /*
  * Routines to add and remove data from an mbuf queue.
  *
  * The routines sbappend() or sbappendrecord() are normally called to append
  * new mbufs to a socket buffer, after checking that adequate space is
  * available, comparing the function sbspace() with the amount of data to be
[Remaining 1,034 lines elided]
markj: For frequently called functions where which is fixed at compile time, it may be profitable to parameterize over which. For example, in sockbuf.h, have:
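[The code snippets in this comment were lost when the page was captured. The sketch below, and the one after the next sentence, are hedged reconstructions inferred from the comment's wording and from the committed diff above; the name _sowakeup is taken from the comment text, and the exact bodies are guesses.]

/* sockbuf.h -- reconstructed sketch, not the original snippet. */
void	sowakeup(struct socket *so, sb_which which);
void	sorwakeup_locked(struct socket *so);
void	sowwakeup_locked(struct socket *so);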
and in uipc_sockbuf.c:
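[Reconstructed sketch, same caveats as above:]

/*
 * uipc_sockbuf.c -- reconstructed sketch.  Forcing the worker inline
 * lets each wrapper pass a compile-time constant `which', so the
 * compiler can fold away the sobuf()/soeventmtx() conditionals.
 */
static __always_inline void
_sowakeup(struct socket *so, const sb_which which)
{
	struct sockbuf *sb = sobuf(so, which);

	/* ... wakeup logic as in sowakeup() in the diff above ... */
}

void
sowakeup(struct socket *so, sb_which which)
{
	_sowakeup(so, which);
}

void
sorwakeup_locked(struct socket *so)
{
	_sowakeup(so, SO_RCV);	/* `which' is constant here */
}

void
sowwakeup_locked(struct socket *so)
{
	_sowakeup(so, SO_SND);
}

This matches the shape of what appears in the diff above, where the worker keeps the name sowakeup() and the wrappers also check sb_notify() before calling it; glebius's reply explains why the committed version is smaller still, since sowakeup() had no direct callers and the separate wrapper could be dropped.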
The compiler can eliminate the conditional in sowakeup() in many cases since which is known at compile time. And if you force _sowakeup() to be inlined, the compiler can substitute fixed values for which and eliminate some more branches.
I'm not sure if it is worth it, and it makes the KPIs less general (you might want to add a third socket buffer for some reason).
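[To illustrate the branch-elimination claim with a standalone userland sketch (hypothetical names, not kernel code): when the selector is a compile-time constant at every call site of an inlined function, an optimizing compiler folds the conditional away.]

#include <stdio.h>

enum which { RCV, SND };

struct sock {
	int rcv_cnt;
	int snd_cnt;
};

/* Selector conditional: reduces to a direct field access once `w'
 * is a compile-time constant in an inlined caller. */
static inline int *
bufof(struct sock *s, enum which w)
{
	return (w == RCV ? &s->rcv_cnt : &s->snd_cnt);
}

static inline void
bump(struct sock *s, enum which w)
{
	(*bufof(s, w))++;
}

int
main(void)
{
	struct sock s = { 0, 0 };

	bump(&s, RCV);	/* effectively s.rcv_cnt++ after inlining */
	bump(&s, SND);	/* effectively s.snd_cnt++ */
	printf("%d %d\n", s.rcv_cnt, s.snd_cnt);
	return (0);
}

With optimization enabled, a typical compiler reduces both calls in main() to direct increments; the same effect is what makes the __always_inline sowakeup() in the diff above cheap for its fixed-`which' wrappers.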