diff --git a/sys/dev/cxgbe/tom/t4_cpl_io.c b/sys/dev/cxgbe/tom/t4_cpl_io.c
--- a/sys/dev/cxgbe/tom/t4_cpl_io.c
+++ b/sys/dev/cxgbe/tom/t4_cpl_io.c
@@ -703,7 +703,7 @@
 	for (m = sndptr; m != NULL; m = m->m_next) {
 		int n;
 
-		if ((m->m_flags & M_NOTAVAIL) != 0)
+		if ((m->m_flags & M_NOTREADY) != 0)
 			break;
 		if (m->m_flags & M_EXTPG) {
 #ifdef KERN_TLS
@@ -787,7 +787,7 @@
 
 		/* nothing to send */
 		if (plen == 0) {
-			KASSERT(m == NULL || (m->m_flags & M_NOTAVAIL) != 0,
+			KASSERT(m == NULL || (m->m_flags & M_NOTREADY) != 0,
 			    ("%s: nothing to send, but m != NULL is ready",
 			    __func__));
 			break;
@@ -880,7 +880,7 @@
 		toep->txsd_avail--;
 
 		t4_l2t_send(sc, wr, toep->l2te);
-	} while (m != NULL && (m->m_flags & M_NOTAVAIL) == 0);
+	} while (m != NULL && (m->m_flags & M_NOTREADY) == 0);
 
 	/* Send a FIN if requested, but only if there's no more data to send */
 	if (m == NULL && toep->flags & TPF_SEND_FIN)
diff --git a/sys/dev/cxgbe/tom/t4_tls.c b/sys/dev/cxgbe/tom/t4_tls.c
--- a/sys/dev/cxgbe/tom/t4_tls.c
+++ b/sys/dev/cxgbe/tom/t4_tls.c
@@ -563,7 +563,7 @@
 		 * If there is no ready data to send, wait until more
 		 * data arrives.
 		 */
-		if (m == NULL || (m->m_flags & M_NOTAVAIL) != 0) {
+		if (m == NULL || (m->m_flags & M_NOTREADY) != 0) {
 			if (sowwakeup)
 				sowwakeup_locked(so);
 			else
@@ -614,7 +614,7 @@
 
 		/* Shove if there is no additional data pending. */
 		shove = ((m->m_next == NULL ||
-		    (m->m_next->m_flags & M_NOTAVAIL) != 0)) &&
+		    (m->m_next->m_flags & M_NOTREADY) != 0)) &&
 		    (tp->t_flags & TF_MORETOCOME) == 0;
 
 		if (sb->sb_flags & SB_AUTOSIZE &&
diff --git a/sys/kern/uipc_ktls.c b/sys/kern/uipc_ktls.c
--- a/sys/kern/uipc_ktls.c
+++ b/sys/kern/uipc_ktls.c
@@ -1207,7 +1207,7 @@
 	for (; m != NULL; m = m->m_next) {
 		KASSERT(m->m_nextpkt == NULL, ("%s: m_nextpkt != NULL",
 		    __func__));
-		KASSERT((m->m_flags & M_NOTAVAIL) == 0, ("%s: mbuf not avail",
+		KASSERT((m->m_flags & M_NOTREADY) == 0, ("%s: mbuf not ready",
 		    __func__));
 		KASSERT(sb->sb_acc >= m->m_len, ("%s: sb_acc < m->m_len",
 		    __func__));
diff --git a/sys/kern/uipc_sockbuf.c b/sys/kern/uipc_sockbuf.c
--- a/sys/kern/uipc_sockbuf.c
+++ b/sys/kern/uipc_sockbuf.c
@@ -195,14 +195,14 @@
 sbready(struct sockbuf *sb, struct mbuf *m0, int count)
 {
 	struct mbuf *m;
-	u_int blocker;
+	bool blocker;
 
 	SOCKBUF_LOCK_ASSERT(sb);
 	KASSERT(sb->sb_fnrdy != NULL, ("%s: sb %p NULL fnrdy", __func__, sb));
 	KASSERT(count > 0, ("%s: invalid count %d", __func__, count));
 
 	m = m0;
-	blocker = (sb->sb_fnrdy == m) ? M_BLOCKED : 0;
+	blocker = (sb->sb_fnrdy == m);
 
 	while (count > 0) {
 		KASSERT(m->m_flags & M_NOTREADY,
@@ -217,8 +217,7 @@
 			m->m_epg_nrdy = 0;
 		} else
 			count--;
-
-		m->m_flags &= ~(M_NOTREADY | blocker);
+		m->m_flags &= ~M_NOTREADY;
 		if (blocker)
 			sb->sb_acc += m->m_len;
 		m = m->m_next;
@@ -240,12 +239,8 @@
 	}
 
 	/* This one was blocking all the queue. */
-	for (; m && (m->m_flags & M_NOTREADY) == 0; m = m->m_next) {
-		KASSERT(m->m_flags & M_BLOCKED,
-		    ("%s: m %p !M_BLOCKED", __func__, m));
-		m->m_flags &= ~M_BLOCKED;
+	for (; m && (m->m_flags & M_NOTREADY) == 0; m = m->m_next)
 		sb->sb_acc += m->m_len;
-	}
 
 	sb->sb_fnrdy = m;
 	sbready_compress(sb, m0, m);
@@ -269,8 +264,7 @@
 			sb->sb_fnrdy = m;
 		else
 			sb->sb_acc += m->m_len;
-	} else
-		m->m_flags |= M_BLOCKED;
+	}
 
 	if (m->m_type != MT_DATA && m->m_type != MT_OOBDATA)
 		sb->sb_ctl += m->m_len;
@@ -287,29 +281,29 @@
 void
 sbfree(struct sockbuf *sb, struct mbuf *m)
 {
+	struct mbuf *n;
 
 #if 0	/* XXX: not yet: soclose() call path comes here w/o lock. */
 	SOCKBUF_LOCK_ASSERT(sb);
 #endif
-
 	sb->sb_ccc -= m->m_len;
 
-	if (!(m->m_flags & M_NOTAVAIL))
-		sb->sb_acc -= m->m_len;
-
 	if (m == sb->sb_fnrdy) {
-		struct mbuf *n;
-
 		KASSERT(m->m_flags & M_NOTREADY,
 		    ("%s: m %p !M_NOTREADY", __func__, m));
 
 		n = m->m_next;
 		while (n != NULL && !(n->m_flags & M_NOTREADY)) {
-			n->m_flags &= ~M_BLOCKED;
 			sb->sb_acc += n->m_len;
 			n = n->m_next;
 		}
 		sb->sb_fnrdy = n;
+	} else {
+		/* Assert that mbuf is not behind sb_fnrdy. */
+		for (n = sb->sb_fnrdy; n != NULL; n = n->m_next)
+			KASSERT(n != m, ("%s: sb %p freeing %p behind sb_fnrdy",
+			    __func__, sb, m));
+		sb->sb_acc -= m->m_len;
 	}
 
 	if (m->m_type != MT_DATA && m->m_type != MT_OOBDATA)
@@ -1129,13 +1123,7 @@
 			}
 			fnrdy = m;
 		}
-		if (fnrdy) {
-			if (!(m->m_flags & M_NOTAVAIL)) {
-				printf("sb %p: fnrdy %p, m %p is avail\n",
-				    sb, sb->sb_fnrdy, m);
-				goto fail;
-			}
-		} else
+		if (fnrdy == NULL)
 			acc += m->m_len;
 		ccc += m->m_len;
 		mbcnt += MSIZE;
@@ -1602,8 +1590,8 @@
 			next = m->m_nextpkt;
 		}
 		if (m->m_len > len) {
-			KASSERT(!(m->m_flags & M_NOTAVAIL),
-			    ("%s: m %p M_NOTAVAIL", __func__, m));
+			KASSERT(!(m->m_flags & M_NOTREADY),
+			    ("%s: m %p M_NOTREADY", __func__, m));
 			m->m_len -= len;
 			m->m_data += len;
 			sb->sb_ccc -= len;
diff --git a/sys/kern/uipc_socket.c b/sys/kern/uipc_socket.c
--- a/sys/kern/uipc_socket.c
+++ b/sys/kern/uipc_socket.c
@@ -3342,7 +3342,7 @@
 
 	for (m = sb->sb_mb; m != NULL && m->m_len <= len;
 	    m = m->m_next) {
-		KASSERT(!(m->m_flags & M_NOTAVAIL),
+		KASSERT(!(m->m_flags & M_NOTREADY),
 		    ("%s: m %p not available", __func__, m));
 		len -= m->m_len;
 		uio->uio_resid -= m->m_len;
diff --git a/sys/sys/sockbuf.h b/sys/sys/sockbuf.h
--- a/sys/sys/sockbuf.h
+++ b/sys/sys/sockbuf.h
@@ -210,8 +210,6 @@
  * Socket buffer private mbuf(9) flags.
  */
 #define	M_NOTREADY	M_PROTO1	/* m_data not populated yet */
-#define	M_BLOCKED	M_PROTO2	/* M_NOTREADY in front of m */
-#define	M_NOTAVAIL	(M_NOTREADY | M_BLOCKED)
 
 void	sbappend(struct sockbuf *sb, struct mbuf *m, int flags);
 void	sbappend_locked(struct sockbuf *sb, struct mbuf *m, int flags);