Index: sys/kern/vfs_bio.c =================================================================== --- sys/kern/vfs_bio.c +++ sys/kern/vfs_bio.c @@ -661,8 +661,6 @@ long space; int diff; - KASSERT((bp->b_flags & B_MALLOC) == 0, - ("bufspace_adjust: malloc buf %p", bp)); bd = bufdomain(bp); diff = bufsize - bp->b_bufsize; if (diff < 0) { @@ -851,27 +849,6 @@ } /* - * bufmallocadjust: - * - * Adjust the reported bufspace for a malloc managed buffer, possibly - * waking any waiters. - */ -static void -bufmallocadjust(struct buf *bp, int bufsize) -{ - int diff; - - KASSERT((bp->b_flags & B_MALLOC) != 0, - ("bufmallocadjust: non-malloc buf %p", bp)); - diff = bufsize - bp->b_bufsize; - if (diff < 0) - atomic_subtract_long(&bufmallocspace, -diff); - else - atomic_add_long(&bufmallocspace, diff); - bp->b_bufsize = bufsize; -} - -/* * runningwakeup: * * Wake up processes that are waiting on asynchronous writes to fall @@ -4172,22 +4149,9 @@ /* * Truncate the backing store for a non-vmio buffer. */ -static void +static inline void vfs_nonvmio_truncate(struct buf *bp, int newbsize) { - - if (bp->b_flags & B_MALLOC) { - /* - * malloced buffers are not shrunk - */ - if (newbsize == 0) { - bufmallocadjust(bp, 0); - free(bp->b_data, M_BIOBUF); - bp->b_data = bp->b_kvabase; - bp->b_flags &= ~B_MALLOC; - } - return; - } vm_hold_free_pages(bp, newbsize); bufspace_adjust(bp, newbsize); } @@ -4195,51 +4159,11 @@ /* * Extend the backing for a non-VMIO buffer. */ -static void +static inline void vfs_nonvmio_extend(struct buf *bp, int newbsize) { - caddr_t origbuf; - int origbufsize; - - /* - * We only use malloced memory on the first allocation. - * and revert to page-allocated memory when the buffer - * grows. - * - * There is a potential smp race here that could lead - * to bufmallocspace slightly passing the max. It - * is probably extremely rare and not worth worrying - * over. 
- */ - if (bp->b_bufsize == 0 && newbsize <= PAGE_SIZE/2 && - bufmallocspace < maxbufmallocspace) { - bp->b_data = malloc(newbsize, M_BIOBUF, M_WAITOK); - bp->b_flags |= B_MALLOC; - bufmallocadjust(bp, newbsize); - return; - } - - /* - * If the buffer is growing on its other-than-first - * allocation then we revert to the page-allocation - * scheme. - */ - origbuf = NULL; - origbufsize = 0; - if (bp->b_flags & B_MALLOC) { - origbuf = bp->b_data; - origbufsize = bp->b_bufsize; - bp->b_data = bp->b_kvabase; - bufmallocadjust(bp, 0); - bp->b_flags &= ~B_MALLOC; - newbsize = round_page(newbsize); - } vm_hold_load_pages(bp, (vm_offset_t) bp->b_data + bp->b_bufsize, (vm_offset_t) bp->b_data + newbsize); - if (origbuf != NULL) { - bcopy(origbuf, bp->b_data, origbufsize); - free(origbuf, M_BIOBUF); - } bufspace_adjust(bp, newbsize); } @@ -4272,8 +4196,7 @@ newbsize = roundup2(size, DEV_BSIZE); if ((bp->b_flags & B_VMIO) == 0) { - if ((bp->b_flags & B_MALLOC) == 0) - newbsize = round_page(newbsize); + newbsize = round_page(newbsize); /* * Just get anonymous memory from the kernel. Don't * mess with B_CACHE. @@ -4288,8 +4211,6 @@ desiredpages = (size == 0) ? 0 : num_pages((bp->b_offset & PAGE_MASK) + newbsize); - if (bp->b_flags & B_MALLOC) - panic("allocbuf: VMIO buffer can't be malloced"); /* * Set B_CACHE initially if buffer is 0 length or will become * 0-length. 
@@ -4760,8 +4681,8 @@ vfs_bio_clrbuf(struct buf *bp) { int i, j, mask, sa, ea, slide; - if ((bp->b_flags & (B_VMIO | B_MALLOC)) != B_VMIO) { + if ((bp->b_flags & B_VMIO) == 0) { clrbuf(bp); return; } Index: sys/kern/vfs_cluster.c =================================================================== --- sys/kern/vfs_cluster.c +++ sys/kern/vfs_cluster.c @@ -368,8 +368,7 @@ tbp->b_iocmd = BIO_READ; } tbp->b_blkno = blkno; - if( (tbp->b_flags & B_MALLOC) || - ((tbp->b_flags & B_VMIO) == 0) || (run <= 1) ) + if (((tbp->b_flags & B_VMIO) == 0) || (run <= 1)) return tbp; bp = trypbuf(&cluster_pbuf_freecnt); @@ -851,7 +850,7 @@ * up if the cluster was terminated prematurely--too much * hassle. */ - if (((tbp->b_flags & (B_CLUSTEROK | B_MALLOC | B_VMIO)) != + if (((tbp->b_flags & (B_CLUSTEROK | B_VMIO)) != (B_CLUSTEROK | B_VMIO)) || (tbp->b_bcount != tbp->b_bufsize) || (tbp->b_bcount != size) || Index: sys/sys/buf.h =================================================================== --- sys/sys/buf.h +++ sys/sys/buf.h @@ -185,9 +185,6 @@ * The 'entire buffer' is defined to be the range from * 0 through b_bcount. * - * B_MALLOC Request that the buffer be allocated from the malloc - * pool, DEV_BSIZE aligned instead of PAGE_SIZE aligned. - * * B_CLUSTEROK This flag is typically set for B_DELWRI buffers * by filesystems that allow clustering when the buffer * is fully dirty and indicates that it may be clustered @@ -223,7 +220,6 @@ #define B_INVAL 0x00002000 /* Does not contain valid info. */ #define B_BARRIER 0x00004000 /* Write this and all preceding first. */ #define B_NOCACHE 0x00008000 /* Do not cache block after use. */ -#define B_MALLOC 0x00010000 /* malloced b_data */ #define B_CLUSTEROK 0x00020000 /* Pagein op, so swap() can count it. */ #define B_00040000 0x00040000 /* Available flag. */ #define B_00080000 0x00080000 /* Available flag. 
*/ Index: sys/ufs/ffs/ffs_alloc.c =================================================================== --- sys/ufs/ffs/ffs_alloc.c +++ sys/ufs/ffs/ffs_alloc.c @@ -338,7 +338,7 @@ allocbuf(bp, nsize); bp->b_flags |= B_DONE; vfs_bio_bzero_buf(bp, osize, nsize - osize); - if ((bp->b_flags & (B_MALLOC | B_VMIO)) == B_VMIO) + if ((bp->b_flags & B_VMIO) != 0) vfs_bio_set_valid(bp, osize, nsize - osize); *bpp = bp; return (0); @@ -406,7 +406,7 @@ allocbuf(bp, nsize); bp->b_flags |= B_DONE; vfs_bio_bzero_buf(bp, osize, nsize - osize); - if ((bp->b_flags & (B_MALLOC | B_VMIO)) == B_VMIO) + if ((bp->b_flags & B_VMIO) != 0) vfs_bio_set_valid(bp, osize, nsize - osize); *bpp = bp; return (0);