Index: head/sys/amd64/conf/GENERIC
===================================================================
--- head/sys/amd64/conf/GENERIC
+++ head/sys/amd64/conf/GENERIC
@@ -83,7 +83,9 @@
 options KDB # Enable kernel debugger support.
 options KDB_TRACE # Print a stack trace for a panic.
 # For full debugger support use (turn off in stable branch):
+options BUF_TRACKING # Track buffer history
 options DDB # Support DDB.
+options FULL_BUF_TRACKING # Track more buffer history
 options GDB # Support remote GDB.
 options DEADLKRES # Enable the deadlock resolver
 options INVARIANTS # Enable calls of extra sanity checking
Index: head/sys/amd64/conf/GENERIC-NODEBUG
===================================================================
--- head/sys/amd64/conf/GENERIC-NODEBUG
+++ head/sys/amd64/conf/GENERIC-NODEBUG
@@ -34,5 +34,7 @@
 nooptions INVARIANT_SUPPORT
 nooptions WITNESS
 nooptions WITNESS_SKIPSPIN
+nooptions BUF_TRACKING
 nooptions DEADLKRES
+nooptions FULL_BUF_TRACKING
Index: head/sys/cam/cam_ccb.h
===================================================================
--- head/sys/cam/cam_ccb.h
+++ head/sys/cam/cam_ccb.h
@@ -744,6 +744,9 @@
 #define CAM_TAG_ACTION_NONE 0x00
         u_int tag_id; /* tag id from initator (target mode) */
         u_int init_id; /* initiator id of who selected */
+#if defined(BUF_TRACKING) || defined(FULL_BUF_TRACKING)
+        struct bio *bio; /* Associated bio */
+#endif
 };
 
 static __inline uint8_t *
@@ -1335,6 +1338,9 @@
         csio->sense_len = sense_len;
         csio->cdb_len = cdb_len;
         csio->tag_action = tag_action;
+#if defined(BUF_TRACKING) || defined(FULL_BUF_TRACKING)
+        csio->bio = NULL;
+#endif
 }
 
 static __inline void
Index: head/sys/cam/cam_periph.c
===================================================================
--- head/sys/cam/cam_periph.c
+++ head/sys/cam/cam_periph.c
@@ -1427,6 +1427,11 @@
         union ccb *orig_ccb = ccb;
         int error, recoveryccb;
 
+#if defined(BUF_TRACKING) || defined(FULL_BUF_TRACKING)
+        if (ccb->ccb_h.func_code == XPT_SCSI_IO && ccb->csio.bio != NULL)
+                biotrack(ccb->csio.bio, __func__);
+#endif
+
         periph = xpt_path_periph(ccb->ccb_h.path);
         recoveryccb = (ccb->ccb_h.cbfcnp == camperiphdone);
         if ((periph->flags & CAM_PERIPH_RECOVERY_INPROG) && !recoveryccb) {
Index: head/sys/cam/cam_xpt.c
===================================================================
--- head/sys/cam/cam_xpt.c
+++ head/sys/cam/cam_xpt.c
@@ -31,6 +31,7 @@
 __FBSDID("$FreeBSD$");
 
 #include 
+#include 
 #include 
 #include 
 #include 
@@ -4471,6 +4472,12 @@
         struct cam_doneq *queue;
         int run, hash;
 
+#if defined(BUF_TRACKING) || defined(FULL_BUF_TRACKING)
+        if (done_ccb->ccb_h.func_code == XPT_SCSI_IO &&
+            done_ccb->csio.bio != NULL)
+                biotrack(done_ccb->csio.bio, __func__);
+#endif
+
         CAM_DEBUG(done_ccb->ccb_h.path, CAM_DEBUG_TRACE,
             ("xpt_done: func= %#x %s status %#x\n",
                 done_ccb->ccb_h.func_code,
@@ -5189,6 +5196,16 @@
         struct cam_devq *devq;
         struct mtx *mtx = NULL;
 
+#if defined(BUF_TRACKING) || defined(FULL_BUF_TRACKING)
+        struct ccb_scsiio *csio;
+
+        if (ccb_h->func_code == XPT_SCSI_IO) {
+                csio = &((union ccb *)ccb_h)->csio;
+                if (csio->bio != NULL)
+                        biotrack(csio->bio, __func__);
+        }
+#endif
+
         if (ccb_h->flags & CAM_HIGH_POWER) {
                 struct highpowerlist *hphead;
                 struct cam_ed *device;
Index: head/sys/cam/scsi/scsi_da.c
===================================================================
--- head/sys/cam/scsi/scsi_da.c
+++ head/sys/cam/scsi/scsi_da.c
@@ -2949,6 +2949,8 @@
                 void *data_ptr;
                 int rw_op;
 
+                biotrack(bp, __func__);
+
                 if (bp->bio_cmd == BIO_WRITE) {
                         softc->flags |= DA_FLAG_DIRTY;
                         rw_op = SCSI_RW_WRITE;
@@ -2976,6 +2978,9 @@
                     /*dxfer_len*/ bp->bio_bcount,
                     /*sense_len*/SSD_FULL_SIZE,
                     da_default_timeout * 1000);
+#if defined(BUF_TRACKING) || defined(FULL_BUF_TRACKING)
+                start_ccb->csio.bio = bp;
+#endif
                 break;
         }
         case BIO_FLUSH:
@@ -4008,6 +4013,10 @@
         CAM_DEBUG(periph->path, CAM_DEBUG_TRACE, ("dadone\n"));
 
         csio = &done_ccb->csio;
+#if defined(BUF_TRACKING) || defined(FULL_BUF_TRACKING)
+        if (csio->bio != NULL)
+                biotrack(csio->bio, __func__);
+#endif
         state = csio->ccb_h.ccb_state & DA_CCB_TYPE_MASK;
         switch (state) {
         case DA_CCB_BUFFER_IO:
@@ -4106,6 +4115,7 @@
                         }
                 }
 
+                biotrack(bp, __func__);
                 LIST_REMOVE(&done_ccb->ccb_h, periph_links.le);
                 if (LIST_EMPTY(&softc->pending_ccbs))
                         softc->flags |= DA_FLAG_WAS_OTAG;
@@ -5219,6 +5229,11 @@
         struct cam_periph *periph;
         int error, error_code, sense_key, asc, ascq;
 
+#if defined(BUF_TRACKING) || defined(FULL_BUF_TRACKING)
+        if (ccb->csio.bio != NULL)
+                biotrack(ccb->csio.bio, __func__);
+#endif
+
         periph = xpt_path_periph(ccb->ccb_h.path);
         softc = (struct da_softc *)periph->softc;
 
Index: head/sys/conf/options
===================================================================
--- head/sys/conf/options
+++ head/sys/conf/options
@@ -50,12 +50,14 @@
 # Debugging options.
 ALT_BREAK_TO_DEBUGGER opt_kdb.h
 BREAK_TO_DEBUGGER opt_kdb.h
+BUF_TRACKING opt_global.h
 DDB
 DDB_BUFR_SIZE opt_ddb.h
 DDB_CAPTURE_DEFAULTBUFSIZE opt_ddb.h
 DDB_CAPTURE_MAXBUFSIZE opt_ddb.h
 DDB_CTF opt_ddb.h
 DDB_NUMSYM opt_ddb.h
+FULL_BUF_TRACKING opt_global.h
 GDB
 KDB opt_global.h
 KDB_TRACE opt_kdb.h
Index: head/sys/dev/mps/mps_sas.c
===================================================================
--- head/sys/dev/mps/mps_sas.c
+++ head/sys/dev/mps/mps_sas.c
@@ -1872,6 +1872,10 @@
                 }
         }
 
+#if defined(BUF_TRACKING) || defined(FULL_BUF_TRACKING)
+        if (csio->bio != NULL)
+                biotrack(csio->bio, __func__);
+#endif
         callout_reset_sbt(&cm->cm_callout, SBT_1MS * ccb->ccb_h.timeout, 0,
             mpssas_scsiio_timeout, cm, 0);
 
@@ -2125,6 +2129,11 @@
         TAILQ_REMOVE(&cm->cm_targ->commands, cm, cm_link);
         ccb->ccb_h.status &= ~(CAM_STATUS_MASK | CAM_SIM_QUEUED);
 
+#if defined(BUF_TRACKING) || defined(FULL_BUF_TRACKING)
+        if (ccb->csio.bio != NULL)
+                biotrack(ccb->csio.bio, __func__);
+#endif
+
         if (cm->cm_state == MPS_CM_STATE_TIMEDOUT) {
                 TAILQ_REMOVE(&cm->cm_targ->timedout_commands, cm, cm_recovery);
                 if (cm->cm_reply != NULL)
Index: head/sys/geom/geom_dev.c
===================================================================
--- head/sys/geom/geom_dev.c
+++ head/sys/geom/geom_dev.c
@@ -668,6 +668,7 @@
         sc = cp->private;
         KASSERT(cp->acr || cp->acw,
             ("Consumer with zero access count in g_dev_strategy"));
+        biotrack(bp, __func__);
 #ifdef INVARIANTS
         if ((bp->bio_offset % cp->provider->sectorsize) != 0 ||
             (bp->bio_bcount % cp->provider->sectorsize) != 0) {
Index: head/sys/geom/geom_disk.c
===================================================================
--- head/sys/geom/geom_disk.c
+++ head/sys/geom/geom_disk.c
@@ -421,6 +421,8 @@
         int error;
         off_t off;
 
+        biotrack(bp, __func__);
+
         sc = bp->bio_to->private;
         if (sc == NULL || (dp = sc->dp) == NULL || dp->d_destroyed) {
                 g_io_deliver(bp, ENXIO);
Index: head/sys/geom/geom_io.c
===================================================================
--- head/sys/geom/geom_io.c
+++ head/sys/geom/geom_io.c
@@ -223,6 +223,9 @@
                 /* Inherit classification info from the parent */
                 bp2->bio_classifier1 = bp->bio_classifier1;
                 bp2->bio_classifier2 = bp->bio_classifier2;
+#if defined(BUF_TRACKING) || defined(FULL_BUF_TRACKING)
+                bp2->bio_track_bp = bp->bio_track_bp;
+#endif
                 bp->bio_children++;
         }
 #ifdef KTR
@@ -362,6 +365,8 @@
         off_t excess;
         int error;
 
+        biotrack(bp, __func__);
+
         cp = bp->bio_from;
         pp = bp->bio_to;
 
@@ -503,6 +508,8 @@
         struct g_classifier_hook *hook;
         int classified = 0;
 
+        biotrack(bp, __func__);
+
         TAILQ_FOREACH(hook, &g_classifier_tailq, link)
                 classified |= hook->func(hook->arg, bp);
 
@@ -518,6 +525,8 @@
         int direct, error, first;
         uint8_t cmd;
 
+        biotrack(bp, __func__);
+
         KASSERT(cp != NULL, ("NULL cp in g_io_request"));
         KASSERT(bp != NULL, ("NULL bp in g_io_request"));
         pp = cp->provider;
@@ -644,6 +653,8 @@
         struct mtx *mtxp;
         int direct, first;
 
+        biotrack(bp, __func__);
+
         KASSERT(bp != NULL, ("NULL bp in g_io_deliver"));
         pp = bp->bio_to;
         KASSERT(pp != NULL, ("NULL bio_to in g_io_deliver"));
@@ -835,6 +846,7 @@
                 }
                 CTR0(KTR_GEOM, "g_down has work to do");
                 g_bioq_unlock(&g_bio_run_down);
+                biotrack(bp, __func__);
                 if (pace != 0) {
                         /*
                          * There has been at least one memory allocation
Index: head/sys/geom/geom_subr.c
===================================================================
--- head/sys/geom/geom_subr.c
+++ head/sys/geom/geom_subr.c
@@ -1531,6 +1531,10 @@
                 db_printf(" caller2: %p\n", bp->bio_caller2);
                 db_printf(" bio_from: %p\n", bp->bio_from);
                 db_printf(" bio_to: %p\n", bp->bio_to);
+
+#if defined(BUF_TRACKING) || defined(FULL_BUF_TRACKING)
+                db_printf(" bio_track_bp: %p\n", bp->bio_track_bp);
+#endif
         }
 }
 
Index: head/sys/geom/geom_vfs.c
===================================================================
--- head/sys/geom/geom_vfs.c
+++ head/sys/geom/geom_vfs.c
@@ -192,6 +192,10 @@
         }
         bip->bio_done = g_vfs_done;
         bip->bio_caller2 = bp;
+#if defined(BUF_TRACKING) || defined(FULL_BUF_TRACKING)
+        buf_track(bp, __func__);
+        bip->bio_track_bp = bp;
+#endif
         g_io_request(bip, cp);
 }
 
Index: head/sys/geom/part/g_part.c
===================================================================
--- head/sys/geom/part/g_part.c
+++ head/sys/geom/part/g_part.c
@@ -2154,6 +2154,8 @@
         struct g_provider *pp;
         char buf[64];
 
+        biotrack(bp, __func__);
+
         pp = bp->bio_to;
         gp = pp->geom;
         table = gp->softc;
Index: head/sys/kern/vfs_bio.c
===================================================================
--- head/sys/kern/vfs_bio.c
+++ head/sys/kern/vfs_bio.c
@@ -1952,6 +1952,7 @@
         if (oldflags & B_ASYNC)
                 BUF_KERNPROC(bp);
         bp->b_iooffset = dbtob(bp->b_blkno);
+        buf_track(bp, __func__);
         bstrategy(bp);
 
         if ((oldflags & B_ASYNC) == 0) {
@@ -2078,6 +2079,8 @@
                 VOP_BMAP(vp, bp->b_lblkno, NULL, &bp->b_blkno, NULL, NULL);
         }
 
+        buf_track(bp, __func__);
+
         /*
          * Set the *dirty* buffer range based upon the VM system dirty
          * pages.
@@ -2386,6 +2389,8 @@
                 brelvp(bp);
         }
 
+        buf_track(bp, __func__);
+
         /* buffers with no memory */
         if (bp->b_bufsize == 0) {
                 buf_free(bp);
@@ -2470,6 +2475,7 @@
         binsfree(bp, qindex);
 
 out:
+        buf_track(bp, __func__);
         /* unlock */
         BUF_UNLOCK(bp);
         if (qindex == QUEUE_CLEAN)
@@ -3716,6 +3722,7 @@
         CTR4(KTR_BUF, "getblk(%p, %ld, %d) = %p", vp, (long)blkno, size, bp);
         BUF_ASSERT_HELD(bp);
 end:
+        buf_track(bp, __func__);
         KASSERT(bp->b_bufobj == bo,
             ("bp %p wrong b_bufobj %p should be %p", bp, bp->b_bufobj, bo));
         return (bp);
@@ -3892,6 +3899,7 @@
         void (*done)(struct bio *);
         vm_offset_t start, end;
 
+        biotrack(bp, __func__);
         if ((bp->bio_flags & BIO_TRANSIENT_MAPPING) != 0) {
                 bp->bio_flags &= ~BIO_TRANSIENT_MAPPING;
                 bp->bio_flags |= BIO_UNMAPPED;
@@ -3948,6 +3956,15 @@
         biodone(bp);
 }
 
+#if defined(BUF_TRACKING) || defined(FULL_BUF_TRACKING)
+void
+biotrack_buf(struct bio *bp, const char *location)
+{
+
+        buf_track(bp->bio_track_bp, location);
+}
+#endif
+
 /*
  * bufwait:
  *
@@ -3998,6 +4015,7 @@
         struct bufobj *dropobj;
         void (*biodone)(struct buf *);
 
+        buf_track(bp, __func__);
         CTR3(KTR_BUF, "bufdone(%p) vp %p flags %X", bp, bp->b_vp, bp->b_flags);
         dropobj = NULL;
 
@@ -4801,6 +4819,9 @@
 {
         /* get args */
         struct buf *bp = (struct buf *)addr;
+#ifdef FULL_BUF_TRACKING
+        uint32_t i, j;
+#endif
 
         if (!have_addr) {
                 db_printf("usage: show buffer <addr>\n");
@@ -4837,6 +4858,16 @@
                 }
                 db_printf("\n");
         }
+#if defined(FULL_BUF_TRACKING)
+        db_printf("b_io_tracking: b_io_tcnt = %u\n", bp->b_io_tcnt);
+
+        i = bp->b_io_tcnt % BUF_TRACKING_SIZE;
+        for (j = 1; j <= BUF_TRACKING_SIZE; j++)
+                db_printf(" %2u: %s\n", j,
+                    bp->b_io_tracking[BUF_TRACKING_ENTRY(i - j)]);
+#elif defined(BUF_TRACKING)
+        db_printf("b_io_tracking: %s\n", bp->b_io_tracking);
+#endif
         db_printf(" ");
         BUF_LOCKPRINTINFO(bp);
 }
Index: head/sys/kern/vfs_cluster.c
===================================================================
--- head/sys/kern/vfs_cluster.c
+++ head/sys/kern/vfs_cluster.c
@@ -1008,6 +1008,7 @@
                         reassignbuf(tbp); /* put on clean list */
                         bufobj_wref(tbp->b_bufobj);
                         BUF_KERNPROC(tbp);
+                        buf_track(tbp, __func__);
                         TAILQ_INSERT_TAIL(&bp->b_cluster.cluster_head,
                                 tbp, b_cluster.cluster_entry);
                 }
Index: head/sys/sys/bio.h
===================================================================
--- head/sys/sys/bio.h
+++ head/sys/sys/bio.h
@@ -121,6 +121,9 @@
         void *_bio_caller2;
         uint8_t _bio_cflags;
 #endif
+#if defined(BUF_TRACKING) || defined(FULL_BUF_TRACKING)
+        struct buf *bio_track_bp; /* Parent buf for tracking */
+#endif
 
         /* XXX: these go away when bio chaining is introduced */
         daddr_t bio_pblkno; /* physical block number */
@@ -142,6 +145,23 @@
 void biofinish(struct bio *bp, struct devstat *stat, int error);
 int biowait(struct bio *bp, const char *wchan);
 
+#if defined(BUF_TRACKING) || defined(FULL_BUF_TRACKING)
+void biotrack_buf(struct bio *bp, const char *location);
+
+static __inline void
+biotrack(struct bio *bp, const char *location)
+{
+
+        if (bp->bio_track_bp != NULL)
+                biotrack_buf(bp, location);
+}
+#else
+static __inline void
+biotrack(struct bio *bp __unused, const char *location __unused)
+{
+}
+#endif
+
 void bioq_disksort(struct bio_queue_head *ap, struct bio *bp);
 struct bio *bioq_first(struct bio_queue_head *head);
 struct bio *bioq_takefirst(struct bio_queue_head *head);
Index: head/sys/sys/buf.h
===================================================================
--- head/sys/sys/buf.h
+++ head/sys/sys/buf.h
@@ -140,6 +140,15 @@
         void *b_fsprivate1;
         void *b_fsprivate2;
         void *b_fsprivate3;
+
+#if defined(FULL_BUF_TRACKING)
+#define BUF_TRACKING_SIZE 32
+#define BUF_TRACKING_ENTRY(x) ((x) & (BUF_TRACKING_SIZE - 1))
+        const char *b_io_tracking[BUF_TRACKING_SIZE];
+        uint32_t b_io_tcnt;
+#elif defined(BUF_TRACKING)
+        const char *b_io_tracking;
+#endif
 };
 
 #define b_object b_bufobj->bo_object
@@ -429,6 +438,17 @@
         return (0);
 }
 
+static __inline void
+buf_track(struct buf *bp, const char *location)
+{
+
+#if defined(FULL_BUF_TRACKING)
+        bp->b_io_tracking[BUF_TRACKING_ENTRY(bp->b_io_tcnt++)] = location;
+#elif defined(BUF_TRACKING)
+        bp->b_io_tracking = location;
+#endif
+}
+
 #endif /* _KERNEL */
 
 /*
Index: head/sys/vm/vm_pager.c
===================================================================
--- head/sys/vm/vm_pager.c
+++ head/sys/vm/vm_pager.c
@@ -376,6 +376,7 @@
         bp->b_iodone = NULL;
         bp->b_error = 0;
         BUF_LOCK(bp, LK_EXCLUSIVE, NULL);
+        buf_track(bp, __func__);
 }
 
 /*
@@ -473,6 +474,7 @@
         KASSERT(bp->b_vp == NULL, ("relpbuf with vp"));
         KASSERT(bp->b_bufobj == NULL, ("relpbuf with bufobj"));
 
+        buf_track(bp, __func__);
         BUF_UNLOCK(bp);
 
         mtx_lock(&pbuf_mtx);
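
For reference, the ring buffer that FULL_BUF_TRACKING adds to struct buf (and that the DDB "show buffer" command dumps) can be exercised in isolation. The standalone sketch below is illustrative only and is not part of the patch: struct track_buf, record() and dump_newest_first() are invented names for the example, while BUF_TRACKING_SIZE and BUF_TRACKING_ENTRY() mirror the definitions added to sys/sys/buf.h. Because the size is a power of two, BUF_TRACKING_ENTRY() masks the monotonically increasing counter into a slot, so recording is a single pointer store, and the unsigned wrap-around in the (i - j) walk is what lets the dump run from newest to oldest.

/*
 * Userspace-only sketch of the FULL_BUF_TRACKING ring buffer.  Illustrative
 * only: struct track_buf, record() and dump_newest_first() are invented for
 * this example; BUF_TRACKING_SIZE and BUF_TRACKING_ENTRY() mirror the
 * definitions the patch adds to sys/sys/buf.h.
 */
#include <stdint.h>
#include <stdio.h>

#define BUF_TRACKING_SIZE	32
#define BUF_TRACKING_ENTRY(x)	((x) & (BUF_TRACKING_SIZE - 1))

struct track_buf {
	const char	*io_tracking[BUF_TRACKING_SIZE];
	uint32_t	 io_tcnt;
};

/* Equivalent of buf_track(): one store, no locking or allocation. */
static void
record(struct track_buf *tb, const char *location)
{

	tb->io_tracking[BUF_TRACKING_ENTRY(tb->io_tcnt++)] = location;
}

/*
 * Equivalent of the FULL_BUF_TRACKING branch of "show buffer": walk the ring
 * backwards from the most recent entry.  Unsigned wrap-around in (i - j) is
 * harmless because BUF_TRACKING_ENTRY() masks it.  Empty slots are skipped
 * here for brevity.
 */
static void
dump_newest_first(const struct track_buf *tb)
{
	uint32_t i, j;

	i = tb->io_tcnt % BUF_TRACKING_SIZE;
	for (j = 1; j <= BUF_TRACKING_SIZE; j++) {
		const char *loc = tb->io_tracking[BUF_TRACKING_ENTRY(i - j)];

		if (loc != NULL)
			printf(" %2u: %s\n", j, loc);
	}
}

int
main(void)
{
	struct track_buf tb = { .io_tcnt = 0 };

	/* Roughly the path a write takes through the layers patched above. */
	record(&tb, "bufwrite");
	record(&tb, "g_vfs_strategy");
	record(&tb, "g_io_request");
	record(&tb, "bufdone");
	dump_newest_first(&tb);		/* prints "bufdone" first */
	return (0);
}

Built as an ordinary userland program, this prints the recorded locations newest first, which is the same ordering the FULL_BUF_TRACKING branch of "show buffer" produces in the vfs_bio.c hunk above; with plain BUF_TRACKING only the single most recent location is kept.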