Index: sys/dev/xen/blkfront/blkfront.c
===================================================================
--- sys/dev/xen/blkfront/blkfront.c
+++ sys/dev/xen/blkfront/blkfront.c
@@ -57,6 +57,7 @@
 #include
 #include
 #include
+#include
 #include
@@ -159,14 +160,12 @@
 }
 
 static void
-xbd_mksegarray(bus_dma_segment_t *segs, int nsegs,
-    grant_ref_t * gref_head, int otherend_id, int readonly,
-    grant_ref_t * sg_ref, struct blkif_request_segment *sg)
+xbd_mksegarray(bus_dma_segment_t *segs, int nsegs, grant_ref_t * sg_ref,
+    struct blkif_request_segment *sg)
 {
 	struct blkif_request_segment *last_block_sg = sg + nsegs;
 	vm_paddr_t buffer_ma;
 	uint64_t fsect, lsect;
-	int ref;
 
 	while (sg < last_block_sg) {
 		KASSERT(segs->ds_addr % (1 << XBD_SECTOR_SHFT) == 0,
@@ -180,26 +179,15 @@
 		KASSERT(lsect <= 7, ("XEN disk driver data cannot "
 		    "cross a page boundary"));
-
-		/* install a grant reference. */
-		ref = gnttab_claim_grant_reference(gref_head);
-
 		/*
 		 * GNTTAB_LIST_END == 0xffffffff, but it is private
 		 * to gnttab.c.
 		 */
-		KASSERT(ref != ~0, ("grant_reference failed"));
+		KASSERT(*sg_ref != ~0, ("grant_reference failed"));
 
-		gnttab_grant_foreign_access_ref(
-		    ref,
-		    otherend_id,
-		    buffer_ma >> PAGE_SHIFT,
-		    readonly);
-
-		*sg_ref = ref;
 		*sg = (struct blkif_request_segment) {
-			.gref = ref,
-			.first_sect = fsect,
+			.gref = *sg_ref,
+			.first_sect = fsect,
 			.last_sect = lsect };
 
 		sg++;
@@ -241,10 +229,8 @@
 		ring_req->handle = (blkif_vdev_t)(uintptr_t)sc->xbd_disk;
 		ring_req->nr_segments = nsegs;
 		cm->cm_nseg = nsegs;
-		xbd_mksegarray(segs, nsegs, &cm->cm_gref_head,
-		    xenbus_get_otherend_id(sc->xbd_dev),
-		    cm->cm_operation == BLKIF_OP_WRITE,
-		    cm->cm_sg_refs, ring_req->seg);
+		cm->cm_sg_refs = xen_dmamap_get_grefs(cm->cm_map);
+		xbd_mksegarray(segs, nsegs, cm->cm_sg_refs, ring_req->seg);
 	} else {
 		blkif_request_indirect_t *ring_req;
 
@@ -259,11 +245,10 @@
 		ring_req->handle = (blkif_vdev_t)(uintptr_t)sc->xbd_disk;
 		ring_req->nr_segments = nsegs;
 		cm->cm_nseg = nsegs;
-		xbd_mksegarray(segs, nsegs, &cm->cm_gref_head,
-		    xenbus_get_otherend_id(sc->xbd_dev),
-		    cm->cm_operation == BLKIF_OP_WRITE,
-		    cm->cm_sg_refs, cm->cm_indirectionpages);
-		memcpy(ring_req->indirect_grefs, &cm->cm_indirectionrefs,
+		cm->cm_sg_refs = xen_dmamap_get_grefs(cm->cm_map);
+		xbd_mksegarray(segs, nsegs, cm->cm_sg_refs,
+		    cm->cm_indirectionpages);
+		memcpy(ring_req->indirect_grefs, cm->cm_indirectionrefs,
 		    sizeof(grant_ref_t) * sc->xbd_max_request_indirectpages);
 	}
 
@@ -275,8 +260,6 @@
 		op = 0;
 	bus_dmamap_sync(sc->xbd_io_dmat, cm->cm_map, op);
 
-	gnttab_free_grant_references(cm->cm_gref_head);
-
 	xbd_enqueue_cm(cm, XBD_Q_BUSY);
 
 	/*
@@ -299,10 +282,12 @@
 	if (cm->cm_bp != NULL)
 		error = bus_dmamap_load_bio(sc->xbd_io_dmat, cm->cm_map,
-		    cm->cm_bp, xbd_queue_cb, cm, 0);
+		    cm->cm_bp, xbd_queue_cb, cm,
+		    (cm->cm_operation == BLKIF_OP_WRITE) ? BUS_DMA_XEN_RO : 0);
 	else
 		error = bus_dmamap_load(sc->xbd_io_dmat, cm->cm_map,
-		    cm->cm_data, cm->cm_datalen, xbd_queue_cb, cm, 0);
+		    cm->cm_data, cm->cm_datalen, xbd_queue_cb, cm,
+		    (cm->cm_operation == BLKIF_OP_WRITE) ? BUS_DMA_XEN_RO : 0);
 	if (error == EINPROGRESS) {
 		/*
 		 * Maintain queuing order by freezing the queue.
@@ -317,20 +302,6 @@
 	return (error);
 }
 
-static void
-xbd_restart_queue_callback(void *arg)
-{
-	struct xbd_softc *sc = arg;
-
-	mtx_lock(&sc->xbd_io_lock);
-
-	xbd_thaw(sc, XBDF_GNT_SHORTAGE);
-
-	xbd_startio(sc);
-
-	mtx_unlock(&sc->xbd_io_lock);
-}
-
 static struct xbd_command *
 xbd_bio_command(struct xbd_softc *sc)
 {
@@ -350,17 +321,6 @@
 		return (NULL);
 	}
 
-	if (gnttab_alloc_grant_references(sc->xbd_max_request_segments,
-	    &cm->cm_gref_head) != 0) {
-		gnttab_request_free_callback(&sc->xbd_callback,
-		    xbd_restart_queue_callback, sc,
-		    sc->xbd_max_request_segments);
-		xbd_freeze(sc, XBDF_GNT_SHORTAGE);
-		xbd_requeue_bio(sc, bp);
-		xbd_enqueue_cm(cm, XBD_Q_FREE);
-		return (NULL);
-	}
-
 	cm->cm_bp = bp;
 	cm->cm_sector_number = (blkif_sector_t)bp->bio_pblkno;
 
@@ -502,8 +462,6 @@
 		cm = &sc->xbd_shadow[bret->id];
 		xbd_remove_cm(cm, XBD_Q_BUSY);
-		gnttab_end_foreign_access_references(cm->cm_nseg,
-		    cm->cm_sg_refs);
 		i++;
 
 		if (cm->cm_operation == BLKIF_OP_READ)
@@ -515,6 +473,7 @@
 			op = 0;
 		bus_dmamap_sync(sc->xbd_io_dmat, cm->cm_map, op);
 		bus_dmamap_unload(sc->xbd_io_dmat, cm->cm_map);
+		cm->cm_sg_refs = NULL;
 
 		/*
 		 * Release any hold this command has on future command
@@ -622,14 +581,6 @@
 		return (EBUSY);
 	}
 
-	if (gnttab_alloc_grant_references(sc->xbd_max_request_segments,
-	    &cm->cm_gref_head) != 0) {
-		xbd_free_command(cm);
-		mtx_unlock(&sc->xbd_io_lock);
-		device_printf(sc->xbd_dev, "no more grant allocs?\n");
-		return (EBUSY);
-	}
-
 	chunk = length > sc->xbd_max_request_size ?
 	    sc->xbd_max_request_size : length;
 	cm->cm_data = virtual;
@@ -1062,18 +1013,21 @@
 		cm = &sc->xbd_shadow[i];
 		if (cm->cm_sg_refs != NULL) {
-			free(cm->cm_sg_refs, M_XENBLOCKFRONT);
+			bus_dmamap_unload(sc->xbd_io_dmat, cm->cm_map);
 			cm->cm_sg_refs = NULL;
 		}
 
 		if (cm->cm_indirectionpages != NULL) {
-			gnttab_end_foreign_access_references(
-			    sc->xbd_max_request_indirectpages,
-			    &cm->cm_indirectionrefs[0]);
+			bus_dmamap_unload(sc->xbd_io_dmat,
+			    cm->cm_indirectionmap);
+			bus_dmamap_destroy(sc->xbd_io_dmat,
+			    cm->cm_indirectionmap);
 			contigfree(cm->cm_indirectionpages, PAGE_SIZE *
 			    sc->xbd_max_request_indirectpages,
 			    M_XENBLOCKFRONT);
 			cm->cm_indirectionpages = NULL;
+			cm->cm_indirectionrefs = NULL;
+			cm->cm_indirectionmap = NULL;
 		}
 
 		bus_dmamap_destroy(sc->xbd_io_dmat, cm->cm_map);
@@ -1214,7 +1168,36 @@
 	xenbus_set_state(sc->xbd_dev, XenbusStateInitialised);
 }
 
-/*
+
+/*
+ * Callback received from the dma load when xbd_connect() loads the indirection
+ * pages.
+ */
+static void
+xbd_indirectpage_cb(void *callback_arg, bus_dma_segment_t *segs, int nseg,
+    int error)
+{
+	struct xbd_command *cm;
+
+	cm = callback_arg;
+
+	if (error) {
+		/* Disable indirection pages and continue. */
+		cm->cm_sc->xbd_max_request_indirectpages = 0;
+		cm->cm_indirectionpages = NULL;
+		return;
+	}
+
+	KASSERT((nseg == cm->cm_sc->xbd_max_request_indirectpages),
+	    ("%s: number of dma segments not equal to the expected number. "
+	    "nseg = %d, xbd_max_request_segments = %d. Verify that the "
+	    "constraints passed when creating the tag are correct.", __func__,
+	    nseg, cm->cm_sc->xbd_max_request_indirectpages));
+
+	cm->cm_indirectionrefs = xen_dmamap_get_grefs(cm->cm_indirectionmap);
+}
+
+/*
  * Invoked when the backend is finally 'ready' (and has published
  * the details about the physical device - #sectors, size, etc).
 */
@@ -1225,7 +1208,7 @@
 	unsigned long sectors, sector_size, phys_sector_size;
 	unsigned int binfo;
 	int err, feature_barrier, feature_flush;
-	int i, j;
+	int i, flags;
 
 	if (sc->xbd_state == XBD_STATE_CONNECTED ||
 	    sc->xbd_state == XBD_STATE_SUSPENDED)
@@ -1285,6 +1268,9 @@
 	sc->xbd_max_request_size =
 	    XBD_SEGS_TO_SIZE(sc->xbd_max_request_segments);
 
+	flags = BUS_DMA_ALLOCNOW |
+	    (xenbus_get_otherend_id(sc->xbd_dev) << BUS_DMA_XEN_DOMID_SHIFT);
+
 	/* Allocate datastructures based on negotiated values. */
 	err = bus_dma_tag_create(
 	    bus_get_dma_tag(sc->xbd_dev),	/* parent */
@@ -1295,7 +1281,7 @@
 	    sc->xbd_max_request_size,
 	    sc->xbd_max_request_segments,
 	    PAGE_SIZE,			/* maxsegsize */
-	    BUS_DMA_ALLOCNOW,		/* flags */
+	    flags,			/* flags */
 	    busdma_lock_mutex,		/* lockfunc */
 	    &sc->xbd_io_lock,		/* lockarg */
 	    &sc->xbd_io_dmat);
@@ -1318,13 +1304,9 @@
 	for (i = 0; i < sc->xbd_max_requests; i++) {
 		struct xbd_command *cm;
 		void * indirectpages;
+		int indirectflags;
 
 		cm = &sc->xbd_shadow[i];
-		cm->cm_sg_refs = malloc(
-		    sizeof(grant_ref_t) * sc->xbd_max_request_segments,
-		    M_XENBLOCKFRONT, M_NOWAIT);
-		if (cm->cm_sg_refs == NULL)
-			break;
 		cm->cm_id = i;
 		cm->cm_flags = XBDCF_INITIALIZER;
 		cm->cm_sc = sc;
@@ -1339,21 +1321,29 @@
 			sc->xbd_max_request_indirectpages = 0;
 		} else {
 			indirectpages = NULL;
+			cm->cm_indirectionpages = indirectpages;
+			xbd_free_command(cm);
+			continue;
 		}
-		for (j = 0; j < sc->xbd_max_request_indirectpages; j++) {
-			if (gnttab_grant_foreign_access(
-			    xenbus_get_otherend_id(sc->xbd_dev),
-			    (vtophys(indirectpages) >> PAGE_SHIFT) + j,
-			    1 /* grant read-only access */,
-			    &cm->cm_indirectionrefs[j]))
-				break;
+
+		if (bus_dmamap_create(sc->xbd_io_dmat, 0,
+		    &cm->cm_indirectionmap)) {
+			contigfree(indirectpages, PAGE_SIZE *
+			    sc->xbd_max_request_indirectpages, M_XENBLOCKFRONT);
+			break;
 		}
-		if (j < sc->xbd_max_request_indirectpages) {
-			contigfree(indirectpages,
-			    PAGE_SIZE * sc->xbd_max_request_indirectpages,
-			    M_XENBLOCKFRONT);
+
+		/* Grant read-only access */
+		indirectflags = BUS_DMA_NOWAIT | BUS_DMA_XEN_RO;
+		if (bus_dmamap_load(sc->xbd_io_dmat, cm->cm_indirectionmap,
+		    indirectpages, PAGE_SIZE * sc->xbd_max_request_indirectpages,
+		    xbd_indirectpage_cb, cm, indirectflags)) {
+
+			contigfree(indirectpages, PAGE_SIZE *
+			    sc->xbd_max_request_indirectpages, M_XENBLOCKFRONT);
 			break;
 		}
+
 		cm->cm_indirectionpages = indirectpages;
 		xbd_free_command(cm);
 	}
Index: sys/dev/xen/blkfront/block.h
===================================================================
--- sys/dev/xen/blkfront/block.h
+++ sys/dev/xen/blkfront/block.h
@@ -117,7 +117,6 @@
 	uint64_t		cm_id;
 	grant_ref_t		*cm_sg_refs;
 	struct bio		*cm_bp;
-	grant_ref_t		cm_gref_head;
 	void			*cm_data;
 	size_t			cm_datalen;
 	u_int			cm_nseg;
@@ -126,7 +125,8 @@
 	int			cm_status;
 	xbd_cbcf_t		*cm_complete;
 	void			*cm_indirectionpages;
-	grant_ref_t		cm_indirectionrefs[BLKIF_MAX_INDIRECT_PAGES_PER_REQUEST];
+	grant_ref_t		*cm_indirectionrefs;
+	bus_dmamap_t		cm_indirectionmap;
 };
 
 typedef enum {
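
Note for reviewers: the hunks above replace the driver's direct gnttab calls with the Xen grant busdma extensions this series depends on. The backend domain id is encoded into the DMA tag flags via BUS_DMA_XEN_DOMID_SHIFT, BUS_DMA_XEN_RO marks a load whose grants should be read-only for the peer, xen_dmamap_get_grefs() returns the grant references created for a loaded map, and bus_dmamap_unload() revokes them. The sketch below is a minimal, self-contained consumer written against those interfaces as this patch uses them; it is illustrative only, the example_* names and softc layout are invented, and the header declaring the Xen busdma additions is omitted because its name does not appear in this diff.

/*
 * Illustrative sketch only (not part of the change): grant a buffer to the
 * other end of a xenbus device using the Xen-aware busdma backend.
 */
#include <sys/param.h>
#include <sys/bus.h>
#include <machine/bus.h>
#include <xen/gnttab.h>
#include <xen/xenbus/xenbusvar.h>

struct example_softc {
	bus_dma_tag_t	tag;
	bus_dmamap_t	map;
};

static void
example_load_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
	struct example_softc *sc = arg;
	grant_ref_t *refs;

	if (error != 0)
		return;

	/* One grant reference per segment, created by the busdma backend. */
	refs = xen_dmamap_get_grefs(sc->map);
	/* ... publish refs[0 .. nseg - 1] to the backend via the ring ... */
	(void)segs;
	(void)refs;
}

static int
example_grant_buffer(struct example_softc *sc, device_t dev, void *buf,
    size_t len, int readonly)
{
	int flags, error;

	/* Encode the peer domain in the tag so loads grant access to it. */
	flags = BUS_DMA_ALLOCNOW |
	    (xenbus_get_otherend_id(dev) << BUS_DMA_XEN_DOMID_SHIFT);
	error = bus_dma_tag_create(bus_get_dma_tag(dev), PAGE_SIZE, 0,
	    BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL, len,
	    howmany(len, PAGE_SIZE) + 1, /* allow for an unaligned start */
	    PAGE_SIZE, flags, NULL, NULL, &sc->tag);
	if (error != 0)
		return (error);

	error = bus_dmamap_create(sc->tag, 0, &sc->map);
	if (error != 0)
		return (error);

	/* BUS_DMA_XEN_RO requests read-only grants, as for write requests. */
	return (bus_dmamap_load(sc->tag, sc->map, buf, len, example_load_cb,
	    sc, BUS_DMA_NOWAIT | (readonly ? BUS_DMA_XEN_RO : 0)));
}

/* A later bus_dmamap_unload(sc->tag, sc->map) revokes the grants. */

Keeping grant creation and teardown inside the busdma load/unload path is what lets the driver drop cm_gref_head, the XBDF_GNT_SHORTAGE freeze/thaw machinery and the per-command cm_sg_refs allocation removed above.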