sys/dev/xen/blkfront/blkfront.c
@@ (51 unchanged lines skipped) @@
 #include <xen/xen-os.h>
 #include <xen/hypervisor.h>
 #include <xen/xen_intr.h>
 #include <xen/gnttab.h>
 #include <xen/interface/grant_table.h>
 #include <xen/interface/io/protocols.h>
 #include <xen/xenbus/xenbusvar.h>
+#include <xen/busdma_xen.h>
 
 #include <machine/_inttypes.h>
 
 #include <geom/geom_disk.h>
 
 #include <dev/xen/blkfront/block.h>
 
 #include "xenbus_if.h"
@@ (86 unchanged lines skipped) @@ xbd_free_command(struct xbd_command *cm)
 	cm->cm_flags = XBDCF_INITIALIZER;
 	cm->cm_bp = NULL;
 	cm->cm_complete = NULL;
 	xbd_enqueue_cm(cm, XBD_Q_FREE);
 	xbd_thaw(cm->cm_sc, XBDF_CM_SHORTAGE);
 }
 
 static void
-xbd_mksegarray(bus_dma_segment_t *segs, int nsegs,
-    grant_ref_t * gref_head, int otherend_id, int readonly,
-    grant_ref_t * sg_ref, struct blkif_request_segment *sg)
+xbd_mksegarray(bus_dma_segment_t *segs, int nsegs, grant_ref_t * sg_ref,
+    struct blkif_request_segment *sg)
 {
 	struct blkif_request_segment *last_block_sg = sg + nsegs;
 	vm_paddr_t buffer_ma;
 	uint64_t fsect, lsect;
-	int ref;
 
 	while (sg < last_block_sg) {
 		KASSERT(segs->ds_addr % (1 << XBD_SECTOR_SHFT) == 0,
 		    ("XEN disk driver I/O must be sector aligned"));
 		KASSERT(segs->ds_len % (1 << XBD_SECTOR_SHFT) == 0,
 		    ("XEN disk driver I/Os must be a multiple of "
 		    "the sector length"));
 		buffer_ma = segs->ds_addr;
 		fsect = (buffer_ma & PAGE_MASK) >> XBD_SECTOR_SHFT;
 		lsect = fsect + (segs->ds_len >> XBD_SECTOR_SHFT) - 1;
 		KASSERT(lsect <= 7, ("XEN disk driver data cannot "
 		    "cross a page boundary"));
 
-		/* install a grant reference. */
-		ref = gnttab_claim_grant_reference(gref_head);
-
 		/*
 		 * GNTTAB_LIST_END == 0xffffffff, but it is private
 		 * to gnttab.c.
 		 */
-		KASSERT(ref != ~0, ("grant_reference failed"));
-
-		gnttab_grant_foreign_access_ref(
-		    ref,
-		    otherend_id,
-		    buffer_ma >> PAGE_SHIFT,
-		    readonly);
-
-		*sg_ref = ref;
+		KASSERT(*sg_ref != ~0, ("grant_reference failed"));
 
 		*sg = (struct blkif_request_segment) {
-			.gref = ref,
+			.gref = *sg_ref,
 			.first_sect = fsect,
 			.last_sect = lsect
 		};
 		sg++;
 		sg_ref++;
 		segs++;
 	}
 }
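For reference, the first/last-sector arithmetic that xbd_mksegarray() keeps
using: with 512-byte sectors (XBD_SECTOR_SHFT is 9) and 4 KiB pages, a segment
covers sectors fsect through lsect of a single page, so lsect can never exceed
7. A self-contained userspace illustration of that arithmetic; the constants
are assumptions chosen to mirror the driver's:

    #include <assert.h>
    #include <stdint.h>

    #define SECTOR_SHIFT 9                        /* 512-byte sectors */
    #define PAGE_SHIFT   12                       /* 4 KiB pages (assumed) */
    #define PAGE_MASK    ((1u << PAGE_SHIFT) - 1)

    int
    main(void)
    {
            uint64_t ds_addr = 0x12345600;        /* sector-aligned address */
            uint64_t ds_len = 0x800;              /* 2048 bytes == 4 sectors */
            uint64_t fsect, lsect;

            /* Sector offset of the segment's start within its page. */
            fsect = (ds_addr & PAGE_MASK) >> SECTOR_SHIFT; /* 0x600>>9 == 3 */
            /* Last sector covered by the segment. */
            lsect = fsect + (ds_len >> SECTOR_SHIFT) - 1;  /* 3+4-1 == 6 */

            assert(fsect == 3 && lsect == 6);
            assert(lsect <= 7);   /* the driver's KASSERT: stay in one page */
            return (0);
    }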
@@ (25 unchanged lines skipped) @@
 		ring_req = (blkif_request_t *)
 		    RING_GET_REQUEST(&sc->xbd_ring, sc->xbd_ring.req_prod_pvt);
 		sc->xbd_ring.req_prod_pvt++;
 		ring_req->id = cm->cm_id;
 		ring_req->operation = cm->cm_operation;
 		ring_req->sector_number = cm->cm_sector_number;
 		ring_req->handle = (blkif_vdev_t)(uintptr_t)sc->xbd_disk;
 		ring_req->nr_segments = nsegs;
 		cm->cm_nseg = nsegs;
-		xbd_mksegarray(segs, nsegs, &cm->cm_gref_head,
-		    xenbus_get_otherend_id(sc->xbd_dev),
-		    cm->cm_operation == BLKIF_OP_WRITE,
-		    cm->cm_sg_refs, ring_req->seg);
+		cm->cm_sg_refs = xen_dmamap_get_grefs(cm->cm_map);
+		xbd_mksegarray(segs, nsegs, cm->cm_sg_refs, ring_req->seg);
 	} else {
 		blkif_request_indirect_t *ring_req;
 
 		/* Fill out a blkif_request_indirect_t structure. */
 		ring_req = (blkif_request_indirect_t *)
 		    RING_GET_REQUEST(&sc->xbd_ring, sc->xbd_ring.req_prod_pvt);
 		sc->xbd_ring.req_prod_pvt++;
 		ring_req->id = cm->cm_id;
 		ring_req->operation = BLKIF_OP_INDIRECT;
 		ring_req->indirect_op = cm->cm_operation;
 		ring_req->sector_number = cm->cm_sector_number;
 		ring_req->handle = (blkif_vdev_t)(uintptr_t)sc->xbd_disk;
 		ring_req->nr_segments = nsegs;
 		cm->cm_nseg = nsegs;
-		xbd_mksegarray(segs, nsegs, &cm->cm_gref_head,
-		    xenbus_get_otherend_id(sc->xbd_dev),
-		    cm->cm_operation == BLKIF_OP_WRITE,
-		    cm->cm_sg_refs, cm->cm_indirectionpages);
-		memcpy(ring_req->indirect_grefs, &cm->cm_indirectionrefs,
+		cm->cm_sg_refs = xen_dmamap_get_grefs(cm->cm_map);
+		xbd_mksegarray(segs, nsegs, cm->cm_sg_refs,
+		    cm->cm_indirectionpages);
+		memcpy(ring_req->indirect_grefs, cm->cm_indirectionrefs,
 		    sizeof(grant_ref_t) * sc->xbd_max_request_indirectpages);
 	}
 
 	if (cm->cm_operation == BLKIF_OP_READ)
 		op = BUS_DMASYNC_PREREAD;
 	else if (cm->cm_operation == BLKIF_OP_WRITE)
 		op = BUS_DMASYNC_PREWRITE;
 	else
 		op = 0;
 	bus_dmamap_sync(sc->xbd_io_dmat, cm->cm_map, op);
 
-	gnttab_free_grant_references(cm->cm_gref_head);
-
 	xbd_enqueue_cm(cm, XBD_Q_BUSY);
 
 	/*
 	 * If bus dma had to asynchronously call us back to dispatch
 	 * this command, we are no longer executing in the context of
 	 * xbd_startio(). Thus we cannot rely on xbd_startio()'s call to
 	 * xbd_flush_requests() to publish this command to the backend
 	 * along with any other commands that it could batch.
 	 */
 	if ((cm->cm_flags & XBDCF_ASYNC_MAPPING) != 0)
 		xbd_flush_requests(sc);
 
 	return;
 }
 
 static int
 xbd_queue_request(struct xbd_softc *sc, struct xbd_command *cm)
 {
 	int error;
 
 	if (cm->cm_bp != NULL)
 		error = bus_dmamap_load_bio(sc->xbd_io_dmat, cm->cm_map,
-		    cm->cm_bp, xbd_queue_cb, cm, 0);
+		    cm->cm_bp, xbd_queue_cb, cm,
+		    (cm->cm_operation == BLKIF_OP_WRITE) ? BUS_DMA_XEN_RO : 0);
 	else
 		error = bus_dmamap_load(sc->xbd_io_dmat, cm->cm_map,
-		    cm->cm_data, cm->cm_datalen, xbd_queue_cb, cm, 0);
+		    cm->cm_data, cm->cm_datalen, xbd_queue_cb, cm,
+		    (cm->cm_operation == BLKIF_OP_WRITE) ? BUS_DMA_XEN_RO : 0);
 	if (error == EINPROGRESS) {
 		/*
 		 * Maintain queuing order by freezing the queue. The next
 		 * command may not require as many resources as the command
 		 * we just attempted to map, so we can't rely on bus dma
 		 * blocking for it too.
 		 */
 		xbd_cm_freeze(sc, cm, XBDCF_ASYNC_MAPPING);
 		return (0);
 	}
 
 	return (error);
 }
 
-static void
-xbd_restart_queue_callback(void *arg)
-{
-	struct xbd_softc *sc = arg;
-
-	mtx_lock(&sc->xbd_io_lock);
-	xbd_thaw(sc, XBDF_GNT_SHORTAGE);
-	xbd_startio(sc);
-	mtx_unlock(&sc->xbd_io_lock);
-}
-
 static struct xbd_command *
 xbd_bio_command(struct xbd_softc *sc)
 {
 	struct xbd_command *cm;
 	struct bio *bp;
 
 	if (__predict_false(sc->xbd_state != XBD_STATE_CONNECTED))
 		return (NULL);
 
 	bp = xbd_dequeue_bio(sc);
 	if (bp == NULL)
 		return (NULL);
 
 	if ((cm = xbd_dequeue_cm(sc, XBD_Q_FREE)) == NULL) {
 		xbd_freeze(sc, XBDF_CM_SHORTAGE);
 		xbd_requeue_bio(sc, bp);
 		return (NULL);
 	}
 
-	if (gnttab_alloc_grant_references(sc->xbd_max_request_segments,
-	    &cm->cm_gref_head) != 0) {
-		gnttab_request_free_callback(&sc->xbd_callback,
-		    xbd_restart_queue_callback, sc,
-		    sc->xbd_max_request_segments);
-		xbd_freeze(sc, XBDF_GNT_SHORTAGE);
-		xbd_requeue_bio(sc, bp);
-		xbd_enqueue_cm(cm, XBD_Q_FREE);
-		return (NULL);
-	}
-
 	cm->cm_bp = bp;
 	cm->cm_sector_number = (blkif_sector_t)bp->bio_pblkno;
 
 	switch (bp->bio_cmd) {
 	case BIO_READ:
 		cm->cm_operation = BLKIF_OP_READ;
 		break;
 	case BIO_WRITE:
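Two simplifications meet here. First, a write only requires the backend to
read the frontend's pages, so the load flags now carry BUS_DMA_XEN_RO for
BLKIF_OP_WRITE. Second, the grant-shortage machinery, meaning
gnttab_alloc_grant_references(), xbd_restart_queue_callback() and the
XBDF_GNT_SHORTAGE freeze, disappears: with grant references owned by busdma
and the tag created with BUS_DMA_ALLOCNOW (see the xbd_connect() hunk below),
a load is not expected to fail for lack of grants. A minimal sketch of the
resulting pattern, with hypothetical names and the callback contract assumed
from this diff:

    #include <sys/param.h>
    #include <sys/systm.h>
    #include <machine/bus.h>
    #include <xen/interface/grant_table.h>
    #include <xen/busdma_xen.h>

    struct sketch_io {                      /* hypothetical per-I/O state */
            bus_dmamap_t     map;
            grant_ref_t     *grefs;
    };

    static void
    sketch_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
    {
            struct sketch_io *io = arg;

            if (error != 0)
                    return;
            /* One pre-claimed grant reference per returned segment. */
            io->grefs = xen_dmamap_get_grefs(io->map);
    }

    static int
    sketch_load(bus_dma_tag_t tag, struct sketch_io *io, void *buf,
        bus_size_t len, bool is_write)
    {
            /* The backend only reads our pages on a write: grant RO. */
            return (bus_dmamap_load(tag, io->map, buf, len, sketch_cb, io,
                is_write ? BUS_DMA_XEN_RO : 0));
    }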
@@ (125 unchanged lines skipped) @@ again:
 	rp = sc->xbd_ring.sring->rsp_prod;
 	rmb();	/* Ensure we see queued responses up to 'rp'. */
 
 	for (i = sc->xbd_ring.rsp_cons; i != rp;) {
 		bret = RING_GET_RESPONSE(&sc->xbd_ring, i);
 		cm = &sc->xbd_shadow[bret->id];
 
 		xbd_remove_cm(cm, XBD_Q_BUSY);
-		gnttab_end_foreign_access_references(cm->cm_nseg,
-		    cm->cm_sg_refs);
 		i++;
 
 		if (cm->cm_operation == BLKIF_OP_READ)
 			op = BUS_DMASYNC_POSTREAD;
 		else if (cm->cm_operation == BLKIF_OP_WRITE ||
 		    cm->cm_operation == BLKIF_OP_WRITE_BARRIER)
 			op = BUS_DMASYNC_POSTWRITE;
 		else
 			op = 0;
 		bus_dmamap_sync(sc->xbd_io_dmat, cm->cm_map, op);
 		bus_dmamap_unload(sc->xbd_io_dmat, cm->cm_map);
+		cm->cm_sg_refs = NULL;
 
 		/*
 		 * Release any hold this command has on future command
 		 * dispatch.
 		 */
 		xbd_cm_thaw(sc, cm);
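The per-segment gnttab_end_foreign_access_references() call is gone from the
completion path. The assumption encoded above is that bus_dmamap_unload() on a
Xen-backed map revokes the grants itself, which also invalidates the gref
array handed out at load time; hence the new cm->cm_sg_refs = NULL. The
ordering, condensed into a hypothetical helper:

    #include <sys/param.h>
    #include <machine/bus.h>
    #include <xen/interface/grant_table.h>

    /* Completion-side ordering assumed by the hunk above (sketch). */
    static void
    sketch_complete(bus_dma_tag_t tag, bus_dmamap_t map,
        grant_ref_t **grefsp, bus_dmasync_op_t op)
    {
            bus_dmamap_sync(tag, map, op);  /* make the CPU's view current */
            bus_dmamap_unload(tag, map);    /* ends foreign access to pages */
            *grefsp = NULL;                 /* the gref array is now stale */
    }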
@@ (91 unchanged lines skipped) @@ xbd_dump(void *arg, void *virtual, vm_offset_t physical, off_t offset,
 	for (sbp=0; length > 0; sbp++) {
 		cm = xbd_dequeue_cm(sc, XBD_Q_FREE);
 		if (cm == NULL) {
 			mtx_unlock(&sc->xbd_io_lock);
 			device_printf(sc->xbd_dev, "dump: no more commands?\n");
 			return (EBUSY);
 		}
 
-		if (gnttab_alloc_grant_references(sc->xbd_max_request_segments,
-		    &cm->cm_gref_head) != 0) {
-			xbd_free_command(cm);
-			mtx_unlock(&sc->xbd_io_lock);
-			device_printf(sc->xbd_dev, "no more grant allocs?\n");
-			return (EBUSY);
-		}
-
 		chunk = length > sc->xbd_max_request_size ?
 		    sc->xbd_max_request_size : length;
 		cm->cm_data = virtual;
 		cm->cm_datalen = chunk;
 		cm->cm_operation = BLKIF_OP_WRITE;
 		cm->cm_sector_number = offset / dp->d_sectorsize;
 		cm->cm_complete = xbd_dump_complete;
@@ (416 unchanged lines skipped) @@ xbd_free(struct xbd_softc *sc)
 	xbd_free_ring(sc);
 	if (sc->xbd_shadow) {
 		for (i = 0; i < sc->xbd_max_requests; i++) {
 			struct xbd_command *cm;
 
 			cm = &sc->xbd_shadow[i];
 			if (cm->cm_sg_refs != NULL) {
-				free(cm->cm_sg_refs, M_XENBLOCKFRONT);
+				bus_dmamap_unload(sc->xbd_io_dmat, cm->cm_map);
 				cm->cm_sg_refs = NULL;
 			}
 
 			if (cm->cm_indirectionpages != NULL) {
-				gnttab_end_foreign_access_references(
-				    sc->xbd_max_request_indirectpages,
-				    &cm->cm_indirectionrefs[0]);
+				bus_dmamap_unload(sc->xbd_io_dmat,
+				    cm->cm_indirectionmap);
+				bus_dmamap_destroy(sc->xbd_io_dmat,
+				    cm->cm_indirectionmap);
 				contigfree(cm->cm_indirectionpages, PAGE_SIZE *
 				    sc->xbd_max_request_indirectpages,
 				    M_XENBLOCKFRONT);
 				cm->cm_indirectionpages = NULL;
+				cm->cm_indirectionrefs = NULL;
+				cm->cm_indirectionmap = NULL;
 			}
 
 			bus_dmamap_destroy(sc->xbd_io_dmat, cm->cm_map);
 		}
 		free(sc->xbd_shadow, M_XENBLOCKFRONT);
 		sc->xbd_shadow = NULL;
 
 		bus_dma_tag_destroy(sc->xbd_io_dmat);
@@ (124 unchanged lines skipped) @@
 		xenbus_dev_fatal(sc->xbd_dev, error,
"writing %s/protocol", | "writing %s/protocol", | ||||
node_path); | node_path); | ||||
return; | return; | ||||
} | } | ||||
xenbus_set_state(sc->xbd_dev, XenbusStateInitialised); | xenbus_set_state(sc->xbd_dev, XenbusStateInitialised); | ||||
} | } | ||||
/* | /* | ||||
* Callback received from the dma load when xbd_connect() loads the indirection | |||||
* pages. | |||||
*/ | |||||
static void | |||||
xbd_indirectpage_cb(void *callback_arg, bus_dma_segment_t *segs, int nseg, | |||||
int error) | |||||
{ | |||||
struct xbd_command *cm; | |||||
cm = callback_arg; | |||||
if (error) { | |||||
/* Disable indirection pages and continue. */ | |||||
cm->cm_sc->xbd_max_request_indirectpages = 0; | |||||
cm->cm_indirectionpages = NULL; | |||||
return; | |||||
} | |||||
KASSERT((nseg == cm->cm_sc->xbd_max_request_indirectpages), | |||||
("%s: number of dma segments not equal to the expected number. " | |||||
"nseg = %d, xbd_max_request_segments = %d. Verify that the " | |||||
"constraints passed when creating the tag are correct.", __func__, | |||||
nseg, cm->cm_sc->xbd_max_request_indirectpages)); | |||||
cm->cm_indirectionrefs = xen_dmamap_get_grefs(cm->cm_indirectionmap); | |||||
} | |||||
/* | |||||
* Invoked when the backend is finally 'ready' (and has published | * Invoked when the backend is finally 'ready' (and has published | ||||
* the details about the physical device - #sectors, size, etc). | * the details about the physical device - #sectors, size, etc). | ||||
*/ | */ | ||||
static void | static void | ||||
xbd_connect(struct xbd_softc *sc) | xbd_connect(struct xbd_softc *sc) | ||||
{ | { | ||||
device_t dev = sc->xbd_dev; | device_t dev = sc->xbd_dev; | ||||
unsigned long sectors, sector_size, phys_sector_size; | unsigned long sectors, sector_size, phys_sector_size; | ||||
unsigned int binfo; | unsigned int binfo; | ||||
int err, feature_barrier, feature_flush; | int err, feature_barrier, feature_flush; | ||||
int i, j; | int i, flags; | ||||
if (sc->xbd_state == XBD_STATE_CONNECTED || | if (sc->xbd_state == XBD_STATE_CONNECTED || | ||||
sc->xbd_state == XBD_STATE_SUSPENDED) | sc->xbd_state == XBD_STATE_SUSPENDED) | ||||
return; | return; | ||||
DPRINTK("blkfront.c:connect:%s.\n", xenbus_get_otherend_path(dev)); | DPRINTK("blkfront.c:connect:%s.\n", xenbus_get_otherend_path(dev)); | ||||
err = xs_gather(XST_NIL, xenbus_get_otherend_path(dev), | err = xs_gather(XST_NIL, xenbus_get_otherend_path(dev), | ||||
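The KASSERT in xbd_indirectpage_cb() leans on how the indirection pages are
allocated and mapped below: contigmalloc() returns a physically contiguous,
page-aligned buffer, and the I/O tag caps segments at PAGE_SIZE, so the load
must produce exactly one segment per indirection page. The arithmetic, as a
standalone check (the PAGE_SIZE value is an assumption for the sketch):

    #include <assert.h>
    #include <stddef.h>

    #define PAGE_SIZE 4096  /* assumption for the sketch (x86) */

    /*
     * Segments busdma must produce for a page-aligned, contiguous buffer
     * when maxsegsz == PAGE_SIZE: one per page, i.e. exactly npages.
     */
    static size_t
    nseg_for(size_t npages)
    {
            size_t len = npages * PAGE_SIZE;

            return ((len + PAGE_SIZE - 1) / PAGE_SIZE);
    }

    int
    main(void)
    {
            assert(nseg_for(8) == 8);   /* e.g. 8 indirection pages */
            return (0);
    }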
@@ (43 unchanged lines skipped) @@ if (sc->xbd_max_request_segments > XBD_SIZE_TO_SEGS(MAXPHYS))
 		sc->xbd_max_request_segments = XBD_SIZE_TO_SEGS(MAXPHYS);
 	sc->xbd_max_request_indirectpages =
 	    XBD_INDIRECT_SEGS_TO_PAGES(sc->xbd_max_request_segments);
 	if (sc->xbd_max_request_segments < BLKIF_MAX_SEGMENTS_PER_REQUEST)
 		sc->xbd_max_request_segments = BLKIF_MAX_SEGMENTS_PER_REQUEST;
 	sc->xbd_max_request_size =
 	    XBD_SEGS_TO_SIZE(sc->xbd_max_request_segments);
 
+	flags = BUS_DMA_ALLOCNOW |
+	    (xenbus_get_otherend_id(sc->xbd_dev) << BUS_DMA_XEN_DOMID_SHIFT);
+
 	/* Allocate datastructures based on negotiated values. */
 	err = bus_dma_tag_create(
 	    bus_get_dma_tag(sc->xbd_dev),	/* parent */
 	    512, PAGE_SIZE,			/* algnmnt, boundary */
 	    BUS_SPACE_MAXADDR,			/* lowaddr */
 	    BUS_SPACE_MAXADDR,			/* highaddr */
 	    NULL, NULL,				/* filter, filterarg */
 	    sc->xbd_max_request_size,
 	    sc->xbd_max_request_segments,
 	    PAGE_SIZE,				/* maxsegsize */
-	    BUS_DMA_ALLOCNOW,			/* flags */
+	    flags,				/* flags */
 	    busdma_lock_mutex,			/* lockfunc */
 	    &sc->xbd_io_lock,			/* lockarg */
 	    &sc->xbd_io_dmat);
 	if (err != 0) {
 		xenbus_dev_fatal(sc->xbd_dev, err,
 		    "Cannot allocate parent DMA tag\n");
 		return;
 	}
 
 	/* Per-transaction data allocation. */
 	sc->xbd_shadow = malloc(sizeof(*sc->xbd_shadow) * sc->xbd_max_requests,
 	    M_XENBLOCKFRONT, M_NOWAIT|M_ZERO);
 	if (sc->xbd_shadow == NULL) {
 		bus_dma_tag_destroy(sc->xbd_io_dmat);
 		xenbus_dev_fatal(sc->xbd_dev, ENOMEM,
 		    "Cannot allocate request structures\n");
 		return;
 	}
 
 	for (i = 0; i < sc->xbd_max_requests; i++) {
 		struct xbd_command *cm;
 		void * indirectpages;
+		int indirectflags;
 
 		cm = &sc->xbd_shadow[i];
-		cm->cm_sg_refs = malloc(
-		    sizeof(grant_ref_t) * sc->xbd_max_request_segments,
-		    M_XENBLOCKFRONT, M_NOWAIT);
-		if (cm->cm_sg_refs == NULL)
-			break;
 		cm->cm_id = i;
 		cm->cm_flags = XBDCF_INITIALIZER;
 		cm->cm_sc = sc;
 		if (bus_dmamap_create(sc->xbd_io_dmat, 0, &cm->cm_map) != 0)
 			break;
 		if (sc->xbd_max_request_indirectpages > 0) {
 			indirectpages = contigmalloc(
 			    PAGE_SIZE * sc->xbd_max_request_indirectpages,
 			    M_XENBLOCKFRONT, M_ZERO | M_NOWAIT, 0, ~0,
 			    PAGE_SIZE, 0);
 			if (indirectpages == NULL)
 				sc->xbd_max_request_indirectpages = 0;
 		} else {
 			indirectpages = NULL;
+			cm->cm_indirectionpages = indirectpages;
+			xbd_free_command(cm);
+			continue;
 		}
-		for (j = 0; j < sc->xbd_max_request_indirectpages; j++) {
-			if (gnttab_grant_foreign_access(
-			    xenbus_get_otherend_id(sc->xbd_dev),
-			    (vtophys(indirectpages) >> PAGE_SHIFT) + j,
-			    1 /* grant read-only access */,
-			    &cm->cm_indirectionrefs[j]))
+		if (bus_dmamap_create(sc->xbd_io_dmat, 0,
+		    &cm->cm_indirectionmap)) {
+			contigfree(indirectpages, PAGE_SIZE *
+			    sc->xbd_max_request_indirectpages, M_XENBLOCKFRONT);
 			break;
 		}
-		if (j < sc->xbd_max_request_indirectpages) {
-			contigfree(indirectpages,
-			    PAGE_SIZE * sc->xbd_max_request_indirectpages,
-			    M_XENBLOCKFRONT);
+
+		/* Grant read-only access */
+		indirectflags = BUS_DMA_NOWAIT | BUS_DMA_XEN_RO;
+		if (bus_dmamap_load(sc->xbd_io_dmat, cm->cm_indirectionmap,
+		    indirectpages, PAGE_SIZE * sc->xbd_max_request_indirectpages,
+		    xbd_indirectpage_cb, cm, indirectflags)) {
+			contigfree(indirectpages, PAGE_SIZE *
+			    sc->xbd_max_request_indirectpages, M_XENBLOCKFRONT);
 			break;
 		}
 		cm->cm_indirectionpages = indirectpages;
 		xbd_free_command(cm);
 	}
 
 	if (sc->xbd_disk == NULL) {
 		device_printf(dev, "%juMB <%s> at %s",
 		    (uintmax_t) sectors / (1048576 / sector_size),
 		    device_get_desc(dev),
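Worth noting from the hunk above: the backend's domain id rides in the DMA
tag's flags word, so every map created from sc->xbd_io_dmat grants to the
correct domain without each call site passing xenbus_get_otherend_id()
around. A sketch of the assumed encoding, assuming domid_t is 16 bits and
that BUS_DMA_XEN_DOMID_SHIFT places it above the ordinary BUS_DMA_* flag
bits:

    #include <sys/param.h>
    #include <machine/bus.h>
    #include <xen/interface/xen.h>
    #include <xen/busdma_xen.h>

    /* Encode: ordinary flags in the low bits, grant-target domid above. */
    static int
    sketch_tag_flags(domid_t otherend)
    {
            return (BUS_DMA_ALLOCNOW |
                ((int)otherend << BUS_DMA_XEN_DOMID_SHIFT));
    }

    /* Decode, as the xen busdma backend presumably does. */
    static domid_t
    sketch_tag_domid(int flags)
    {
            return ((domid_t)(flags >> BUS_DMA_XEN_DOMID_SHIFT));
    }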
@@ (258 unchanged lines skipped through end of file) @@