Index: sys/dev/xen/netfront/netfront.c
===================================================================
--- sys/dev/xen/netfront/netfront.c
+++ sys/dev/xen/netfront/netfront.c
@@ -63,6 +63,8 @@
 #include

+#include
+
 #include
 #include
 #include
@@ -70,6 +72,7 @@
 #include
 #include
 #include
+#include

 #include "xenbus_if.h"
@@ -169,9 +172,13 @@
 	netif_rx_front_ring_t	ring;
 	xen_intr_handle_t	xen_intr_handle;

-	grant_ref_t	gref_head;
 	grant_ref_t	grant_ref[NET_RX_RING_SIZE + 1];

+	bus_dma_tag_t	dmat;
+	bus_dmamap_t	map_pool[NET_RX_RING_SIZE + 1];
+	int		pool_idx;
+	bus_dmamap_t	maps[NET_RX_RING_SIZE + 1];
+
 	struct mbuf	*mbufs[NET_RX_RING_SIZE + 1];

 	struct lro_ctrl	lro;
@@ -189,9 +196,13 @@
 	netif_tx_front_ring_t	ring;
 	xen_intr_handle_t	xen_intr_handle;

-	grant_ref_t	gref_head;
 	grant_ref_t	grant_ref[NET_TX_RING_SIZE + 1];

+	bus_dma_tag_t	dmat;
+	bus_dmamap_t	map_pool[NET_TX_RING_SIZE + 1];
+	int		pool_idx;
+	bus_dmamap_t	maps[NET_TX_RING_SIZE + 1];
+
 	struct mbuf	*mbufs[NET_TX_RING_SIZE + 1];
 	int		mbufs_cnt;
 	struct buf_ring	*br;
@@ -301,6 +312,90 @@
 	return (ref);
 }

+static inline bus_dmamap_t
+xn_get_rx_map(struct netfront_rxq *rxq, RING_IDX ri)
+{
+	int i;
+	bus_dmamap_t map;
+
+	i = xn_rxidx(ri);
+	map = rxq->maps[i];
+
+	KASSERT(map != NULL, ("Invalid dma map"));
+	rxq->maps[i] = NULL;
+	return (map);
+}
+
+/* XXX Suggest a better function name? */
+static inline bus_dmamap_t
+xn_unpool_rx_map(struct netfront_rxq *rxq)
+{
+	/* XXX Should this be a KASSERT? */
+	if (rxq->pool_idx < 0) {
+		return (NULL);
+	}
+
+	return (rxq->map_pool[rxq->pool_idx--]);
+}
+
+static inline void
+xn_repool_rx_map(struct netfront_rxq *rxq, bus_dmamap_t map)
+{
+	KASSERT(rxq->pool_idx < NET_RX_RING_SIZE,
+	    ("Too many rx maps"));
+
+	rxq->map_pool[++rxq->pool_idx] = map;
+}
+
+static inline bus_dmamap_t
+xn_unpool_tx_map(struct netfront_txq *txq)
+{
+	if (txq->pool_idx < 0) {
+		return (NULL);
+	}
+
+	return (txq->map_pool[txq->pool_idx--]);
+}
+
+static inline void
+xn_repool_tx_map(struct netfront_txq *txq, bus_dmamap_t map)
+{
+	KASSERT(txq->pool_idx < NET_TX_RING_SIZE,
+	    ("Too many tx maps"));
+	KASSERT(map != NULL, ("NULL map being put in the tx pool"));
+
+	txq->map_pool[++txq->pool_idx] = map;
+}
+
+static inline grant_ref_t
+xn_get_map_gref(bus_dmamap_t map)
+{
+	grant_ref_t *refs;
+
+	/*
+	 * We assume that there is only one grant reference.  It is the
+	 * responsibility of the caller of the load to make sure this
+	 * assumption holds true.
+	 */
+	refs = xen_dmamap_get_grefs(map);
+
+	return (*refs);
+}
+
+/*
+ * Callback invoked when a dma load completes.
+ *
+ * This callback is shared by the rx and tx paths.  If you want to add
+ * path-specific functionality, don't do it here; create a new callback.
+ */
+static void
+xn_dma_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
+{
+	KASSERT(error == 0, ("%s: Load failed with error code %d", __func__,
+	    error));
+	KASSERT(nseg == 1, ("%s: More dma segments than expected", __func__));
+}
+
 #define IPRINTK(fmt, args...) \
     printf("[XEN] " fmt, ##args)

 #ifdef INVARIANTS
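The helpers above replace the old per-queue gref_head free-list with a LIFO
pool of preallocated dma maps.  A minimal sketch of how they are meant to be
combined, assuming (as the rest of the patch does) that the Xen busdma
backend grants the other domain access during bus_dmamap_load() and revokes
it on bus_dmamap_unload(); xn_grant_rx_mbuf() and xn_revoke_rx_mbuf() are
hypothetical names, not functions this patch adds:

static grant_ref_t
xn_grant_rx_mbuf(struct netfront_rxq *rxq, struct mbuf *m, bus_dmamap_t *mapp)
{
	bus_dmamap_t map;
	int error;

	/* Claim a preallocated map (and its grant ref) from the pool. */
	map = xn_unpool_rx_map(rxq);
	KASSERT(map != NULL, ("rx map pool exhausted"));

	/* Loading the map populates the grant entry for the backend. */
	error = bus_dmamap_load(rxq->dmat, map, m->m_data, m->m_len,
	    xn_dma_cb, rxq, BUS_DMA_NOWAIT);
	KASSERT(error == 0, ("rx map load failed"));

	*mapp = map;
	return (xn_get_map_gref(map));
}

static void
xn_revoke_rx_mbuf(struct netfront_rxq *rxq, bus_dmamap_t map)
{
	/* Unloading ends the foreign access; the map goes back on the pool. */
	bus_dmamap_unload(rxq->dmat, map);
	xn_repool_rx_map(rxq, map);
}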
@@ -666,9 +761,26 @@
 static void
 disconnect_rxq(struct netfront_rxq *rxq)
 {
+	int i, error;

 	xn_release_rx_bufs(rxq);
-	gnttab_free_grant_references(rxq->gref_head);
+	if (rxq->pool_idx != NET_RX_RING_SIZE) {
+		printf("%s: Some maps are still in use\n", __func__);
+	}
+
+	for (i = 0; i <= NET_RX_RING_SIZE; i++) {
+		error = bus_dmamap_destroy(rxq->dmat,
+		    rxq->map_pool[i]);
+		KASSERT(error == 0, ("%s: destruction of map pool failed",
+		    __func__));
+	}
+
+	rxq->pool_idx = -1;
+
+	error = bus_dma_tag_destroy(rxq->dmat);
+	KASSERT(error == 0, ("%s: destruction of rxq dma tag failed",
+	    __func__));
+
 	gnttab_end_foreign_access(rxq->ring_ref, NULL);
 	/*
 	 * No split event channel support at the moment, handle will
@@ -703,13 +815,16 @@
     unsigned long num_queues)
 {
 	int q, i;
-	int error;
+	int error, flags;
 	netif_rx_sring_t *rxs;
 	struct netfront_rxq *rxq;

 	info->rxq = malloc(sizeof(struct netfront_rxq) * num_queues,
 	    M_DEVBUF, M_WAITOK|M_ZERO);

+	/* Flags to be passed to bus_dma_tag_create(). */
+	flags = xenbus_get_otherend_id(dev) << BUS_DMA_XEN_DOMID_SHIFT;
+
 	for (q = 0; q < num_queues; q++) {
 		rxq = &info->rxq[q];

@@ -717,22 +832,45 @@
 		rxq->info = info;
 		rxq->ring_ref = GRANT_REF_INVALID;
 		rxq->ring.sring = NULL;
+		rxq->pool_idx = -1;
 		snprintf(rxq->name, XN_QUEUE_NAME_LEN, "xnrx_%u", q);
 		mtx_init(&rxq->lock, rxq->name, "netfront receive lock",
 		    MTX_DEF);

+		error = bus_dma_tag_create(
+		    bus_get_dma_tag(dev),	/* parent */
+		    1, PAGE_SIZE,		/* alignment, boundary */
+		    BUS_SPACE_MAXADDR,		/* lowaddr */
+		    BUS_SPACE_MAXADDR,		/* highaddr */
+		    NULL, NULL,			/* filter, filterarg */
+		    PAGE_SIZE,			/* maxsize */
+		    1,				/* nsegments */
+		    PAGE_SIZE,			/* maxsegsize */
+		    flags,			/* flags */
+		    NULL,			/* lockfunc */
+		    NULL,			/* lockarg */
+		    &rxq->dmat);
+		if (error) {
+			device_printf(dev, "Creating rx tag failed\n");
+			goto fail;
+		}
+
 		for (i = 0; i <= NET_RX_RING_SIZE; i++) {
 			rxq->mbufs[i] = NULL;
-			rxq->grant_ref[i] = GRANT_REF_INVALID;
+			rxq->maps[i] = NULL;
 		}

 		/* Start resources allocation */
-		if (gnttab_alloc_grant_references(NET_RX_RING_SIZE,
-		    &rxq->gref_head) != 0) {
-			device_printf(dev, "allocating rx gref");
-			error = ENOMEM;
-			goto fail;
+		for (i = 0; i <= NET_RX_RING_SIZE; i++) {
+			error = bus_dmamap_create(rxq->dmat,
+			    BUS_DMA_XEN_PREALLOC_REFS, &rxq->map_pool[i]);
+			if (error) {
+				device_printf(dev, "creating rx map failed\n");
+				goto fail_dma_map;
+			}
+			rxq->pool_idx++;
 		}

 		rxs = (netif_rx_sring_t *)malloc(PAGE_SIZE, M_DEVBUF,
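The tx queue setup below repeats this bus_dma_tag_create() call verbatim.  A
shared helper could keep the two in sync; this is only a sketch
(xn_create_queue_tag() is a hypothetical name, and BUS_DMA_XEN_DOMID_SHIFT is
assumed to come from the companion Xen busdma patches).  Each tag deliberately
describes a single page: one segment, PAGE_SIZE maxsize, PAGE_SIZE boundary,
so every load produces exactly one grant.

static int
xn_create_queue_tag(device_t dev, bus_dma_tag_t *dmat)
{
	int flags;

	/* Encode the backend domain id into the busdma flags. */
	flags = xenbus_get_otherend_id(dev) << BUS_DMA_XEN_DOMID_SHIFT;

	return (bus_dma_tag_create(bus_get_dma_tag(dev),
	    1, PAGE_SIZE,		/* alignment, boundary */
	    BUS_SPACE_MAXADDR,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    PAGE_SIZE,			/* maxsize */
	    1,				/* nsegments */
	    PAGE_SIZE,			/* maxsegsize */
	    flags,			/* flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    dmat));
}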
@@ -753,9 +891,24 @@
 	return (0);

 fail_grant_ring:
-	gnttab_free_grant_references(rxq->gref_head);
 	free(rxq->ring.sring, M_DEVBUF);
+fail_dma_map:
+	/*
+	 * Clean up for the current queue, then call disconnect_rxq() for
+	 * the rest.
+	 *
+	 * XXX Adding another loop after the map creation loop that reuses
+	 * i will break this.
+	 */
+	for (i--; i >= 0; i--) {
+		error = bus_dmamap_destroy(rxq->dmat, rxq->map_pool[i]);
+		KASSERT(error == 0, ("%s: destruction of map pool failed",
+		    __func__));
+	}
+	error = bus_dma_tag_destroy(rxq->dmat);
+	KASSERT(error == 0, ("%s: destruction of dma tag failed", __func__));
 fail:
+	q--;
 	for (; q >= 0; q--) {
 		disconnect_rxq(&info->rxq[q]);
 		destroy_rxq(&info->rxq[q]);
@@ -768,9 +921,25 @@
 static void
 disconnect_txq(struct netfront_txq *txq)
 {
-
+	int i, error;
+
 	xn_release_tx_bufs(txq);
-	gnttab_free_grant_references(txq->gref_head);
+	if (txq->pool_idx != NET_TX_RING_SIZE) {
+		printf("%s: Some maps are still in use\n", __func__);
+	}
+
+	for (i = 0; i <= NET_TX_RING_SIZE; i++) {
+		error = bus_dmamap_destroy(txq->dmat,
+		    txq->map_pool[i]);
+		KASSERT(error == 0, ("%s: destruction of map pool failed",
+		    __func__));
+	}
+
+	txq->pool_idx = -1;
+
+	error = bus_dma_tag_destroy(txq->dmat);
+	KASSERT(error == 0, ("%s: destruction of txq dma tag failed",
+	    __func__));
+
 	gnttab_end_foreign_access(txq->ring_ref, NULL);
 	xen_intr_unbind(&txq->xen_intr_handle);
 }
@@ -802,19 +971,24 @@
     unsigned long num_queues)
 {
 	int q, i;
-	int error;
+	int error, flags;
 	netif_tx_sring_t *txs;
 	struct netfront_txq *txq;

 	info->txq = malloc(sizeof(struct netfront_txq) * num_queues,
 	    M_DEVBUF, M_WAITOK|M_ZERO);

+	/* Flags to be passed to bus_dma_tag_create(). */
+	flags = xenbus_get_otherend_id(dev) << BUS_DMA_XEN_DOMID_SHIFT;
+
 	for (q = 0; q < num_queues; q++) {
 		txq = &info->txq[q];

 		txq->id = q;
 		txq->info = info;

+		txq->pool_idx = -1;
+
 		txq->ring_ref = GRANT_REF_INVALID;
 		txq->ring.sring = NULL;

@@ -823,19 +997,41 @@
 		mtx_init(&txq->lock, txq->name, "netfront transmit lock",
 		    MTX_DEF);

+		error = bus_dma_tag_create(
+		    bus_get_dma_tag(dev),	/* parent */
+		    1, PAGE_SIZE,		/* alignment, boundary */
+		    BUS_SPACE_MAXADDR,		/* lowaddr */
+		    BUS_SPACE_MAXADDR,		/* highaddr */
+		    NULL, NULL,			/* filter, filterarg */
+		    PAGE_SIZE,			/* maxsize */
+		    1,				/* nsegments */
+		    PAGE_SIZE,			/* maxsegsize */
+		    flags,			/* flags */
+		    NULL,			/* lockfunc */
+		    NULL,			/* lockarg */
+		    &txq->dmat);
+		if (error) {
+			device_printf(dev, "Creating tx tag failed\n");
+			goto fail;
+		}
+
 		for (i = 0; i <= NET_TX_RING_SIZE; i++) {
 			txq->mbufs[i] = (void *) ((u_long) i+1);
 			txq->grant_ref[i] = GRANT_REF_INVALID;
+			txq->maps[i] = NULL;
 		}
 		txq->mbufs[NET_TX_RING_SIZE] = (void *)0;

 		/* Start resources allocation. */
-		if (gnttab_alloc_grant_references(NET_TX_RING_SIZE,
-		    &txq->gref_head) != 0) {
-			device_printf(dev, "failed to allocate tx grant refs\n");
-			error = ENOMEM;
-			goto fail;
+		for (i = 0; i <= NET_TX_RING_SIZE; i++) {
+			error = bus_dmamap_create(txq->dmat,
+			    BUS_DMA_XEN_PREALLOC_REFS, &txq->map_pool[i]);
+			if (error) {
+				device_printf(dev, "Creating tx map failed\n");
+				goto fail_dma_map;
+			}
+			txq->pool_idx++;
 		}

 		txs = (netif_tx_sring_t *)malloc(PAGE_SIZE, M_DEVBUF,
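The pool teardown (destroy every pooled map, then the tag) now appears four
times: disconnect_rxq(), disconnect_txq(), and the two fail_dma_map error
paths.  A shared helper along these lines might shrink the patch; a sketch
only (xn_destroy_map_pool() is a hypothetical name), assuming the caller has
already unloaded and repooled every in-flight map:

static void
xn_destroy_map_pool(bus_dma_tag_t dmat, bus_dmamap_t *pool, int count)
{
	int i, error;

	/* Destroy the first 'count' pooled maps... */
	for (i = 0; i < count; i++) {
		error = bus_dmamap_destroy(dmat, pool[i]);
		KASSERT(error == 0, ("%s: map destroy failed", __func__));
	}

	/* ...then the tag, which fails if any maps are left. */
	error = bus_dma_tag_destroy(dmat);
	KASSERT(error == 0, ("%s: tag destroy failed", __func__));
}

The disconnect paths would pass NET_RX_RING_SIZE + 1 (or the tx equivalent)
for count, the fail paths the number of maps created before the failure.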
@@ -885,9 +1081,24 @@
 	taskqueue_free(txq->tq);
 	gnttab_end_foreign_access(txq->ring_ref, NULL);
 fail_grant_ring:
-	gnttab_free_grant_references(txq->gref_head);
 	free(txq->ring.sring, M_DEVBUF);
+fail_dma_map:
+	/*
+	 * Clean up for the current queue, then call disconnect_txq() for
+	 * the rest.
+	 *
+	 * XXX Adding another loop after the map creation loop that reuses
+	 * i will break this.
+	 */
+	for (i--; i >= 0; i--) {
+		error = bus_dmamap_destroy(txq->dmat, txq->map_pool[i]);
+		KASSERT(error == 0, ("%s: destruction of map pool failed",
+		    __func__));
+	}
+	error = bus_dma_tag_destroy(txq->dmat);
+	KASSERT(error == 0, ("%s: destruction of dma tag failed", __func__));
 fail:
+	q--;
 	for (; q >= 0; q--) {
 		disconnect_txq(&info->txq[q]);
 		destroy_txq(&info->txq[q]);
@@ -1032,10 +1243,15 @@
 		 */
 		if (((uintptr_t)m) <= NET_TX_RING_SIZE)
 			continue;
-		gnttab_end_foreign_access_ref(txq->grant_ref[i]);
-		gnttab_release_grant_reference(&txq->gref_head,
-		    txq->grant_ref[i]);
+
+		bus_dmamap_unload(txq->dmat, txq->maps[i]);
+
+		/* Put the map back in the map pool. */
+		xn_repool_tx_map(txq, txq->maps[i]);
+
 		txq->grant_ref[i] = GRANT_REF_INVALID;
+		txq->maps[i] = NULL;
+
 		add_id_to_freelist(txq->mbufs, i);
 		txq->mbufs_cnt--;
 		if (txq->mbufs_cnt < 0) {
@@ -1062,7 +1278,7 @@
 xn_alloc_rx_buffers(struct netfront_rxq *rxq)
 {
 	RING_IDX req_prod;
-	int notify;
+	int notify, error;

 	XN_RX_LOCK_ASSERT(rxq);

@@ -1076,7 +1292,7 @@
 		unsigned short id;
 		grant_ref_t ref;
 		struct netif_rx_request *req;
-		unsigned long pfn;
+		bus_dmamap_t map;

 		m = xn_alloc_one_rx_buffer(rxq);
 		if (m == NULL)
@@ -1087,16 +1303,19 @@
 		KASSERT(rxq->mbufs[id] == NULL, ("non-NULL xn_rx_chain"));
 		rxq->mbufs[id] = m;

-		ref = gnttab_claim_grant_reference(&rxq->gref_head);
-		KASSERT(ref != GNTTAB_LIST_END,
-		    ("reserved grant references exhuasted"));
-		rxq->grant_ref[id] = ref;
+		/* Take a map from the pool. */
+		map = xn_unpool_rx_map(rxq);
+		KASSERT(map != NULL, ("Reserved dma map pool exhausted"));
+		rxq->maps[id] = map;
+
+		error = bus_dmamap_load(rxq->dmat, map, m->m_data,
+		    m->m_len, xn_dma_cb, rxq, BUS_DMA_NOWAIT);
+		KASSERT(error == 0, ("%s: map load failed", __func__));
+
+		ref = rxq->grant_ref[id] = xn_get_map_gref(map);

-		pfn = atop(vtophys(mtod(m, vm_offset_t)));
 		req = RING_GET_REQUEST(&rxq->ring, req_prod);
-		gnttab_grant_foreign_access_ref(ref,
-		    xenbus_get_otherend_id(rxq->info->xbdev), pfn, 0);
 		req->id = id;
 		req->gref = ref;
 	}
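A caveat in the refill path above: KASSERT() compiles away without
INVARIANTS, so on a production kernel a failed BUS_DMA_NOWAIT load would
silently push a stale grant reference into the ring.  The single-segment
assertion itself should be safe, since xn_alloc_one_rx_buffer() returns a
page-sized cluster, but the load can still fail transiently.  A more
defensive variant of the same step, sketched with the patch's names:

		error = bus_dmamap_load(rxq->dmat, map, m->m_data,
		    m->m_len, xn_dma_cb, rxq, BUS_DMA_NOWAIT);
		if (error != 0) {
			/* Undo the slot claim and retry on a later refill. */
			xn_repool_rx_map(rxq, map);
			rxq->maps[id] = NULL;
			rxq->mbufs[id] = NULL;
			m_freem(m);
			break;
		}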
@@ -1130,8 +1349,9 @@
 static void
 xn_release_rx_bufs(struct netfront_rxq *rxq)
 {
-	int i, ref;
+	int i;
 	struct mbuf *m;
+	bus_dmamap_t map;

 	for (i = 0; i < NET_RX_RING_SIZE; i++) {
 		m = rxq->mbufs[i];
@@ -1139,14 +1359,16 @@
 		if (m == NULL)
 			continue;

-		ref = rxq->grant_ref[i];
-		if (ref == GRANT_REF_INVALID)
-			continue;
+		map = rxq->maps[i];
+
+		bus_dmamap_unload(rxq->dmat, map);
+
+		/* Put the map back in the map pool. */
+		xn_repool_rx_map(rxq, map);

-		gnttab_end_foreign_access_ref(ref);
-		gnttab_release_grant_reference(&rxq->gref_head, ref);
 		rxq->mbufs[i] = NULL;
 		rxq->grant_ref[i] = GRANT_REF_INVALID;
+		rxq->maps[i] = NULL;
 		m_freem(m);
 	}
 }
@@ -1303,10 +1525,12 @@
 				panic("%s: grant id %u still in use by the "
 				    "backend", __func__, id);
 			}
-			gnttab_end_foreign_access_ref(txq->grant_ref[id]);
-			gnttab_release_grant_reference(
-			    &txq->gref_head, txq->grant_ref[id]);
+
+			bus_dmamap_unload(txq->dmat, txq->maps[id]);
+			xn_repool_tx_map(txq, txq->maps[id]);
+
 			txq->grant_ref[id] = GRANT_REF_INVALID;
+			txq->maps[id] = NULL;
 			txq->mbufs[id] = NULL;
 			add_id_to_freelist(txq->mbufs, id);
@@ -1353,13 +1577,15 @@

 static void
 xn_move_rx_slot(struct netfront_rxq *rxq, struct mbuf *m,
-    grant_ref_t ref)
+    grant_ref_t ref, bus_dmamap_t map)
 {
 	int new = xn_rxidx(rxq->ring.req_prod_pvt);

 	KASSERT(rxq->mbufs[new] == NULL, ("mbufs != NULL"));
+	KASSERT(rxq->maps[new] == NULL, ("maps != NULL"));
 	rxq->mbufs[new] = m;
 	rxq->grant_ref[new] = ref;
+	rxq->maps[new] = map;
 	RING_GET_REQUEST(&rxq->ring, rxq->ring.req_prod_pvt)->id = new;
 	RING_GET_REQUEST(&rxq->ring, rxq->ring.req_prod_pvt)->gref = ref;
 	rxq->ring.req_prod_pvt++;
@@ -1376,6 +1602,7 @@
 	do {
 		struct mbuf *m;
 		grant_ref_t ref;
+		bus_dmamap_t map;

 		if (__predict_false(*cons + 1 == rp)) {
 			err = EINVAL;
@@ -1394,7 +1621,8 @@

 		m = xn_get_rx_mbuf(rxq, *cons);
 		ref = xn_get_rx_ref(rxq, *cons);
-		xn_move_rx_slot(rxq, m, ref);
+		map = xn_get_rx_map(rxq, *cons);
+		xn_move_rx_slot(rxq, m, ref, map);
 	} while (extra->flags & XEN_NETIF_EXTRA_FLAG_MORE);

 	return err;
@@ -1412,9 +1640,10 @@
 	RING_IDX ref_cons = *cons;
 	int frags = 1;
 	int err = 0;
-	u_long ret;
+	bus_dmamap_t map;

 	m0 = m = m_prev = xn_get_rx_mbuf(rxq, *cons);
+	map = xn_get_rx_map(rxq, *cons);

 	if (rx->flags & NETRXF_extra_info) {
 		err = xn_get_extras(rxq, extras, rp, cons);
@@ -1433,10 +1662,11 @@

 		if (__predict_false(rx->status < 0 ||
 		    rx->offset + rx->status > PAGE_SIZE)) {
-			xn_move_rx_slot(rxq, m, ref);
+			xn_move_rx_slot(rxq, m, ref, map);
 			if (m0 == m)
 				m0 = NULL;
 			m = NULL;
+			map = NULL;
 			err = EINVAL;
 			goto next_skip_queue;
 		}
@@ -1452,10 +1682,10 @@
 			goto next;
 		}

-		ret = gnttab_end_foreign_access_ref(ref);
-		KASSERT(ret, ("Unable to end access to grant references"));
-
-		gnttab_release_grant_reference(&rxq->gref_head, ref);
+		KASSERT(map != NULL,
+		    ("Bad map corresponding to a valid grant ref"));
+		bus_dmamap_unload(rxq->dmat, map);
+		xn_repool_rx_map(rxq, map);

 next:
 		if (m == NULL)
@@ -1501,6 +1731,7 @@
 		m0 = m;
 		m->m_next = NULL;
 		ref = xn_get_rx_ref(rxq, *cons + frags);
+		map = xn_get_rx_map(rxq, *cons + frags);
 		ref_cons = *cons + frags;
 		frags++;
 	}
@@ -1537,7 +1768,7 @@
 	struct netfront_info *np = txq->info;
 	struct ifnet *ifp = np->xn_ifp;
 	u_int nfrags;
-	int otherend_id;
+	int error;

 	/**
 	 * Defragment the mbuf if necessary.
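One assumption in the transmit path below is worth spelling out: the tag
allows a single segment with a PAGE_SIZE boundary, so a fragment whose data
straddles a page boundary cannot be loaded in one piece and would trip the
assertions in xn_dma_cb().  The removed virt_to_mfn() code made the same
single-page assumption, so this is not a regression, but a cheap guard would
make it explicit.  A hypothetical check, not part of the patch:

static inline bool
xn_frag_fits_one_page(const struct mbuf *m)
{
	vm_offset_t off;

	/* The tag maps at most one page per load. */
	off = mtod(m, vm_offset_t) & PAGE_MASK;
	return (off + m->m_len <= PAGE_SIZE);
}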
@@ -1608,12 +1839,11 @@
 	 * of fragments or hit the end of the mbuf chain.
 	 */
 	m = m_head;
-	otherend_id = xenbus_get_otherend_id(np->xbdev);
 	for (m = m_head; m; m = m->m_next) {
 		netif_tx_request_t *tx;
 		uintptr_t id;
 		grant_ref_t ref;
-		u_long mfn;	/* XXX Wrong type? */
+		bus_dmamap_t map;

 		tx = RING_GET_REQUEST(&txq->ring, txq->ring.req_prod_pvt);
 		id = get_id_from_freelist(txq->mbufs);
@@ -1626,12 +1856,21 @@
 			    __func__);
 		txq->mbufs[id] = m;
 		tx->id = id;
-		ref = gnttab_claim_grant_reference(&txq->gref_head);
-		KASSERT((short)ref >= 0, ("Negative ref"));
-		mfn = virt_to_mfn(mtod(m, vm_offset_t));
-		gnttab_grant_foreign_access_ref(ref, otherend_id,
-		    mfn, GNTMAP_readonly);
-		tx->gref = txq->grant_ref[id] = ref;
+
+		/* Take a map from the pool. */
+		map = xn_unpool_tx_map(txq);
+		KASSERT(map != NULL, ("%s: Reserved dma map pool exhausted",
+		    __func__));
+		txq->maps[id] = map;
+
+		error = bus_dmamap_load(txq->dmat, map, m->m_data,
+		    m->m_len, xn_dma_cb, txq,
+		    BUS_DMA_XEN_RO | BUS_DMA_NOWAIT);
+		KASSERT(error == 0, ("%s: map load failed", __func__));
+
+		ref = txq->grant_ref[id] = xn_get_map_gref(map);
+
+		tx->gref = ref;
 		tx->offset = mtod(m, vm_offset_t) & (PAGE_SIZE - 1);
 		tx->flags = 0;
 		if (m == m_head) {
@@ -1888,29 +2127,31 @@
 		if_link_state_change(ifp, LINK_STATE_DOWN);
 }

+/* XXX I'm not sure this is correct. */
 static void
 xn_rebuild_rx_bufs(struct netfront_rxq *rxq)
 {
-	int requeue_idx, i;
+	int requeue_idx, i, error;
 	grant_ref_t ref;
 	netif_rx_request_t *req;
+	bus_dmamap_t map;

 	for (requeue_idx = 0, i = 0; i < NET_RX_RING_SIZE; i++) {
 		struct mbuf *m;
-		u_long pfn;

 		if (rxq->mbufs[i] == NULL)
 			continue;

 		m = rxq->mbufs[requeue_idx] = xn_get_rx_mbuf(rxq, i);
-		ref = rxq->grant_ref[requeue_idx] = xn_get_rx_ref(rxq, i);
+		map = rxq->maps[requeue_idx] = xn_get_rx_map(rxq, i);

 		req = RING_GET_REQUEST(&rxq->ring, requeue_idx);
-		pfn = vtophys(mtod(m, vm_offset_t)) >> PAGE_SHIFT;

-		gnttab_grant_foreign_access_ref(ref,
-		    xenbus_get_otherend_id(rxq->info->xbdev),
-		    pfn, 0);
+		error = bus_dmamap_load(rxq->dmat, map, m->m_data,
+		    m->m_len, xn_dma_cb, rxq, BUS_DMA_NOWAIT);
+		KASSERT(error == 0, ("%s: map load failed", __func__));
+
+		ref = rxq->grant_ref[requeue_idx] = xn_get_map_gref(map);

 		req->gref = ref;
 		req->id = requeue_idx;
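On the XXX above: xn_rebuild_rx_bufs() runs on resume, when the maps
recovered via xn_get_rx_map() may still be loaded from before the suspend,
and their pre-migration grant references are stale.  Calling
bus_dmamap_load() on a map that was never unloaded is not a supported busdma
pattern and would at best leak the old state.  If that reading is right
(worth verifying against the Xen busdma implementation), the fix may be as
small as unloading first; a hypothetical change, same names as the patch:

		map = rxq->maps[requeue_idx] = xn_get_rx_map(rxq, i);

		/* Drop the stale pre-suspend mapping before re-granting. */
		bus_dmamap_unload(rxq->dmat, map);

		error = bus_dmamap_load(rxq->dmat, map, m->m_data,
		    m->m_len, xn_dma_cb, rxq, BUS_DMA_NOWAIT);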