Index: head/sys/dev/ioat/ioat.c =================================================================== --- head/sys/dev/ioat/ioat.c (revision 289981) +++ head/sys/dev/ioat/ioat.c (revision 289982) @@ -1,1226 +1,1424 @@ /*- * Copyright (C) 2012 Intel Corporation * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "ioat.h" #include "ioat_hw.h" #include "ioat_internal.h" #define IOAT_INTR_TIMO (hz / 10) #define IOAT_REFLK (&ioat->submit_lock) static int ioat_probe(device_t device); static int ioat_attach(device_t device); static int ioat_detach(device_t device); static int ioat_setup_intr(struct ioat_softc *ioat); static int ioat_teardown_intr(struct ioat_softc *ioat); static int ioat3_attach(device_t device); static int ioat_start_channel(struct ioat_softc *ioat); static int ioat_map_pci_bar(struct ioat_softc *ioat); static void ioat_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error); static void ioat_interrupt_handler(void *arg); static boolean_t ioat_model_resets_msix(struct ioat_softc *ioat); static void ioat_process_events(struct ioat_softc *ioat); static inline uint32_t ioat_get_active(struct ioat_softc *ioat); static inline uint32_t ioat_get_ring_space(struct ioat_softc *ioat); +static void ioat_free_ring(struct ioat_softc *, uint32_t size, + struct ioat_descriptor **); static void ioat_free_ring_entry(struct ioat_softc *ioat, struct ioat_descriptor *desc); -static struct ioat_descriptor *ioat_alloc_ring_entry(struct ioat_softc *ioat); -static int ioat_reserve_space_and_lock(struct ioat_softc *ioat, int num_descs); +static struct ioat_descriptor *ioat_alloc_ring_entry(struct ioat_softc *, + int mflags); +static int ioat_reserve_space(struct ioat_softc *, uint32_t, int mflags); static struct ioat_descriptor *ioat_get_ring_entry(struct ioat_softc *ioat, uint32_t index); -static boolean_t resize_ring(struct ioat_softc *ioat, int order); +static struct ioat_descriptor **ioat_prealloc_ring(struct ioat_softc *, + uint32_t size, boolean_t need_dscr, int mflags); +static int ring_grow(struct ioat_softc *, uint32_t oldorder, + struct ioat_descriptor **); +static int 
ring_shrink(struct ioat_softc *, uint32_t oldorder, + struct ioat_descriptor **); static void ioat_timer_callback(void *arg); static void dump_descriptor(void *hw_desc); static void ioat_submit_single(struct ioat_softc *ioat); static void ioat_comp_update_map(void *arg, bus_dma_segment_t *seg, int nseg, int error); static int ioat_reset_hw(struct ioat_softc *ioat); static void ioat_setup_sysctl(device_t device); static int sysctl_handle_reset(SYSCTL_HANDLER_ARGS); static inline struct ioat_softc *ioat_get(struct ioat_softc *, enum ioat_ref_kind); static inline void ioat_put(struct ioat_softc *, enum ioat_ref_kind); static inline void ioat_putn(struct ioat_softc *, uint32_t, enum ioat_ref_kind); static void ioat_drain(struct ioat_softc *); #define ioat_log_message(v, ...) do { \ if ((v) <= g_ioat_debug_level) { \ device_printf(ioat->device, __VA_ARGS__); \ } \ } while (0) MALLOC_DEFINE(M_IOAT, "ioat", "ioat driver memory allocations"); SYSCTL_NODE(_hw, OID_AUTO, ioat, CTLFLAG_RD, 0, "ioat node"); static int g_force_legacy_interrupts; SYSCTL_INT(_hw_ioat, OID_AUTO, force_legacy_interrupts, CTLFLAG_RDTUN, &g_force_legacy_interrupts, 0, "Set to non-zero to force MSI-X disabled"); int g_ioat_debug_level = 0; SYSCTL_INT(_hw_ioat, OID_AUTO, debug_level, CTLFLAG_RWTUN, &g_ioat_debug_level, 0, "Set log level (0-3) for ioat(4). Higher is more verbose."); /* * OS <-> Driver interface structures */ static device_method_t ioat_pci_methods[] = { /* Device interface */ DEVMETHOD(device_probe, ioat_probe), DEVMETHOD(device_attach, ioat_attach), DEVMETHOD(device_detach, ioat_detach), { 0, 0 } }; static driver_t ioat_pci_driver = { "ioat", ioat_pci_methods, sizeof(struct ioat_softc), }; static devclass_t ioat_devclass; DRIVER_MODULE(ioat, pci, ioat_pci_driver, ioat_devclass, 0, 0); /* * Private data structures */ static struct ioat_softc *ioat_channel[IOAT_MAX_CHANNELS]; static int ioat_channel_index = 0; SYSCTL_INT(_hw_ioat, OID_AUTO, channels, CTLFLAG_RD, &ioat_channel_index, 0, "Number of IOAT channels attached"); static struct _pcsid { u_int32_t type; const char *desc; } pci_ids[] = { { 0x34308086, "TBG IOAT Ch0" }, { 0x34318086, "TBG IOAT Ch1" }, { 0x34328086, "TBG IOAT Ch2" }, { 0x34338086, "TBG IOAT Ch3" }, { 0x34298086, "TBG IOAT Ch4" }, { 0x342a8086, "TBG IOAT Ch5" }, { 0x342b8086, "TBG IOAT Ch6" }, { 0x342c8086, "TBG IOAT Ch7" }, { 0x37108086, "JSF IOAT Ch0" }, { 0x37118086, "JSF IOAT Ch1" }, { 0x37128086, "JSF IOAT Ch2" }, { 0x37138086, "JSF IOAT Ch3" }, { 0x37148086, "JSF IOAT Ch4" }, { 0x37158086, "JSF IOAT Ch5" }, { 0x37168086, "JSF IOAT Ch6" }, { 0x37178086, "JSF IOAT Ch7" }, { 0x37188086, "JSF IOAT Ch0 (RAID)" }, { 0x37198086, "JSF IOAT Ch1 (RAID)" }, { 0x3c208086, "SNB IOAT Ch0" }, { 0x3c218086, "SNB IOAT Ch1" }, { 0x3c228086, "SNB IOAT Ch2" }, { 0x3c238086, "SNB IOAT Ch3" }, { 0x3c248086, "SNB IOAT Ch4" }, { 0x3c258086, "SNB IOAT Ch5" }, { 0x3c268086, "SNB IOAT Ch6" }, { 0x3c278086, "SNB IOAT Ch7" }, { 0x3c2e8086, "SNB IOAT Ch0 (RAID)" }, { 0x3c2f8086, "SNB IOAT Ch1 (RAID)" }, { 0x0e208086, "IVB IOAT Ch0" }, { 0x0e218086, "IVB IOAT Ch1" }, { 0x0e228086, "IVB IOAT Ch2" }, { 0x0e238086, "IVB IOAT Ch3" }, { 0x0e248086, "IVB IOAT Ch4" }, { 0x0e258086, "IVB IOAT Ch5" }, { 0x0e268086, "IVB IOAT Ch6" }, { 0x0e278086, "IVB IOAT Ch7" }, { 0x0e2e8086, "IVB IOAT Ch0 (RAID)" }, { 0x0e2f8086, "IVB IOAT Ch1 (RAID)" }, { 0x2f208086, "HSW IOAT Ch0" }, { 0x2f218086, "HSW IOAT Ch1" }, { 0x2f228086, "HSW IOAT Ch2" }, { 0x2f238086, "HSW IOAT Ch3" }, { 0x2f248086, "HSW IOAT Ch4" }, { 0x2f258086, "HSW IOAT Ch5" 
}, { 0x2f268086, "HSW IOAT Ch6" }, { 0x2f278086, "HSW IOAT Ch7" }, { 0x2f2e8086, "HSW IOAT Ch0 (RAID)" }, { 0x2f2f8086, "HSW IOAT Ch1 (RAID)" }, { 0x0c508086, "BWD IOAT Ch0" }, { 0x0c518086, "BWD IOAT Ch1" }, { 0x0c528086, "BWD IOAT Ch2" }, { 0x0c538086, "BWD IOAT Ch3" }, { 0x6f508086, "BDXDE IOAT Ch0" }, { 0x6f518086, "BDXDE IOAT Ch1" }, { 0x6f528086, "BDXDE IOAT Ch2" }, { 0x6f538086, "BDXDE IOAT Ch3" }, { 0x00000000, NULL } }; /* * OS <-> Driver linkage functions */ static int ioat_probe(device_t device) { struct _pcsid *ep; u_int32_t type; type = pci_get_devid(device); for (ep = pci_ids; ep->type; ep++) { if (ep->type == type) { device_set_desc(device, ep->desc); return (0); } } return (ENXIO); } static int ioat_attach(device_t device) { struct ioat_softc *ioat; int error; ioat = DEVICE2SOFTC(device); ioat->device = device; error = ioat_map_pci_bar(ioat); if (error != 0) goto err; ioat->version = ioat_read_cbver(ioat); if (ioat->version < IOAT_VER_3_0) { error = ENODEV; goto err; } error = ioat3_attach(device); if (error != 0) goto err; error = pci_enable_busmaster(device); if (error != 0) goto err; error = ioat_setup_intr(ioat); if (error != 0) goto err; error = ioat_reset_hw(ioat); if (error != 0) goto err; ioat_process_events(ioat); ioat_setup_sysctl(device); ioat_channel[ioat_channel_index++] = ioat; ioat_test_attach(); err: if (error != 0) ioat_detach(device); return (error); } static int ioat_detach(device_t device) { struct ioat_softc *ioat; - uint32_t i; ioat = DEVICE2SOFTC(device); ioat_test_detach(); ioat_drain(ioat); ioat_teardown_intr(ioat); callout_drain(&ioat->timer); pci_disable_busmaster(device); if (ioat->pci_resource != NULL) bus_release_resource(device, SYS_RES_MEMORY, ioat->pci_resource_id, ioat->pci_resource); - if (ioat->ring != NULL) { - for (i = 0; i < (1 << ioat->ring_size_order); i++) - ioat_free_ring_entry(ioat, ioat->ring[i]); - free(ioat->ring, M_IOAT); - } + if (ioat->ring != NULL) + ioat_free_ring(ioat, 1 << ioat->ring_size_order, ioat->ring); if (ioat->comp_update != NULL) { bus_dmamap_unload(ioat->comp_update_tag, ioat->comp_update_map); bus_dmamem_free(ioat->comp_update_tag, ioat->comp_update, ioat->comp_update_map); bus_dma_tag_destroy(ioat->comp_update_tag); } bus_dma_tag_destroy(ioat->hw_desc_tag); return (0); } static int ioat_teardown_intr(struct ioat_softc *ioat) { if (ioat->tag != NULL) bus_teardown_intr(ioat->device, ioat->res, ioat->tag); if (ioat->res != NULL) bus_release_resource(ioat->device, SYS_RES_IRQ, rman_get_rid(ioat->res), ioat->res); pci_release_msi(ioat->device); return (0); } static int ioat_start_channel(struct ioat_softc *ioat) { uint64_t status; uint32_t chanerr; int i; ioat_acquire(&ioat->dmaengine); ioat_null(&ioat->dmaengine, NULL, NULL, 0); ioat_release(&ioat->dmaengine); for (i = 0; i < 100; i++) { DELAY(1); status = ioat_get_chansts(ioat); if (is_ioat_idle(status)) return (0); } chanerr = ioat_read_4(ioat, IOAT_CHANERR_OFFSET); ioat_log_message(0, "could not start channel: " "status = %#jx error = %x\n", (uintmax_t)status, chanerr); return (ENXIO); } /* * Initialize Hardware */ static int ioat3_attach(device_t device) { struct ioat_softc *ioat; struct ioat_descriptor **ring; struct ioat_descriptor *next; struct ioat_dma_hw_descriptor *dma_hw_desc; uint32_t capabilities; int i, num_descriptors; int error; uint8_t xfercap; error = 0; ioat = DEVICE2SOFTC(device); capabilities = ioat_read_dmacapability(ioat); xfercap = ioat_read_xfercap(ioat); ioat->max_xfer_size = 1 << xfercap; /* TODO: need to check DCA here if we ever do 
XOR/PQ */ mtx_init(&ioat->submit_lock, "ioat_submit", NULL, MTX_DEF); mtx_init(&ioat->cleanup_lock, "ioat_process_events", NULL, MTX_DEF); callout_init(&ioat->timer, 1); ioat->is_resize_pending = FALSE; ioat->is_completion_pending = FALSE; ioat->is_reset_pending = FALSE; ioat->is_channel_running = FALSE; bus_dma_tag_create(bus_get_dma_tag(ioat->device), sizeof(uint64_t), 0x0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL, sizeof(uint64_t), 1, sizeof(uint64_t), 0, NULL, NULL, &ioat->comp_update_tag); error = bus_dmamem_alloc(ioat->comp_update_tag, (void **)&ioat->comp_update, BUS_DMA_ZERO, &ioat->comp_update_map); if (ioat->comp_update == NULL) return (ENOMEM); error = bus_dmamap_load(ioat->comp_update_tag, ioat->comp_update_map, ioat->comp_update, sizeof(uint64_t), ioat_comp_update_map, ioat, 0); if (error != 0) return (error); ioat->ring_size_order = IOAT_MIN_ORDER; num_descriptors = 1 << ioat->ring_size_order; bus_dma_tag_create(bus_get_dma_tag(ioat->device), 0x40, 0x0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL, sizeof(struct ioat_dma_hw_descriptor), 1, sizeof(struct ioat_dma_hw_descriptor), 0, NULL, NULL, &ioat->hw_desc_tag); ioat->ring = malloc(num_descriptors * sizeof(*ring), M_IOAT, - M_ZERO | M_NOWAIT); + M_ZERO | M_WAITOK); if (ioat->ring == NULL) return (ENOMEM); ring = ioat->ring; for (i = 0; i < num_descriptors; i++) { - ring[i] = ioat_alloc_ring_entry(ioat); + ring[i] = ioat_alloc_ring_entry(ioat, M_WAITOK); if (ring[i] == NULL) return (ENOMEM); ring[i]->id = i; } for (i = 0; i < num_descriptors - 1; i++) { next = ring[i + 1]; dma_hw_desc = ring[i]->u.dma; dma_hw_desc->next = next->hw_desc_bus_addr; } ring[i]->u.dma->next = ring[0]->hw_desc_bus_addr; - ioat->head = 0; + ioat->head = ioat->hw_head = 0; ioat->tail = 0; ioat->last_seen = 0; return (0); } static int ioat_map_pci_bar(struct ioat_softc *ioat) { ioat->pci_resource_id = PCIR_BAR(0); ioat->pci_resource = bus_alloc_resource_any(ioat->device, SYS_RES_MEMORY, &ioat->pci_resource_id, RF_ACTIVE); if (ioat->pci_resource == NULL) { ioat_log_message(0, "unable to allocate pci resource\n"); return (ENODEV); } ioat->pci_bus_tag = rman_get_bustag(ioat->pci_resource); ioat->pci_bus_handle = rman_get_bushandle(ioat->pci_resource); return (0); } static void ioat_comp_update_map(void *arg, bus_dma_segment_t *seg, int nseg, int error) { struct ioat_softc *ioat = arg; KASSERT(error == 0, ("%s: error:%d", __func__, error)); ioat->comp_update_bus_addr = seg[0].ds_addr; } static void ioat_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error) { bus_addr_t *baddr; KASSERT(error == 0, ("%s: error:%d", __func__, error)); baddr = arg; *baddr = segs->ds_addr; } /* * Interrupt setup and handlers */ static int ioat_setup_intr(struct ioat_softc *ioat) { uint32_t num_vectors; int error; boolean_t use_msix; boolean_t force_legacy_interrupts; use_msix = FALSE; force_legacy_interrupts = FALSE; if (!g_force_legacy_interrupts && pci_msix_count(ioat->device) >= 1) { num_vectors = 1; pci_alloc_msix(ioat->device, &num_vectors); if (num_vectors == 1) use_msix = TRUE; } if (use_msix) { ioat->rid = 1; ioat->res = bus_alloc_resource_any(ioat->device, SYS_RES_IRQ, &ioat->rid, RF_ACTIVE); } else { ioat->rid = 0; ioat->res = bus_alloc_resource_any(ioat->device, SYS_RES_IRQ, &ioat->rid, RF_SHAREABLE | RF_ACTIVE); } if (ioat->res == NULL) { ioat_log_message(0, "bus_alloc_resource failed\n"); return (ENOMEM); } ioat->tag = NULL; error = bus_setup_intr(ioat->device, ioat->res, INTR_MPSAFE | INTR_TYPE_MISC, NULL, ioat_interrupt_handler, ioat, 
&ioat->tag); if (error != 0) { ioat_log_message(0, "bus_setup_intr failed\n"); return (error); } ioat_write_intrctrl(ioat, IOAT_INTRCTRL_MASTER_INT_EN); return (0); } static boolean_t ioat_model_resets_msix(struct ioat_softc *ioat) { u_int32_t pciid; pciid = pci_get_devid(ioat->device); switch (pciid) { /* BWD: */ case 0x0c508086: case 0x0c518086: case 0x0c528086: case 0x0c538086: /* BDXDE: */ case 0x6f508086: case 0x6f518086: case 0x6f528086: case 0x6f538086: return (TRUE); } return (FALSE); } static void ioat_interrupt_handler(void *arg) { struct ioat_softc *ioat = arg; ioat_process_events(ioat); } static void ioat_process_events(struct ioat_softc *ioat) { struct ioat_descriptor *desc; struct bus_dmadesc *dmadesc; uint64_t comp_update, status; uint32_t completed; mtx_lock(&ioat->cleanup_lock); completed = 0; comp_update = *ioat->comp_update; status = comp_update & IOAT_CHANSTS_COMPLETED_DESCRIPTOR_MASK; CTR0(KTR_IOAT, __func__); if (status == ioat->last_seen) goto out; while (1) { desc = ioat_get_ring_entry(ioat, ioat->tail); dmadesc = &desc->bus_dmadesc; CTR1(KTR_IOAT, "completing desc %d", ioat->tail); if (dmadesc->callback_fn) (*dmadesc->callback_fn)(dmadesc->callback_arg); completed++; ioat->tail++; if (desc->hw_desc_bus_addr == status) break; } ioat->last_seen = desc->hw_desc_bus_addr; if (ioat->head == ioat->tail) { ioat->is_completion_pending = FALSE; callout_reset(&ioat->timer, IOAT_INTR_TIMO, ioat_timer_callback, ioat); } out: ioat_write_chanctrl(ioat, IOAT_CHANCTRL_RUN); mtx_unlock(&ioat->cleanup_lock); ioat_putn(ioat, completed, IOAT_ACTIVE_DESCR_REF); + wakeup(&ioat->tail); } /* * User API functions */ bus_dmaengine_t ioat_get_dmaengine(uint32_t index) { if (index >= ioat_channel_index) return (NULL); return (&ioat_get(ioat_channel[index], IOAT_DMAENGINE_REF)->dmaengine); } void ioat_put_dmaengine(bus_dmaengine_t dmaengine) { struct ioat_softc *ioat; ioat = to_ioat_softc(dmaengine); ioat_put(ioat, IOAT_DMAENGINE_REF); } void ioat_acquire(bus_dmaengine_t dmaengine) { struct ioat_softc *ioat; ioat = to_ioat_softc(dmaengine); mtx_lock(&ioat->submit_lock); CTR0(KTR_IOAT, __func__); } void ioat_release(bus_dmaengine_t dmaengine) { struct ioat_softc *ioat; ioat = to_ioat_softc(dmaengine); CTR0(KTR_IOAT, __func__); - ioat_write_2(ioat, IOAT_DMACOUNT_OFFSET, (uint16_t)ioat->head); + ioat_write_2(ioat, IOAT_DMACOUNT_OFFSET, (uint16_t)ioat->hw_head); mtx_unlock(&ioat->submit_lock); } struct bus_dmadesc * ioat_null(bus_dmaengine_t dmaengine, bus_dmaengine_callback_t callback_fn, void *callback_arg, uint32_t flags) { struct ioat_softc *ioat; struct ioat_descriptor *desc; struct ioat_dma_hw_descriptor *hw_desc; + int mflags; KASSERT((flags & ~DMA_ALL_FLAGS) == 0, ("Unrecognized flag(s): %#x", flags & ~DMA_ALL_FLAGS)); + if ((flags & DMA_NO_WAIT) != 0) + mflags = M_NOWAIT; + else + mflags = M_WAITOK; ioat = to_ioat_softc(dmaengine); mtx_assert(&ioat->submit_lock, MA_OWNED); - if (ioat_reserve_space_and_lock(ioat, 1) != 0) + if (ioat_reserve_space(ioat, 1, mflags) != 0) return (NULL); CTR0(KTR_IOAT, __func__); desc = ioat_get_ring_entry(ioat, ioat->head); hw_desc = desc->u.dma; hw_desc->u.control_raw = 0; hw_desc->u.control.null = 1; hw_desc->u.control.completion_update = 1; if ((flags & DMA_INT_EN) != 0) hw_desc->u.control.int_enable = 1; hw_desc->size = 8; hw_desc->src_addr = 0; hw_desc->dest_addr = 0; desc->bus_dmadesc.callback_fn = callback_fn; desc->bus_dmadesc.callback_arg = callback_arg; ioat_submit_single(ioat); return (&desc->bus_dmadesc); } struct bus_dmadesc * 
ioat_copy(bus_dmaengine_t dmaengine, bus_addr_t dst, bus_addr_t src, bus_size_t len, bus_dmaengine_callback_t callback_fn, void *callback_arg, uint32_t flags) { struct ioat_descriptor *desc; struct ioat_dma_hw_descriptor *hw_desc; struct ioat_softc *ioat; + int mflags; KASSERT((flags & ~DMA_ALL_FLAGS) == 0, ("Unrecognized flag(s): %#x", flags & ~DMA_ALL_FLAGS)); + if ((flags & DMA_NO_WAIT) != 0) + mflags = M_NOWAIT; + else + mflags = M_WAITOK; ioat = to_ioat_softc(dmaengine); mtx_assert(&ioat->submit_lock, MA_OWNED); if (len > ioat->max_xfer_size) { ioat_log_message(0, "%s: max_xfer_size = %d, requested = %d\n", __func__, ioat->max_xfer_size, (int)len); return (NULL); } - if (ioat_reserve_space_and_lock(ioat, 1) != 0) + if (ioat_reserve_space(ioat, 1, mflags) != 0) return (NULL); CTR0(KTR_IOAT, __func__); desc = ioat_get_ring_entry(ioat, ioat->head); hw_desc = desc->u.dma; hw_desc->u.control_raw = 0; hw_desc->u.control.completion_update = 1; if ((flags & DMA_INT_EN) != 0) hw_desc->u.control.int_enable = 1; hw_desc->size = len; hw_desc->src_addr = src; hw_desc->dest_addr = dst; if (g_ioat_debug_level >= 3) dump_descriptor(hw_desc); desc->bus_dmadesc.callback_fn = callback_fn; desc->bus_dmadesc.callback_arg = callback_arg; ioat_submit_single(ioat); return (&desc->bus_dmadesc); } /* * Ring Management */ static inline uint32_t ioat_get_active(struct ioat_softc *ioat) { return ((ioat->head - ioat->tail) & ((1 << ioat->ring_size_order) - 1)); } static inline uint32_t ioat_get_ring_space(struct ioat_softc *ioat) { return ((1 << ioat->ring_size_order) - ioat_get_active(ioat) - 1); } static struct ioat_descriptor * -ioat_alloc_ring_entry(struct ioat_softc *ioat) +ioat_alloc_ring_entry(struct ioat_softc *ioat, int mflags) { struct ioat_dma_hw_descriptor *hw_desc; struct ioat_descriptor *desc; - int error; + int error, busdmaflag; error = ENOMEM; hw_desc = NULL; - desc = malloc(sizeof(*desc), M_IOAT, M_NOWAIT); + if ((mflags & M_WAITOK) != 0) + busdmaflag = BUS_DMA_WAITOK; + else + busdmaflag = BUS_DMA_NOWAIT; + + desc = malloc(sizeof(*desc), M_IOAT, mflags); if (desc == NULL) goto out; bus_dmamem_alloc(ioat->hw_desc_tag, (void **)&hw_desc, - BUS_DMA_ZERO | BUS_DMA_NOWAIT, &ioat->hw_desc_map); + BUS_DMA_ZERO | busdmaflag, &ioat->hw_desc_map); if (hw_desc == NULL) goto out; desc->u.dma = hw_desc; error = bus_dmamap_load(ioat->hw_desc_tag, ioat->hw_desc_map, hw_desc, sizeof(*hw_desc), ioat_dmamap_cb, &desc->hw_desc_bus_addr, - BUS_DMA_NOWAIT); + busdmaflag); if (error) goto out; out: if (error) { ioat_free_ring_entry(ioat, desc); return (NULL); } return (desc); } static void ioat_free_ring_entry(struct ioat_softc *ioat, struct ioat_descriptor *desc) { if (desc == NULL) return; if (desc->u.dma) bus_dmamem_free(ioat->hw_desc_tag, desc->u.dma, ioat->hw_desc_map); free(desc, M_IOAT); } +/* + * Reserves space in this IOAT descriptor ring by ensuring enough slots remain + * for 'num_descs'. + * + * If mflags contains M_WAITOK, blocks until enough space is available. + * + * Returns zero on success, or an errno on error. If num_descs is beyond the + * maximum ring size, returns EINVAL; if allocation would block and mflags + * contains M_NOWAIT, returns EAGAIN. + * + * Must be called with the submit_lock held; returns with the lock held. The + * lock may be dropped to allocate the ring. + * + * (The submit_lock is needed to add any entries to the ring, so callers are + * assured enough room is available.)
+ */ static int -ioat_reserve_space_and_lock(struct ioat_softc *ioat, int num_descs) +ioat_reserve_space(struct ioat_softc *ioat, uint32_t num_descs, int mflags) { - boolean_t retry; + struct ioat_descriptor **new_ring; + uint32_t order; + int error; - while (1) { + mtx_assert(&ioat->submit_lock, MA_OWNED); + error = 0; + + if (num_descs < 1 || num_descs > (1 << IOAT_MAX_ORDER)) { + error = EINVAL; + goto out; + } + + for (;;) { if (ioat_get_ring_space(ioat) >= num_descs) - return (0); + goto out; - mtx_lock(&ioat->cleanup_lock); - retry = resize_ring(ioat, ioat->ring_size_order + 1); - mtx_unlock(&ioat->cleanup_lock); + order = ioat->ring_size_order; + if (ioat->is_resize_pending || order == IOAT_MAX_ORDER) { + if ((mflags & M_WAITOK) != 0) { + msleep(&ioat->tail, &ioat->submit_lock, 0, + "ioat_rsz", 0); + continue; + } - if (!retry) - return (ENOMEM); + error = EAGAIN; + break; + } + + ioat->is_resize_pending = TRUE; + for (;;) { + mtx_unlock(&ioat->submit_lock); + + new_ring = ioat_prealloc_ring(ioat, 1 << (order + 1), + TRUE, mflags); + + mtx_lock(&ioat->submit_lock); + KASSERT(ioat->ring_size_order == order, + ("is_resize_pending should protect order")); + + if (new_ring == NULL) { + KASSERT((mflags & M_WAITOK) == 0, + ("allocation failed")); + error = EAGAIN; + break; + } + + error = ring_grow(ioat, order, new_ring); + if (error == 0) + break; + } + ioat->is_resize_pending = FALSE; + wakeup(&ioat->tail); + if (error) + break; } + +out: + mtx_assert(&ioat->submit_lock, MA_OWNED); + return (error); } +static struct ioat_descriptor ** +ioat_prealloc_ring(struct ioat_softc *ioat, uint32_t size, boolean_t need_dscr, + int mflags) +{ + struct ioat_descriptor **ring; + uint32_t i; + int error; + + KASSERT(size > 0 && powerof2(size), ("bogus size")); + + ring = malloc(size * sizeof(*ring), M_IOAT, M_ZERO | mflags); + if (ring == NULL) + return (NULL); + + if (need_dscr) { + error = ENOMEM; + for (i = size / 2; i < size; i++) { + ring[i] = ioat_alloc_ring_entry(ioat, mflags); + if (ring[i] == NULL) + goto out; + ring[i]->id = i; + } + } + error = 0; + +out: + if (error != 0 && ring != NULL) { + ioat_free_ring(ioat, size, ring); + ring = NULL; + } + return (ring); +} + +static void +ioat_free_ring(struct ioat_softc *ioat, uint32_t size, + struct ioat_descriptor **ring) +{ + uint32_t i; + + for (i = 0; i < size; i++) { + if (ring[i] != NULL) + ioat_free_ring_entry(ioat, ring[i]); + } + free(ring, M_IOAT); +} + static struct ioat_descriptor * ioat_get_ring_entry(struct ioat_softc *ioat, uint32_t index) { return (ioat->ring[index % (1 << ioat->ring_size_order)]); } -static boolean_t -resize_ring(struct ioat_softc *ioat, int order) +static int +ring_grow(struct ioat_softc *ioat, uint32_t oldorder, + struct ioat_descriptor **newring) { - struct ioat_descriptor **ring; - struct ioat_descriptor *next; + struct ioat_descriptor *tmp, *next; struct ioat_dma_hw_descriptor *hw; - struct ioat_descriptor *ent; - uint32_t current_size, active, new_size, i, new_idx, current_idx; - uint32_t new_idx2; + uint32_t oldsize, newsize, head, tail, i, end; + int error; - current_size = 1 << ioat->ring_size_order; - active = (ioat->head - ioat->tail) & (current_size - 1); - new_size = 1 << order; + CTR0(KTR_IOAT, __func__); - if (order > IOAT_MAX_ORDER) - return (FALSE); + mtx_assert(&ioat->submit_lock, MA_OWNED); - /* - * when shrinking, verify that we can hold the current active - * set in the new ring - */ - if (active >= new_size) - return (FALSE); + if (oldorder != ioat->ring_size_order || oldorder >= 
IOAT_MAX_ORDER) { + error = EINVAL; + goto out; + } - /* allocate the array to hold the software ring */ - ring = malloc(new_size * sizeof(*ring), M_IOAT, M_ZERO | M_NOWAIT); - if (ring == NULL) - return (FALSE); + oldsize = (1 << oldorder); + newsize = (1 << (oldorder + 1)); - ioat_log_message(2, "ring resize: new: %d old: %d\n", - new_size, current_size); + mtx_lock(&ioat->cleanup_lock); - /* allocate/trim descriptors as needed */ - if (new_size > current_size) { - /* copy current descriptors to the new ring */ - for (i = 0; i < current_size; i++) { - current_idx = (ioat->tail + i) & (current_size - 1); - new_idx = (ioat->tail + i) & (new_size - 1); + head = ioat->head & (oldsize - 1); + tail = ioat->tail & (oldsize - 1); - ring[new_idx] = ioat->ring[current_idx]; - ring[new_idx]->id = new_idx; - } + /* Copy old descriptors to new ring */ + for (i = 0; i < oldsize; i++) + newring[i] = ioat->ring[i]; - /* add new descriptors to the ring */ - for (i = current_size; i < new_size; i++) { - new_idx = (ioat->tail + i) & (new_size - 1); + /* + * If head has wrapped but tail hasn't, we must swap some descriptors + * around so that tail can increment directly to head. + */ + if (head < tail) { + for (i = 0; i <= head; i++) { + tmp = newring[oldsize + i]; - ring[new_idx] = ioat_alloc_ring_entry(ioat); - if (ring[new_idx] == NULL) { - while (i--) { - new_idx2 = (ioat->tail + i) & - (new_size - 1); + newring[oldsize + i] = newring[i]; + newring[oldsize + i]->id = oldsize + i; - ioat_free_ring_entry(ioat, - ring[new_idx2]); - } - free(ring, M_IOAT); - return (FALSE); - } - ring[new_idx]->id = new_idx; + newring[i] = tmp; + newring[i]->id = i; } + head += oldsize; + } - for (i = current_size - 1; i < new_size; i++) { - new_idx = (ioat->tail + i) & (new_size - 1); - next = ring[(new_idx + 1) & (new_size - 1)]; - hw = ring[new_idx]->u.dma; + KASSERT(head >= tail, ("invariants")); - hw->next = next->hw_desc_bus_addr; - } + /* Head didn't wrap; we only need to link in oldsize..newsize */ + if (head < oldsize) { + i = oldsize - 1; + end = newsize; } else { - /* - * copy current descriptors to the new ring, dropping the - * removed descriptors - */ - for (i = 0; i < new_size; i++) { - current_idx = (ioat->tail + i) & (current_size - 1); - new_idx = (ioat->tail + i) & (new_size - 1); + /* Head did wrap; link newhead..newsize and 0..oldhead */ + i = head; + end = newsize + (head - oldsize) + 1; + } - ring[new_idx] = ioat->ring[current_idx]; - ring[new_idx]->id = new_idx; - } + /* + * Fix up hardware ring, being careful not to trample the active + * section (tail -> head). 
+ */ + for (; i < end; i++) { + KASSERT((i & (newsize - 1)) < tail || + (i & (newsize - 1)) >= head, ("trampling snake")); - /* free deleted descriptors */ - for (i = new_size; i < current_size; i++) { - ent = ioat_get_ring_entry(ioat, ioat->tail + i); - ioat_free_ring_entry(ioat, ent); - } - - /* fix up hardware ring */ - hw = ring[(ioat->tail + new_size - 1) & (new_size - 1)]->u.dma; - next = ring[(ioat->tail + new_size) & (new_size - 1)]; + next = newring[(i + 1) & (newsize - 1)]; + hw = newring[i & (newsize - 1)]->u.dma; hw->next = next->hw_desc_bus_addr; } free(ioat->ring, M_IOAT); - ioat->ring = ring; - ioat->ring_size_order = order; + ioat->ring = newring; + ioat->ring_size_order = oldorder + 1; + ioat->tail = tail; + ioat->head = head; + error = 0; - return (TRUE); + mtx_unlock(&ioat->cleanup_lock); +out: + if (error) + ioat_free_ring(ioat, (1 << (oldorder + 1)), newring); + return (error); } +static int +ring_shrink(struct ioat_softc *ioat, uint32_t oldorder, + struct ioat_descriptor **newring) +{ + struct ioat_dma_hw_descriptor *hw; + struct ioat_descriptor *ent, *next; + uint32_t oldsize, newsize, current_idx, new_idx, i; + int error; + + CTR0(KTR_IOAT, __func__); + + mtx_assert(&ioat->submit_lock, MA_OWNED); + + if (oldorder != ioat->ring_size_order || oldorder <= IOAT_MIN_ORDER) { + error = EINVAL; + goto out_unlocked; + } + + oldsize = (1 << oldorder); + newsize = (1 << (oldorder - 1)); + + mtx_lock(&ioat->cleanup_lock); + + /* Can't shrink below current active set! */ + if (ioat_get_active(ioat) >= newsize) { + error = ENOMEM; + goto out; + } + + /* + * Copy current descriptors to the new ring, dropping the removed + * descriptors. + */ + for (i = 0; i < newsize; i++) { + current_idx = (ioat->tail + i) & (oldsize - 1); + new_idx = (ioat->tail + i) & (newsize - 1); + + newring[new_idx] = ioat->ring[current_idx]; + newring[new_idx]->id = new_idx; + } + + /* Free deleted descriptors */ + for (i = newsize; i < oldsize; i++) { + ent = ioat_get_ring_entry(ioat, ioat->tail + i); + ioat_free_ring_entry(ioat, ent); + } + + /* Fix up hardware ring. */ + hw = newring[(ioat->tail + newsize - 1) & (newsize - 1)]->u.dma; + next = newring[(ioat->tail + newsize) & (newsize - 1)]; + hw->next = next->hw_desc_bus_addr; + + free(ioat->ring, M_IOAT); + ioat->ring = newring; + ioat->ring_size_order = oldorder - 1; + error = 0; + +out: + mtx_unlock(&ioat->cleanup_lock); +out_unlocked: + if (error) + ioat_free_ring(ioat, (1 << (oldorder - 1)), newring); + return (error); +} + static void ioat_halted_debug(struct ioat_softc *ioat, uint32_t chanerr) { struct ioat_descriptor *desc; ioat_log_message(0, "Channel halted (%x)\n", chanerr); if (chanerr == 0) return; + mtx_lock(&ioat->submit_lock); desc = ioat_get_ring_entry(ioat, ioat->tail + 0); dump_descriptor(desc->u.raw); desc = ioat_get_ring_entry(ioat, ioat->tail + 1); dump_descriptor(desc->u.raw); + mtx_unlock(&ioat->submit_lock); } static void ioat_timer_callback(void *arg) { + struct ioat_descriptor **newring; struct ioat_softc *ioat; uint64_t status; - uint32_t chanerr; + uint32_t chanerr, order; ioat = arg; ioat_log_message(1, "%s\n", __func__); if (ioat->is_completion_pending) { status = ioat_get_chansts(ioat); /* * When halted due to errors, check for channel programming * errors before advancing the completion state. 
*/ if (is_ioat_halted(status)) { chanerr = ioat_read_4(ioat, IOAT_CHANERR_OFFSET); ioat_halted_debug(ioat, chanerr); } ioat_process_events(ioat); } else { mtx_lock(&ioat->submit_lock); - mtx_lock(&ioat->cleanup_lock); + order = ioat->ring_size_order; + if (ioat->is_resize_pending || order == IOAT_MIN_ORDER) { + mtx_unlock(&ioat->submit_lock); + goto out; + } + ioat->is_resize_pending = TRUE; + mtx_unlock(&ioat->submit_lock); - if (ioat_get_active(ioat) == 0 && - ioat->ring_size_order > IOAT_MIN_ORDER) - resize_ring(ioat, ioat->ring_size_order - 1); + newring = ioat_prealloc_ring(ioat, 1 << (order - 1), FALSE, + M_NOWAIT); - mtx_unlock(&ioat->cleanup_lock); + mtx_lock(&ioat->submit_lock); + KASSERT(ioat->ring_size_order == order, + ("resize_pending protects order")); + + if (newring != NULL) + ring_shrink(ioat, order, newring); + + ioat->is_resize_pending = FALSE; mtx_unlock(&ioat->submit_lock); +out: + /* Slowly scale the ring down if idle. */ if (ioat->ring_size_order > IOAT_MIN_ORDER) - callout_reset(&ioat->timer, IOAT_INTR_TIMO, + callout_reset(&ioat->timer, 10 * hz, ioat_timer_callback, ioat); } } /* * Support Functions */ static void ioat_submit_single(struct ioat_softc *ioat) { ioat_get(ioat, IOAT_ACTIVE_DESCR_REF); atomic_add_rel_int(&ioat->head, 1); + atomic_add_rel_int(&ioat->hw_head, 1); if (!ioat->is_completion_pending) { ioat->is_completion_pending = TRUE; callout_reset(&ioat->timer, IOAT_INTR_TIMO, ioat_timer_callback, ioat); } } static int ioat_reset_hw(struct ioat_softc *ioat) { uint64_t status; uint32_t chanerr; unsigned timeout; status = ioat_get_chansts(ioat); if (is_ioat_active(status) || is_ioat_idle(status)) ioat_suspend(ioat); /* Wait at most 20 ms */ for (timeout = 0; (is_ioat_active(status) || is_ioat_idle(status)) && timeout < 20; timeout++) { DELAY(1000); status = ioat_get_chansts(ioat); } if (timeout == 20) return (ETIMEDOUT); KASSERT(ioat_get_active(ioat) == 0, ("active after quiesce")); chanerr = ioat_read_4(ioat, IOAT_CHANERR_OFFSET); ioat_write_4(ioat, IOAT_CHANERR_OFFSET, chanerr); /* * IOAT v3 workaround - CHANERRMSK_INT with 3E07h to mask out errors * that can cause stability issues for IOAT v3. */ pci_write_config(ioat->device, IOAT_CFG_CHANERRMASK_INT_OFFSET, 0x3e07, 4); chanerr = pci_read_config(ioat->device, IOAT_CFG_CHANERR_INT_OFFSET, 4); pci_write_config(ioat->device, IOAT_CFG_CHANERR_INT_OFFSET, chanerr, 4); /* * BDXDE and BWD models reset MSI-X registers on device reset. * Save/restore their contents manually. */ if (ioat_model_resets_msix(ioat)) { ioat_log_message(1, "device resets MSI-X registers; saving\n"); pci_save_state(ioat->device); } ioat_reset(ioat); /* Wait at most 20 ms */ for (timeout = 0; ioat_reset_pending(ioat) && timeout < 20; timeout++) DELAY(1000); if (timeout == 20) return (ETIMEDOUT); if (ioat_model_resets_msix(ioat)) { ioat_log_message(1, "device resets registers; restored\n"); pci_restore_state(ioat->device); } /* Reset attempts to return the hardware to "halted." */ status = ioat_get_chansts(ioat); if (is_ioat_active(status) || is_ioat_idle(status)) { /* So this really shouldn't happen... */ ioat_log_message(0, "Device is active after a reset?\n"); ioat_write_chanctrl(ioat, IOAT_CHANCTRL_RUN); return (0); } chanerr = ioat_read_4(ioat, IOAT_CHANERR_OFFSET); ioat_halted_debug(ioat, chanerr); if (chanerr != 0) return (EIO); /* * Bring device back online after reset. Writing CHAINADDR brings the * device back to active. * * The internal ring counter resets to zero, so we have to start over * at zero as well.
*/ - ioat->tail = ioat->head = 0; + ioat->tail = ioat->head = ioat->hw_head = 0; ioat->last_seen = 0; ioat_write_chanctrl(ioat, IOAT_CHANCTRL_RUN); ioat_write_chancmp(ioat, ioat->comp_update_bus_addr); ioat_write_chainaddr(ioat, ioat->ring[0]->hw_desc_bus_addr); return (ioat_start_channel(ioat)); } static int sysctl_handle_reset(SYSCTL_HANDLER_ARGS) { struct ioat_softc *ioat; int error, arg; ioat = arg1; arg = 0; error = SYSCTL_OUT(req, &arg, sizeof(arg)); if (error != 0 || req->newptr == NULL) return (error); error = SYSCTL_IN(req, &arg, sizeof(arg)); if (error != 0) return (error); if (arg != 0) error = ioat_reset_hw(ioat); return (error); } static void dump_descriptor(void *hw_desc) { int i, j; for (i = 0; i < 2; i++) { for (j = 0; j < 8; j++) printf("%08x ", ((uint32_t *)hw_desc)[i * 8 + j]); printf("\n"); } } static void ioat_setup_sysctl(device_t device) { struct sysctl_oid_list *par; struct sysctl_ctx_list *ctx; struct sysctl_oid *tree; struct ioat_softc *ioat; ioat = DEVICE2SOFTC(device); ctx = device_get_sysctl_ctx(device); tree = device_get_sysctl_tree(device); par = SYSCTL_CHILDREN(tree); SYSCTL_ADD_INT(ctx, par, OID_AUTO, "version", CTLFLAG_RD, &ioat->version, 0, "HW version (0xMM form)"); SYSCTL_ADD_UINT(ctx, par, OID_AUTO, "max_xfer_size", CTLFLAG_RD, &ioat->max_xfer_size, 0, "HW maximum transfer size"); SYSCTL_ADD_UINT(ctx, par, OID_AUTO, "ring_size_order", CTLFLAG_RD, - &ioat->ring_size_order, 0, "HW descriptor ring size order"); + &ioat->ring_size_order, 0, "SW descriptor ring size order"); SYSCTL_ADD_UINT(ctx, par, OID_AUTO, "head", CTLFLAG_RD, &ioat->head, 0, - "HW descriptor head pointer index"); + "SW descriptor head pointer index"); SYSCTL_ADD_UINT(ctx, par, OID_AUTO, "tail", CTLFLAG_RD, &ioat->tail, 0, - "HW descriptor tail pointer index"); + "SW descriptor tail pointer index"); + SYSCTL_ADD_UINT(ctx, par, OID_AUTO, "hw_head", CTLFLAG_RD, + &ioat->hw_head, 0, "HW DMACOUNT"); SYSCTL_ADD_UQUAD(ctx, par, OID_AUTO, "last_completion", CTLFLAG_RD, ioat->comp_update, "HW addr of last completion"); SYSCTL_ADD_INT(ctx, par, OID_AUTO, "is_resize_pending", CTLFLAG_RD, &ioat->is_resize_pending, 0, "resize pending"); SYSCTL_ADD_INT(ctx, par, OID_AUTO, "is_completion_pending", CTLFLAG_RD, &ioat->is_completion_pending, 0, "completion pending"); SYSCTL_ADD_INT(ctx, par, OID_AUTO, "is_reset_pending", CTLFLAG_RD, &ioat->is_reset_pending, 0, "reset pending"); SYSCTL_ADD_INT(ctx, par, OID_AUTO, "is_channel_running", CTLFLAG_RD, &ioat->is_channel_running, 0, "channel running"); SYSCTL_ADD_PROC(ctx, par, OID_AUTO, "force_hw_reset", CTLTYPE_INT | CTLFLAG_RW, ioat, 0, sysctl_handle_reset, "I", "Set to non-zero to reset the hardware"); } static inline struct ioat_softc * ioat_get(struct ioat_softc *ioat, enum ioat_ref_kind kind) { uint32_t old; KASSERT(kind < IOAT_NUM_REF_KINDS, ("bogus")); old = atomic_fetchadd_32(&ioat->refcnt, 1); KASSERT(old < UINT32_MAX, ("refcnt overflow")); #ifdef INVARIANTS old = atomic_fetchadd_32(&ioat->refkinds[kind], 1); KASSERT(old < UINT32_MAX, ("refcnt kind overflow")); #endif return (ioat); } static inline void ioat_putn(struct ioat_softc *ioat, uint32_t n, enum ioat_ref_kind kind) { uint32_t old; KASSERT(kind < IOAT_NUM_REF_KINDS, ("bogus")); if (n == 0) return; #ifdef INVARIANTS old = atomic_fetchadd_32(&ioat->refkinds[kind], -n); KASSERT(old >= n, ("refcnt kind underflow")); #endif /* Skip acquiring the lock if resulting refcnt > 0. 
*/ for (;;) { old = ioat->refcnt; if (old <= n) break; if (atomic_cmpset_32(&ioat->refcnt, old, old - n)) return; } mtx_lock(IOAT_REFLK); old = atomic_fetchadd_32(&ioat->refcnt, -n); KASSERT(old >= n, ("refcnt error")); if (old == n) wakeup(IOAT_REFLK); mtx_unlock(IOAT_REFLK); } static inline void ioat_put(struct ioat_softc *ioat, enum ioat_ref_kind kind) { ioat_putn(ioat, 1, kind); } static void ioat_drain(struct ioat_softc *ioat) { mtx_lock(IOAT_REFLK); while (ioat->refcnt > 0) msleep(IOAT_REFLK, IOAT_REFLK, 0, "ioat_drain", 0); mtx_unlock(IOAT_REFLK); } Index: head/sys/dev/ioat/ioat.h =================================================================== --- head/sys/dev/ioat/ioat.h (revision 289981) +++ head/sys/dev/ioat/ioat.h (revision 289982) @@ -1,80 +1,85 @@ /*- * Copyright (C) 2012 Intel Corporation * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ __FBSDID("$FreeBSD$"); #ifndef __IOAT_H__ #define __IOAT_H__ #include #include /* * This file defines the public interface to the IOAT driver. */ /* * Enables an interrupt for this operation. Typically, you would only enable * this on the last operation in a group */ #define DMA_INT_EN 0x1 -#define DMA_ALL_FLAGS (DMA_INT_EN) +/* + * Like M_NOWAIT. Operations will return NULL if they cannot allocate a + * descriptor without blocking. + */ +#define DMA_NO_WAIT 0x2 +#define DMA_ALL_FLAGS (DMA_INT_EN | DMA_NO_WAIT) typedef void *bus_dmaengine_t; struct bus_dmadesc; typedef void (*bus_dmaengine_callback_t)(void *arg); /* * Called first to acquire a reference to the DMA channel */ bus_dmaengine_t ioat_get_dmaengine(uint32_t channel_index); /* Release the DMA channel */ void ioat_put_dmaengine(bus_dmaengine_t dmaengine); /* * Acquire must be called before issuing an operation to perform. Release is * called after. Multiple operations can be issued within the context of one * acquire and release */ void ioat_acquire(bus_dmaengine_t dmaengine); void ioat_release(bus_dmaengine_t dmaengine); /* Issues the copy data operation */ struct bus_dmadesc *ioat_copy(bus_dmaengine_t dmaengine, bus_addr_t dst, bus_addr_t src, bus_size_t len, bus_dmaengine_callback_t callback_fn, void *callback_arg, uint32_t flags); /* * Issues a null operation. This issues the operation to the hardware, but the * hardware doesn't do anything with it. 
*/ struct bus_dmadesc *ioat_null(bus_dmaengine_t dmaengine, bus_dmaengine_callback_t callback_fn, void *callback_arg, uint32_t flags); #endif /* __IOAT_H__ */ Index: head/sys/dev/ioat/ioat_internal.h =================================================================== --- head/sys/dev/ioat/ioat_internal.h (revision 289981) +++ head/sys/dev/ioat/ioat_internal.h (revision 289982) @@ -1,457 +1,458 @@ /*- * Copyright (C) 2012 Intel Corporation * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ __FBSDID("$FreeBSD$"); #ifndef __IOAT_INTERNAL_H__ #define __IOAT_INTERNAL_H__ #define DEVICE2SOFTC(dev) ((struct ioat_softc *) device_get_softc(dev)) #define KTR_IOAT KTR_SPARE3 #define ioat_read_chancnt(ioat) \ ioat_read_1((ioat), IOAT_CHANCNT_OFFSET) #define ioat_read_xfercap(ioat) \ (ioat_read_1((ioat), IOAT_XFERCAP_OFFSET) & IOAT_XFERCAP_VALID_MASK) #define ioat_write_intrctrl(ioat, value) \ ioat_write_1((ioat), IOAT_INTRCTRL_OFFSET, (value)) #define ioat_read_cbver(ioat) \ (ioat_read_1((ioat), IOAT_CBVER_OFFSET) & 0xFF) #define ioat_read_dmacapability(ioat) \ ioat_read_4((ioat), IOAT_DMACAPABILITY_OFFSET) #define ioat_write_chanctrl(ioat, value) \ ioat_write_2((ioat), IOAT_CHANCTRL_OFFSET, (value)) static __inline uint64_t ioat_bus_space_read_8_lower_first(bus_space_tag_t tag, bus_space_handle_t handle, bus_size_t offset) { return (bus_space_read_4(tag, handle, offset) | ((uint64_t)bus_space_read_4(tag, handle, offset + 4)) << 32); } static __inline void ioat_bus_space_write_8_lower_first(bus_space_tag_t tag, bus_space_handle_t handle, bus_size_t offset, uint64_t val) { bus_space_write_4(tag, handle, offset, val); bus_space_write_4(tag, handle, offset + 4, val >> 32); } #ifdef __i386__ #define ioat_bus_space_read_8 ioat_bus_space_read_8_lower_first #define ioat_bus_space_write_8 ioat_bus_space_write_8_lower_first #else #define ioat_bus_space_read_8(tag, handle, offset) \ bus_space_read_8((tag), (handle), (offset)) #define ioat_bus_space_write_8(tag, handle, offset, val) \ bus_space_write_8((tag), (handle), (offset), (val)) #endif #define ioat_read_1(ioat, offset) \ bus_space_read_1((ioat)->pci_bus_tag, (ioat)->pci_bus_handle, \ (offset)) #define ioat_read_2(ioat, offset) \ bus_space_read_2((ioat)->pci_bus_tag, (ioat)->pci_bus_handle, \ (offset)) #define ioat_read_4(ioat, offset) \ 
bus_space_read_4((ioat)->pci_bus_tag, (ioat)->pci_bus_handle, \ (offset)) #define ioat_read_8(ioat, offset) \ ioat_bus_space_read_8((ioat)->pci_bus_tag, (ioat)->pci_bus_handle, \ (offset)) #define ioat_read_double_4(ioat, offset) \ ioat_bus_space_read_8_lower_first((ioat)->pci_bus_tag, \ (ioat)->pci_bus_handle, (offset)) #define ioat_write_1(ioat, offset, value) \ bus_space_write_1((ioat)->pci_bus_tag, (ioat)->pci_bus_handle, \ (offset), (value)) #define ioat_write_2(ioat, offset, value) \ bus_space_write_2((ioat)->pci_bus_tag, (ioat)->pci_bus_handle, \ (offset), (value)) #define ioat_write_4(ioat, offset, value) \ bus_space_write_4((ioat)->pci_bus_tag, (ioat)->pci_bus_handle, \ (offset), (value)) #define ioat_write_8(ioat, offset, value) \ ioat_bus_space_write_8((ioat)->pci_bus_tag, (ioat)->pci_bus_handle, \ (offset), (value)) #define ioat_write_double_4(ioat, offset, value) \ ioat_bus_space_write_8_lower_first((ioat)->pci_bus_tag, \ (ioat)->pci_bus_handle, (offset), (value)) MALLOC_DECLARE(M_IOAT); SYSCTL_DECL(_hw_ioat); extern int g_ioat_debug_level; struct ioat_dma_hw_descriptor { uint32_t size; union { uint32_t control_raw; struct { uint32_t int_enable:1; uint32_t src_snoop_disable:1; uint32_t dest_snoop_disable:1; uint32_t completion_update:1; uint32_t fence:1; uint32_t null:1; uint32_t src_page_break:1; uint32_t dest_page_break:1; uint32_t bundle:1; uint32_t dest_dca:1; uint32_t hint:1; uint32_t reserved:13; #define IOAT_OP_COPY 0x00 uint32_t op:8; } control; } u; uint64_t src_addr; uint64_t dest_addr; uint64_t next; uint64_t reserved; uint64_t reserved2; uint64_t user1; uint64_t user2; }; struct ioat_fill_hw_descriptor { uint32_t size; union { uint32_t control_raw; struct { uint32_t int_enable:1; uint32_t reserved:1; uint32_t dest_snoop_disable:1; uint32_t completion_update:1; uint32_t fence:1; uint32_t reserved2:2; uint32_t dest_page_break:1; uint32_t bundle:1; uint32_t reserved3:15; #define IOAT_OP_FILL 0x01 uint32_t op:8; } control; } u; uint64_t src_data; uint64_t dest_addr; uint64_t next; uint64_t reserved; uint64_t next_dest_addr; uint64_t user1; uint64_t user2; }; struct ioat_xor_hw_descriptor { uint32_t size; union { uint32_t control_raw; struct { uint32_t int_enable:1; uint32_t src_snoop_disable:1; uint32_t dest_snoop_disable:1; uint32_t completion_update:1; uint32_t fence:1; uint32_t src_count:3; uint32_t bundle:1; uint32_t dest_dca:1; uint32_t hint:1; uint32_t reserved:13; #define IOAT_OP_XOR 0x87 #define IOAT_OP_XOR_VAL 0x88 uint32_t op:8; } control; } u; uint64_t src_addr; uint64_t dest_addr; uint64_t next; uint64_t src_addr2; uint64_t src_addr3; uint64_t src_addr4; uint64_t src_addr5; }; struct ioat_xor_ext_hw_descriptor { uint64_t src_addr6; uint64_t src_addr7; uint64_t src_addr8; uint64_t next; uint64_t reserved[4]; }; struct ioat_pq_hw_descriptor { uint32_t size; union { uint32_t control_raw; struct { uint32_t int_enable:1; uint32_t src_snoop_disable:1; uint32_t dest_snoop_disable:1; uint32_t completion_update:1; uint32_t fence:1; uint32_t src_count:3; uint32_t bundle:1; uint32_t dest_dca:1; uint32_t hint:1; uint32_t p_disable:1; uint32_t q_disable:1; uint32_t reserved:11; #define IOAT_OP_PQ 0x89 #define IOAT_OP_PQ_VAL 0x8a uint32_t op:8; } control; } u; uint64_t src_addr; uint64_t p_addr; uint64_t next; uint64_t src_addr2; uint64_t src_addr3; uint8_t coef[8]; uint64_t q_addr; }; struct ioat_pq_ext_hw_descriptor { uint64_t src_addr4; uint64_t src_addr5; uint64_t src_addr6; uint64_t next; uint64_t src_addr7; uint64_t src_addr8; uint64_t reserved[2]; }; struct 
ioat_pq_update_hw_descriptor { uint32_t size; union { uint32_t control_raw; struct { uint32_t int_enable:1; uint32_t src_snoop_disable:1; uint32_t dest_snoop_disable:1; uint32_t completion_update:1; uint32_t fence:1; uint32_t src_cnt:3; uint32_t bundle:1; uint32_t dest_dca:1; uint32_t hint:1; uint32_t p_disable:1; uint32_t q_disable:1; uint32_t reserved:3; uint32_t coef:8; #define IOAT_OP_PQ_UP 0x8b uint32_t op:8; } control; } u; uint64_t src_addr; uint64_t p_addr; uint64_t next; uint64_t src_addr2; uint64_t p_src; uint64_t q_src; uint64_t q_addr; }; struct ioat_raw_hw_descriptor { uint64_t field[8]; }; struct bus_dmadesc { bus_dmaengine_callback_t callback_fn; void *callback_arg; }; struct ioat_descriptor { struct bus_dmadesc bus_dmadesc; union { struct ioat_dma_hw_descriptor *dma; struct ioat_fill_hw_descriptor *fill; struct ioat_xor_hw_descriptor *xor; struct ioat_xor_ext_hw_descriptor *xor_ext; struct ioat_pq_hw_descriptor *pq; struct ioat_pq_ext_hw_descriptor *pq_ext; struct ioat_raw_hw_descriptor *raw; } u; uint32_t id; uint32_t length; enum validate_flags *validate_result; bus_addr_t hw_desc_bus_addr; }; enum ioat_ref_kind { IOAT_DMAENGINE_REF = 0, IOAT_ACTIVE_DESCR_REF, IOAT_NUM_REF_KINDS }; /* One of these per allocated PCI device. */ struct ioat_softc { bus_dmaengine_t dmaengine; #define to_ioat_softc(_dmaeng) \ ({ \ bus_dmaengine_t *_p = (_dmaeng); \ (struct ioat_softc *)((char *)_p - \ offsetof(struct ioat_softc, dmaengine)); \ }) int version; struct mtx submit_lock; device_t device; bus_space_tag_t pci_bus_tag; bus_space_handle_t pci_bus_handle; int pci_resource_id; struct resource *pci_resource; uint32_t max_xfer_size; struct resource *res; int rid; void *tag; bus_dma_tag_t hw_desc_tag; bus_dmamap_t hw_desc_map; bus_dma_tag_t comp_update_tag; bus_dmamap_t comp_update_map; uint64_t *comp_update; bus_addr_t comp_update_bus_addr; struct callout timer; boolean_t is_resize_pending; boolean_t is_completion_pending; boolean_t is_reset_pending; boolean_t is_channel_running; uint32_t head; uint32_t tail; + uint32_t hw_head; uint32_t ring_size_order; bus_addr_t last_seen; struct ioat_descriptor **ring; struct mtx cleanup_lock; volatile uint32_t refcnt; #ifdef INVARIANTS volatile uint32_t refkinds[IOAT_NUM_REF_KINDS]; #endif }; void ioat_test_attach(void); void ioat_test_detach(void); static inline uint64_t ioat_get_chansts(struct ioat_softc *ioat) { uint64_t status; if (ioat->version >= IOAT_VER_3_3) status = ioat_read_8(ioat, IOAT_CHANSTS_OFFSET); else /* Must read lower 4 bytes before upper 4 bytes. 
*/ status = ioat_read_double_4(ioat, IOAT_CHANSTS_OFFSET); return (status); } static inline void ioat_write_chancmp(struct ioat_softc *ioat, uint64_t addr) { if (ioat->version >= IOAT_VER_3_3) ioat_write_8(ioat, IOAT_CHANCMP_OFFSET_LOW, addr); else ioat_write_double_4(ioat, IOAT_CHANCMP_OFFSET_LOW, addr); } static inline void ioat_write_chainaddr(struct ioat_softc *ioat, uint64_t addr) { if (ioat->version >= IOAT_VER_3_3) ioat_write_8(ioat, IOAT_CHAINADDR_OFFSET_LOW, addr); else ioat_write_double_4(ioat, IOAT_CHAINADDR_OFFSET_LOW, addr); } static inline boolean_t is_ioat_active(uint64_t status) { return ((status & IOAT_CHANSTS_STATUS) == IOAT_CHANSTS_ACTIVE); } static inline boolean_t is_ioat_idle(uint64_t status) { return ((status & IOAT_CHANSTS_STATUS) == IOAT_CHANSTS_IDLE); } static inline boolean_t is_ioat_halted(uint64_t status) { return ((status & IOAT_CHANSTS_STATUS) == IOAT_CHANSTS_HALTED); } static inline boolean_t is_ioat_suspended(uint64_t status) { return ((status & IOAT_CHANSTS_STATUS) == IOAT_CHANSTS_SUSPENDED); } static inline void ioat_suspend(struct ioat_softc *ioat) { ioat_write_1(ioat, IOAT_CHANCMD_OFFSET, IOAT_CHANCMD_SUSPEND); } static inline void ioat_reset(struct ioat_softc *ioat) { ioat_write_1(ioat, IOAT_CHANCMD_OFFSET, IOAT_CHANCMD_RESET); } static inline boolean_t ioat_reset_pending(struct ioat_softc *ioat) { uint8_t cmd; cmd = ioat_read_1(ioat, IOAT_CHANCMD_OFFSET); return ((cmd & IOAT_CHANCMD_RESET) != 0); } #endif /* __IOAT_INTERNAL_H__ */
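The new DMA_NO_WAIT flag carries M_NOWAIT semantics from the public API down through ring reservation and descriptor allocation. Below is a minimal consumer sketch, not part of this changeset, assuming channel 0 exists; the names example_copy_done() and example_copy() are hypothetical. With DMA_NO_WAIT set, ioat_copy() returns NULL instead of sleeping when the ring is full and cannot be grown without blocking.

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <machine/bus.h>

#include "ioat.h"

static void
example_copy_done(void *arg)
{

	/* Invoked from ioat_process_events() once the copy completes. */
	wakeup(arg);
}

static int
example_copy(bus_addr_t dst, bus_addr_t src, bus_size_t len)
{
	struct bus_dmadesc *desc;
	bus_dmaengine_t dma;
	int waitchan;

	dma = ioat_get_dmaengine(0);
	if (dma == NULL)
		return (ENXIO);

	ioat_acquire(dma);
	desc = ioat_copy(dma, dst, src, len, example_copy_done, &waitchan,
	    DMA_INT_EN | DMA_NO_WAIT);
	ioat_release(dma);

	if (desc == NULL) {
		/* Ring full; DMA_NO_WAIT forbids sleeping for space. */
		ioat_put_dmaengine(dma);
		return (EAGAIN);
	}

	/* The timeout papers over the wakeup race in this simplified sketch. */
	tsleep(&waitchan, 0, "excpy", hz);
	ioat_put_dmaengine(dma);
	return (0);
}

Without the flag, ioat_reserve_space() may drop the submit_lock and sleep in msleep() or an M_WAITOK allocation while the ring grows, which is exactly what non-sleepable callers must avoid.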
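head, tail, and the new hw_head are free-running 32-bit counters; they are reduced modulo the power-of-two ring size only when used as indices, which is why ioat_get_active() subtracts before masking and ioat_get_ring_space() always keeps one slot free. A standalone model of that arithmetic, with arbitrary example values:

#include <stdio.h>
#include <stdint.h>

static uint32_t
active(uint32_t head, uint32_t tail, uint32_t order)
{

	/* Mirrors ioat_get_active(): wraparound-safe unsigned subtraction. */
	return ((head - tail) & ((1u << order) - 1));
}

int
main(void)
{
	uint32_t order = 7;			/* 128-slot ring */
	uint32_t head = 130, tail = 125;	/* counters already wrapped once */
	uint32_t act = active(head, tail, order);

	/* Mirrors ioat_get_ring_space(): one slot always stays free. */
	printf("active=%u space=%u\n", act, (1u << order) - act - 1);
	return (0);
}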
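ring_grow() doubles the ring while descriptors may still be in flight. When the masked head has wrapped below the masked tail, the wrapped low slots are swapped with their freshly allocated upper-half twins and head is rebased upward, so the active tail..head range stays contiguous and only the inactive span needs its hardware next pointers relinked (the "trampling snake" KASSERT checks exactly that). A compilable toy model of just the index shuffle, with made-up sizes and counters:

#include <stdio.h>
#include <stdint.h>

int
main(void)
{
	uint32_t oldsize = 4, newsize = 8;
	uint32_t head = 5 & (oldsize - 1);	/* masked head = 1 (wrapped) */
	uint32_t tail = 3 & (oldsize - 1);	/* masked tail = 3 */
	int id[8] = { 0, 1, 2, 3, 4, 5, 6, 7 };	/* 0-3 old, 4-7 fresh */
	uint32_t i;

	if (head < tail) {
		/* Swap wrapped entries with their fresh upper-half twins. */
		for (i = 0; i <= head; i++) {
			int tmp = id[oldsize + i];

			id[oldsize + i] = id[i];
			id[i] = tmp;
		}
		head += oldsize;	/* rebase so head >= tail again */
	}
	for (i = 0; i < newsize; i++)
		printf("%d ", id[i]);
	printf("\nhead=%u tail=%u (active range now contiguous)\n", head, tail);
	return (0);
}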
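ioat_putn() takes IOAT_REFLK only when a decrement may drop the count to zero, because only the final release has to wake a thread sleeping in ioat_drain(). The same pattern expressed in portable userland C, a sketch with C11 atomics and a pthread mutex standing in for FreeBSD's atomic_* primitives and the submit_lock:

#include <pthread.h>
#include <stdatomic.h>
#include <stdint.h>

static _Atomic uint32_t refcnt;
static pthread_mutex_t reflk = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t refcv = PTHREAD_COND_INITIALIZER;

static void
putn(uint32_t n)
{
	uint32_t old;

	/* Fast path: lock-free decrement while the result stays above 0. */
	for (;;) {
		old = atomic_load(&refcnt);
		if (old <= n)
			break;
		if (atomic_compare_exchange_weak(&refcnt, &old, old - n))
			return;
	}

	/* Slow path: count is reaching zero, so a drainer may be sleeping. */
	pthread_mutex_lock(&reflk);
	old = atomic_fetch_sub(&refcnt, n);
	if (old == n)		/* this call dropped the final reference */
		pthread_cond_broadcast(&refcv);
	pthread_mutex_unlock(&reflk);
}

static void
drain(void)
{

	pthread_mutex_lock(&reflk);
	while (atomic_load(&refcnt) > 0)
		pthread_cond_wait(&refcv, &reflk);
	pthread_mutex_unlock(&reflk);
}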