Index: head/sys/dev/aacraid/aacraid.c
===================================================================
--- head/sys/dev/aacraid/aacraid.c	(revision 327948)
+++ head/sys/dev/aacraid/aacraid.c	(revision 327949)
@@ -1,3864 +1,3864 @@
/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2000 Michael Smith
 * Copyright (c) 2001 Scott Long
 * Copyright (c) 2000 BSDi
 * Copyright (c) 2001-2010 Adaptec, Inc.
 * Copyright (c) 2010-2012 PMC-Sierra, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include __FBSDID("$FreeBSD$");

/*
 * Driver for the Adaptec by PMC Series 6,7,8,...
families of RAID controllers */ #define AAC_DRIVERNAME "aacraid" #include "opt_aacraid.h" /* #include */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #ifndef FILTER_HANDLED #define FILTER_HANDLED 0x02 #endif static void aac_add_container(struct aac_softc *sc, struct aac_mntinforesp *mir, int f, u_int32_t uid); static void aac_get_bus_info(struct aac_softc *sc); static void aac_container_bus(struct aac_softc *sc); static void aac_daemon(void *arg); static int aac_convert_sgraw2(struct aac_softc *sc, struct aac_raw_io2 *raw, int pages, int nseg, int nseg_new); /* Command Processing */ static void aac_timeout(struct aac_softc *sc); static void aac_command_thread(struct aac_softc *sc); static int aac_sync_fib(struct aac_softc *sc, u_int32_t command, u_int32_t xferstate, struct aac_fib *fib, u_int16_t datasize); /* Command Buffer Management */ static void aac_map_command_helper(void *arg, bus_dma_segment_t *segs, int nseg, int error); static int aac_alloc_commands(struct aac_softc *sc); static void aac_free_commands(struct aac_softc *sc); static void aac_unmap_command(struct aac_command *cm); /* Hardware Interface */ static int aac_alloc(struct aac_softc *sc); static void aac_common_map(void *arg, bus_dma_segment_t *segs, int nseg, int error); static int aac_check_firmware(struct aac_softc *sc); static void aac_define_int_mode(struct aac_softc *sc); static int aac_init(struct aac_softc *sc); static int aac_find_pci_capability(struct aac_softc *sc, int cap); static int aac_setup_intr(struct aac_softc *sc); static int aac_check_config(struct aac_softc *sc); /* PMC SRC interface */ static int aac_src_get_fwstatus(struct aac_softc *sc); static void aac_src_qnotify(struct aac_softc *sc, int qbit); static int aac_src_get_istatus(struct aac_softc *sc); static void aac_src_clear_istatus(struct aac_softc *sc, int mask); static void aac_src_set_mailbox(struct aac_softc *sc, u_int32_t command, u_int32_t arg0, u_int32_t arg1, u_int32_t arg2, u_int32_t arg3); static int aac_src_get_mailbox(struct aac_softc *sc, int mb); static void aac_src_access_devreg(struct aac_softc *sc, int mode); static int aac_src_send_command(struct aac_softc *sc, struct aac_command *cm); static int aac_src_get_outb_queue(struct aac_softc *sc); static void aac_src_set_outb_queue(struct aac_softc *sc, int index); struct aac_interface aacraid_src_interface = { aac_src_get_fwstatus, aac_src_qnotify, aac_src_get_istatus, aac_src_clear_istatus, aac_src_set_mailbox, aac_src_get_mailbox, aac_src_access_devreg, aac_src_send_command, aac_src_get_outb_queue, aac_src_set_outb_queue }; /* PMC SRCv interface */ static void aac_srcv_set_mailbox(struct aac_softc *sc, u_int32_t command, u_int32_t arg0, u_int32_t arg1, u_int32_t arg2, u_int32_t arg3); static int aac_srcv_get_mailbox(struct aac_softc *sc, int mb); struct aac_interface aacraid_srcv_interface = { aac_src_get_fwstatus, aac_src_qnotify, aac_src_get_istatus, aac_src_clear_istatus, aac_srcv_set_mailbox, aac_srcv_get_mailbox, aac_src_access_devreg, aac_src_send_command, aac_src_get_outb_queue, aac_src_set_outb_queue }; /* Debugging and Diagnostics */ static struct aac_code_lookup aac_cpu_variant[] = { {"i960JX", CPUI960_JX}, {"i960CX", CPUI960_CX}, {"i960HX", CPUI960_HX}, {"i960RX", CPUI960_RX}, {"i960 80303", CPUI960_80303}, {"StrongARM SA110", CPUARM_SA110}, {"PPC603e", CPUPPC_603e}, {"XScale 80321", CPU_XSCALE_80321}, {"MIPS 4KC", 
CPU_MIPS_4KC}, {"MIPS 5KC", CPU_MIPS_5KC}, {"Unknown StrongARM", CPUARM_xxx}, {"Unknown PowerPC", CPUPPC_xxx}, {NULL, 0}, {"Unknown processor", 0} }; static struct aac_code_lookup aac_battery_platform[] = { {"required battery present", PLATFORM_BAT_REQ_PRESENT}, {"REQUIRED BATTERY NOT PRESENT", PLATFORM_BAT_REQ_NOTPRESENT}, {"optional battery present", PLATFORM_BAT_OPT_PRESENT}, {"optional battery not installed", PLATFORM_BAT_OPT_NOTPRESENT}, {"no battery support", PLATFORM_BAT_NOT_SUPPORTED}, {NULL, 0}, {"unknown battery platform", 0} }; static void aac_describe_controller(struct aac_softc *sc); static char *aac_describe_code(struct aac_code_lookup *table, u_int32_t code); /* Management Interface */ static d_open_t aac_open; static d_ioctl_t aac_ioctl; static d_poll_t aac_poll; #if __FreeBSD_version >= 702000 static void aac_cdevpriv_dtor(void *arg); #else static d_close_t aac_close; #endif static int aac_ioctl_sendfib(struct aac_softc *sc, caddr_t ufib); static int aac_ioctl_send_raw_srb(struct aac_softc *sc, caddr_t arg); static void aac_handle_aif(struct aac_softc *sc, struct aac_fib *fib); static void aac_request_aif(struct aac_softc *sc); static int aac_rev_check(struct aac_softc *sc, caddr_t udata); static int aac_open_aif(struct aac_softc *sc, caddr_t arg); static int aac_close_aif(struct aac_softc *sc, caddr_t arg); static int aac_getnext_aif(struct aac_softc *sc, caddr_t arg); static int aac_return_aif(struct aac_softc *sc, struct aac_fib_context *ctx, caddr_t uptr); static int aac_query_disk(struct aac_softc *sc, caddr_t uptr); static int aac_get_pci_info(struct aac_softc *sc, caddr_t uptr); static int aac_supported_features(struct aac_softc *sc, caddr_t uptr); static void aac_ioctl_event(struct aac_softc *sc, struct aac_event *event, void *arg); static int aac_reset_adapter(struct aac_softc *sc); static int aac_get_container_info(struct aac_softc *sc, struct aac_fib *fib, int cid, struct aac_mntinforesp *mir, u_int32_t *uid); static u_int32_t aac_check_adapter_health(struct aac_softc *sc, u_int8_t *bled); static struct cdevsw aacraid_cdevsw = { .d_version = D_VERSION, .d_flags = D_NEEDGIANT, .d_open = aac_open, #if __FreeBSD_version < 702000 .d_close = aac_close, #endif .d_ioctl = aac_ioctl, .d_poll = aac_poll, .d_name = "aacraid", }; MALLOC_DEFINE(M_AACRAIDBUF, "aacraid_buf", "Buffers for the AACRAID driver"); /* sysctl node */ SYSCTL_NODE(_hw, OID_AUTO, aacraid, CTLFLAG_RD, 0, "AACRAID driver parameters"); /* * Device Interface */ /* * Initialize the controller and softc */ int aacraid_attach(struct aac_softc *sc) { int error, unit; struct aac_fib *fib; struct aac_mntinforesp mir; int count = 0, i = 0; u_int32_t uid; fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); sc->hint_flags = device_get_flags(sc->aac_dev); /* * Initialize per-controller queues. */ aac_initq_free(sc); aac_initq_ready(sc); aac_initq_busy(sc); /* mark controller as suspended until we get ourselves organised */ sc->aac_state |= AAC_STATE_SUSPEND; /* * Check that the firmware on the card is supported. */ sc->msi_enabled = FALSE; if ((error = aac_check_firmware(sc)) != 0) return(error); /* * Initialize locks */ mtx_init(&sc->aac_io_lock, "AACRAID I/O lock", NULL, MTX_DEF); TAILQ_INIT(&sc->aac_container_tqh); TAILQ_INIT(&sc->aac_ev_cmfree); #if __FreeBSD_version >= 800000 /* Initialize the clock daemon callout. */ callout_init_mtx(&sc->aac_daemontime, &sc->aac_io_lock, 0); #endif /* * Initialize the adapter. 
*/ if ((error = aac_alloc(sc)) != 0) return(error); if (!(sc->flags & AAC_FLAGS_SYNC_MODE)) { aac_define_int_mode(sc); if ((error = aac_init(sc)) != 0) return(error); } /* * Allocate and connect our interrupt. */ if ((error = aac_setup_intr(sc)) != 0) return(error); /* * Print a little information about the controller. */ aac_describe_controller(sc); /* * Make the control device. */ unit = device_get_unit(sc->aac_dev); sc->aac_dev_t = make_dev(&aacraid_cdevsw, unit, UID_ROOT, GID_OPERATOR, 0640, "aacraid%d", unit); sc->aac_dev_t->si_drv1 = sc; /* Create the AIF thread */ if (aac_kthread_create((void(*)(void *))aac_command_thread, sc, &sc->aifthread, 0, 0, "aacraid%daif", unit)) panic("Could not create AIF thread"); /* Register the shutdown method to only be called post-dump */ if ((sc->eh = EVENTHANDLER_REGISTER(shutdown_final, aacraid_shutdown, sc->aac_dev, SHUTDOWN_PRI_DEFAULT)) == NULL) device_printf(sc->aac_dev, "shutdown event registration failed\n"); /* Find containers */ mtx_lock(&sc->aac_io_lock); aac_alloc_sync_fib(sc, &fib); /* loop over possible containers */ do { if ((aac_get_container_info(sc, fib, i, &mir, &uid)) != 0) continue; if (i == 0) count = mir.MntRespCount; aac_add_container(sc, &mir, 0, uid); i++; } while ((i < count) && (i < AAC_MAX_CONTAINERS)); aac_release_sync_fib(sc); mtx_unlock(&sc->aac_io_lock); /* Register with CAM for the containers */ TAILQ_INIT(&sc->aac_sim_tqh); aac_container_bus(sc); /* Register with CAM for the non-DASD devices */ if ((sc->flags & AAC_FLAGS_ENABLE_CAM) != 0) aac_get_bus_info(sc); /* poke the bus to actually attach the child devices */ bus_generic_attach(sc->aac_dev); /* mark the controller up */ sc->aac_state &= ~AAC_STATE_SUSPEND; /* enable interrupts now */ AAC_ACCESS_DEVREG(sc, AAC_ENABLE_INTERRUPT); #if __FreeBSD_version >= 800000 mtx_lock(&sc->aac_io_lock); callout_reset(&sc->aac_daemontime, 60 * hz, aac_daemon, sc); mtx_unlock(&sc->aac_io_lock); #else { struct timeval tv; tv.tv_sec = 60; tv.tv_usec = 0; sc->timeout_id = timeout(aac_daemon, (void *)sc, tvtohz(&tv)); } #endif return(0); } static void aac_daemon(void *arg) { struct aac_softc *sc; struct timeval tv; struct aac_command *cm; struct aac_fib *fib; sc = arg; fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); #if __FreeBSD_version >= 800000 mtx_assert(&sc->aac_io_lock, MA_OWNED); if (callout_pending(&sc->aac_daemontime) || callout_active(&sc->aac_daemontime) == 0) return; #else mtx_lock(&sc->aac_io_lock); #endif getmicrotime(&tv); if (!aacraid_alloc_command(sc, &cm)) { fib = cm->cm_fib; cm->cm_timestamp = time_uptime; cm->cm_datalen = 0; cm->cm_flags |= AAC_CMD_WAIT; fib->Header.Size = sizeof(struct aac_fib_header) + sizeof(u_int32_t); fib->Header.XferState = AAC_FIBSTATE_HOSTOWNED | AAC_FIBSTATE_INITIALISED | AAC_FIBSTATE_EMPTY | AAC_FIBSTATE_FROMHOST | AAC_FIBSTATE_REXPECTED | AAC_FIBSTATE_NORM | AAC_FIBSTATE_ASYNC | AAC_FIBSTATE_FAST_RESPONSE; fib->Header.Command = SendHostTime; *(uint32_t *)fib->data = tv.tv_sec; aacraid_map_command_sg(cm, NULL, 0, 0); aacraid_release_command(cm); } #if __FreeBSD_version >= 800000 callout_schedule(&sc->aac_daemontime, 30 * 60 * hz); #else mtx_unlock(&sc->aac_io_lock); tv.tv_sec = 30 * 60; tv.tv_usec = 0; sc->timeout_id = timeout(aac_daemon, (void *)sc, tvtohz(&tv)); #endif } void aacraid_add_event(struct aac_softc *sc, struct aac_event *event) { switch (event->ev_type & AAC_EVENT_MASK) { case AAC_EVENT_CMFREE: TAILQ_INSERT_TAIL(&sc->aac_ev_cmfree, event, ev_links); break; default: device_printf(sc->aac_dev, "aac_add event: unknown event 
%d\n", event->ev_type); break; } return; } /* * Request information of container #cid */ static int aac_get_container_info(struct aac_softc *sc, struct aac_fib *sync_fib, int cid, struct aac_mntinforesp *mir, u_int32_t *uid) { struct aac_command *cm; struct aac_fib *fib; struct aac_mntinfo *mi; struct aac_cnt_config *ccfg; int rval; if (sync_fib == NULL) { if (aacraid_alloc_command(sc, &cm)) { device_printf(sc->aac_dev, "Warning, no free command available\n"); return (-1); } fib = cm->cm_fib; } else { fib = sync_fib; } mi = (struct aac_mntinfo *)&fib->data[0]; /* 4KB support?, 64-bit LBA? */ if (sc->aac_support_opt2 & AAC_SUPPORTED_VARIABLE_BLOCK_SIZE) mi->Command = VM_NameServeAllBlk; else if (sc->flags & AAC_FLAGS_LBA_64BIT) mi->Command = VM_NameServe64; else mi->Command = VM_NameServe; mi->MntType = FT_FILESYS; mi->MntCount = cid; if (sync_fib) { if (aac_sync_fib(sc, ContainerCommand, 0, fib, sizeof(struct aac_mntinfo))) { device_printf(sc->aac_dev, "Error probing container %d\n", cid); return (-1); } } else { cm->cm_timestamp = time_uptime; cm->cm_datalen = 0; fib->Header.Size = sizeof(struct aac_fib_header) + sizeof(struct aac_mntinfo); fib->Header.XferState = AAC_FIBSTATE_HOSTOWNED | AAC_FIBSTATE_INITIALISED | AAC_FIBSTATE_EMPTY | AAC_FIBSTATE_FROMHOST | AAC_FIBSTATE_REXPECTED | AAC_FIBSTATE_NORM | AAC_FIBSTATE_ASYNC | AAC_FIBSTATE_FAST_RESPONSE; fib->Header.Command = ContainerCommand; if (aacraid_wait_command(cm) != 0) { device_printf(sc->aac_dev, "Error probing container %d\n", cid); aacraid_release_command(cm); return (-1); } } bcopy(&fib->data[0], mir, sizeof(struct aac_mntinforesp)); /* UID */ *uid = cid; if (mir->MntTable[0].VolType != CT_NONE && !(mir->MntTable[0].ContentState & AAC_FSCS_HIDDEN)) { if (!(sc->aac_support_opt2 & AAC_SUPPORTED_VARIABLE_BLOCK_SIZE)) { mir->MntTable[0].ObjExtension.BlockDevice.BlockSize = 0x200; mir->MntTable[0].ObjExtension.BlockDevice.bdLgclPhysMap = 0; } ccfg = (struct aac_cnt_config *)&fib->data[0]; bzero(ccfg, sizeof (*ccfg) - CT_PACKET_SIZE); ccfg->Command = VM_ContainerConfig; ccfg->CTCommand.command = CT_CID_TO_32BITS_UID; ccfg->CTCommand.param[0] = cid; if (sync_fib) { rval = aac_sync_fib(sc, ContainerCommand, 0, fib, sizeof(struct aac_cnt_config)); if (rval == 0 && ccfg->Command == ST_OK && ccfg->CTCommand.param[0] == CT_OK && mir->MntTable[0].VolType != CT_PASSTHRU) *uid = ccfg->CTCommand.param[1]; } else { fib->Header.Size = sizeof(struct aac_fib_header) + sizeof(struct aac_cnt_config); fib->Header.XferState = AAC_FIBSTATE_HOSTOWNED | AAC_FIBSTATE_INITIALISED | AAC_FIBSTATE_EMPTY | AAC_FIBSTATE_FROMHOST | AAC_FIBSTATE_REXPECTED | AAC_FIBSTATE_NORM | AAC_FIBSTATE_ASYNC | AAC_FIBSTATE_FAST_RESPONSE; fib->Header.Command = ContainerCommand; rval = aacraid_wait_command(cm); if (rval == 0 && ccfg->Command == ST_OK && ccfg->CTCommand.param[0] == CT_OK && mir->MntTable[0].VolType != CT_PASSTHRU) *uid = ccfg->CTCommand.param[1]; aacraid_release_command(cm); } } return (0); } /* * Create a device to represent a new container */ static void aac_add_container(struct aac_softc *sc, struct aac_mntinforesp *mir, int f, u_int32_t uid) { struct aac_container *co; fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); /* * Check container volume type for validity. Note that many of * the possible types may never show up. 
*/ if ((mir->Status == ST_OK) && (mir->MntTable[0].VolType != CT_NONE)) { co = (struct aac_container *)malloc(sizeof *co, M_AACRAIDBUF, M_NOWAIT | M_ZERO); if (co == NULL) { panic("Out of memory?!"); } co->co_found = f; bcopy(&mir->MntTable[0], &co->co_mntobj, sizeof(struct aac_mntobj)); co->co_uid = uid; TAILQ_INSERT_TAIL(&sc->aac_container_tqh, co, co_link); } } /* * Allocate resources associated with (sc) */ static int aac_alloc(struct aac_softc *sc) { bus_size_t maxsize; fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); /* * Create DMA tag for mapping buffers into controller-addressable space. */ if (bus_dma_tag_create(sc->aac_parent_dmat, /* parent */ 1, 0, /* algnmnt, boundary */ (sc->flags & AAC_FLAGS_SG_64BIT) ? BUS_SPACE_MAXADDR : BUS_SPACE_MAXADDR_32BIT, /* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filter, filterarg */ sc->aac_max_sectors << 9, /* maxsize */ sc->aac_sg_tablesize, /* nsegments */ BUS_SPACE_MAXSIZE_32BIT, /* maxsegsize */ BUS_DMA_ALLOCNOW, /* flags */ busdma_lock_mutex, /* lockfunc */ &sc->aac_io_lock, /* lockfuncarg */ &sc->aac_buffer_dmat)) { device_printf(sc->aac_dev, "can't allocate buffer DMA tag\n"); return (ENOMEM); } /* * Create DMA tag for mapping FIBs into controller-addressable space.. */ if (sc->flags & AAC_FLAGS_NEW_COMM_TYPE1) maxsize = sc->aac_max_fibs_alloc * (sc->aac_max_fib_size + sizeof(struct aac_fib_xporthdr) + 31); else maxsize = sc->aac_max_fibs_alloc * (sc->aac_max_fib_size + 31); if (bus_dma_tag_create(sc->aac_parent_dmat, /* parent */ 1, 0, /* algnmnt, boundary */ (sc->flags & AAC_FLAGS_4GB_WINDOW) ? BUS_SPACE_MAXADDR_32BIT : 0x7fffffff, /* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filter, filterarg */ maxsize, /* maxsize */ 1, /* nsegments */ maxsize, /* maxsize */ 0, /* flags */ NULL, NULL, /* No locking needed */ &sc->aac_fib_dmat)) { device_printf(sc->aac_dev, "can't allocate FIB DMA tag\n"); return (ENOMEM); } /* * Create DMA tag for the common structure and allocate it. */ maxsize = sizeof(struct aac_common); maxsize += sc->aac_max_fibs * sizeof(u_int32_t); if (bus_dma_tag_create(sc->aac_parent_dmat, /* parent */ 1, 0, /* algnmnt, boundary */ (sc->flags & AAC_FLAGS_4GB_WINDOW) ? BUS_SPACE_MAXADDR_32BIT : 0x7fffffff, /* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filter, filterarg */ maxsize, /* maxsize */ 1, /* nsegments */ maxsize, /* maxsegsize */ 0, /* flags */ NULL, NULL, /* No locking needed */ &sc->aac_common_dmat)) { device_printf(sc->aac_dev, "can't allocate common structure DMA tag\n"); return (ENOMEM); } if (bus_dmamem_alloc(sc->aac_common_dmat, (void **)&sc->aac_common, BUS_DMA_NOWAIT, &sc->aac_common_dmamap)) { device_printf(sc->aac_dev, "can't allocate common structure\n"); return (ENOMEM); } (void)bus_dmamap_load(sc->aac_common_dmat, sc->aac_common_dmamap, sc->aac_common, maxsize, aac_common_map, sc, 0); bzero(sc->aac_common, maxsize); /* Allocate some FIBs and associated command structs */ TAILQ_INIT(&sc->aac_fibmap_tqh); sc->aac_commands = malloc(sc->aac_max_fibs * sizeof(struct aac_command), M_AACRAIDBUF, M_WAITOK|M_ZERO); mtx_lock(&sc->aac_io_lock); while (sc->total_fibs < sc->aac_max_fibs) { if (aac_alloc_commands(sc) != 0) break; } mtx_unlock(&sc->aac_io_lock); if (sc->total_fibs == 0) return (ENOMEM); return (0); } /* * Free all of the resources associated with (sc) * * Should not be called if the controller is active. 
*/ void aacraid_free(struct aac_softc *sc) { int i; fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); /* remove the control device */ if (sc->aac_dev_t != NULL) destroy_dev(sc->aac_dev_t); /* throw away any FIB buffers, discard the FIB DMA tag */ aac_free_commands(sc); if (sc->aac_fib_dmat) bus_dma_tag_destroy(sc->aac_fib_dmat); free(sc->aac_commands, M_AACRAIDBUF); /* destroy the common area */ if (sc->aac_common) { bus_dmamap_unload(sc->aac_common_dmat, sc->aac_common_dmamap); bus_dmamem_free(sc->aac_common_dmat, sc->aac_common, sc->aac_common_dmamap); } if (sc->aac_common_dmat) bus_dma_tag_destroy(sc->aac_common_dmat); /* disconnect the interrupt handler */ for (i = 0; i < AAC_MAX_MSIX; ++i) { if (sc->aac_intr[i]) bus_teardown_intr(sc->aac_dev, sc->aac_irq[i], sc->aac_intr[i]); if (sc->aac_irq[i]) bus_release_resource(sc->aac_dev, SYS_RES_IRQ, sc->aac_irq_rid[i], sc->aac_irq[i]); else break; } if (sc->msi_enabled) pci_release_msi(sc->aac_dev); /* destroy data-transfer DMA tag */ if (sc->aac_buffer_dmat) bus_dma_tag_destroy(sc->aac_buffer_dmat); /* destroy the parent DMA tag */ if (sc->aac_parent_dmat) bus_dma_tag_destroy(sc->aac_parent_dmat); /* release the register window mapping */ if (sc->aac_regs_res0 != NULL) bus_release_resource(sc->aac_dev, SYS_RES_MEMORY, sc->aac_regs_rid0, sc->aac_regs_res0); if (sc->aac_regs_res1 != NULL) bus_release_resource(sc->aac_dev, SYS_RES_MEMORY, sc->aac_regs_rid1, sc->aac_regs_res1); } /* * Disconnect from the controller completely, in preparation for unload. */ int aacraid_detach(device_t dev) { struct aac_softc *sc; struct aac_container *co; struct aac_sim *sim; int error; sc = device_get_softc(dev); fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); #if __FreeBSD_version >= 800000 callout_drain(&sc->aac_daemontime); #else untimeout(aac_daemon, (void *)sc, sc->timeout_id); #endif /* Remove the child containers */ while ((co = TAILQ_FIRST(&sc->aac_container_tqh)) != NULL) { TAILQ_REMOVE(&sc->aac_container_tqh, co, co_link); free(co, M_AACRAIDBUF); } /* Remove the CAM SIMs */ while ((sim = TAILQ_FIRST(&sc->aac_sim_tqh)) != NULL) { TAILQ_REMOVE(&sc->aac_sim_tqh, sim, sim_link); error = device_delete_child(dev, sim->sim_dev); if (error) return (error); free(sim, M_AACRAIDBUF); } if (sc->aifflags & AAC_AIFFLAGS_RUNNING) { sc->aifflags |= AAC_AIFFLAGS_EXIT; wakeup(sc->aifthread); tsleep(sc->aac_dev, PUSER | PCATCH, "aac_dch", 30 * hz); } if (sc->aifflags & AAC_AIFFLAGS_RUNNING) panic("Cannot shutdown AIF thread"); if ((error = aacraid_shutdown(dev))) return(error); EVENTHANDLER_DEREGISTER(shutdown_final, sc->eh); aacraid_free(sc); mtx_destroy(&sc->aac_io_lock); return(0); } /* * Bring the controller down to a dormant state and detach all child devices. * * This function is called before detach or system shutdown. * * Note that we can assume that the bioq on the controller is empty, as we won't * allow shutdown if any device is open. */ int aacraid_shutdown(device_t dev) { struct aac_softc *sc; struct aac_fib *fib; struct aac_close_command *cc; sc = device_get_softc(dev); fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); sc->aac_state |= AAC_STATE_SUSPEND; /* * Send a Container shutdown followed by a HostShutdown FIB to the * controller to convince it that we don't want to talk to it anymore. 
* We've been closed and all I/O completed already */ device_printf(sc->aac_dev, "shutting down controller..."); mtx_lock(&sc->aac_io_lock); aac_alloc_sync_fib(sc, &fib); cc = (struct aac_close_command *)&fib->data[0]; bzero(cc, sizeof(struct aac_close_command)); cc->Command = VM_CloseAll; cc->ContainerId = 0xfffffffe; if (aac_sync_fib(sc, ContainerCommand, 0, fib, sizeof(struct aac_close_command))) printf("FAILED.\n"); else printf("done\n"); AAC_ACCESS_DEVREG(sc, AAC_DISABLE_INTERRUPT); aac_release_sync_fib(sc); mtx_unlock(&sc->aac_io_lock); return(0); } /* * Bring the controller to a quiescent state, ready for system suspend. */ int aacraid_suspend(device_t dev) { struct aac_softc *sc; sc = device_get_softc(dev); fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); sc->aac_state |= AAC_STATE_SUSPEND; AAC_ACCESS_DEVREG(sc, AAC_DISABLE_INTERRUPT); return(0); } /* * Bring the controller back to a state ready for operation. */ int aacraid_resume(device_t dev) { struct aac_softc *sc; sc = device_get_softc(dev); fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); sc->aac_state &= ~AAC_STATE_SUSPEND; AAC_ACCESS_DEVREG(sc, AAC_ENABLE_INTERRUPT); return(0); } /* * Interrupt handler for NEW_COMM_TYPE1, NEW_COMM_TYPE2, NEW_COMM_TYPE34 interface. */ void aacraid_new_intr_type1(void *arg) { struct aac_msix_ctx *ctx; struct aac_softc *sc; int vector_no; struct aac_command *cm; struct aac_fib *fib; u_int32_t bellbits, bellbits_shifted, index, handle; int isFastResponse, isAif, noMoreAif, mode; ctx = (struct aac_msix_ctx *)arg; sc = ctx->sc; vector_no = ctx->vector_no; fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); mtx_lock(&sc->aac_io_lock); if (sc->msi_enabled) { mode = AAC_INT_MODE_MSI; if (vector_no == 0) { bellbits = AAC_MEM0_GETREG4(sc, AAC_SRC_ODBR_MSI); if (bellbits & 0x40000) mode |= AAC_INT_MODE_AIF; else if (bellbits & 0x1000) mode |= AAC_INT_MODE_SYNC; } } else { mode = AAC_INT_MODE_INTX; bellbits = AAC_MEM0_GETREG4(sc, AAC_SRC_ODBR_R); if (bellbits & AAC_DB_RESPONSE_SENT_NS) { bellbits = AAC_DB_RESPONSE_SENT_NS; AAC_MEM0_SETREG4(sc, AAC_SRC_ODBR_C, bellbits); } else { bellbits_shifted = (bellbits >> AAC_SRC_ODR_SHIFT); AAC_MEM0_SETREG4(sc, AAC_SRC_ODBR_C, bellbits); if (bellbits_shifted & AAC_DB_AIF_PENDING) mode |= AAC_INT_MODE_AIF; else if (bellbits_shifted & AAC_DB_SYNC_COMMAND) mode |= AAC_INT_MODE_SYNC; } /* ODR readback, Prep #238630 */ AAC_MEM0_GETREG4(sc, AAC_SRC_ODBR_R); } if (mode & AAC_INT_MODE_SYNC) { if (sc->aac_sync_cm) { cm = sc->aac_sync_cm; cm->cm_flags |= AAC_CMD_COMPLETED; /* is there a completion handler? */ if (cm->cm_complete != NULL) { cm->cm_complete(cm); } else { /* assume that someone is sleeping on this command */ wakeup(cm); } sc->flags &= ~AAC_QUEUE_FRZN; sc->aac_sync_cm = NULL; } mode = 0; } if (mode & AAC_INT_MODE_AIF) { if (mode & AAC_INT_MODE_INTX) { aac_request_aif(sc); mode = 0; } } if (mode) { /* handle async. status */ index = sc->aac_host_rrq_idx[vector_no]; for (;;) { isFastResponse = isAif = noMoreAif = 0; /* remove toggle bit (31) */ handle = (sc->aac_common->ac_host_rrq[index] & 0x7fffffff); /* check fast response bit (30) */ if (handle & 0x40000000) isFastResponse = 1; /* check AIF bit (23) */ else if (handle & 0x00800000) isAif = TRUE; handle &= 0x0000ffff; if (handle == 0) break; cm = sc->aac_commands + (handle - 1); fib = cm->cm_fib; sc->aac_rrq_outstanding[vector_no]--; if (isAif) { noMoreAif = (fib->Header.XferState & AAC_FIBSTATE_NOMOREAIF) ? 
1:0; if (!noMoreAif) aac_handle_aif(sc, fib); aac_remove_busy(cm); aacraid_release_command(cm); } else { if (isFastResponse) { fib->Header.XferState |= AAC_FIBSTATE_DONEADAP; *((u_int32_t *)(fib->data)) = ST_OK; cm->cm_flags |= AAC_CMD_FASTRESP; } aac_remove_busy(cm); aac_unmap_command(cm); cm->cm_flags |= AAC_CMD_COMPLETED; /* is there a completion handler? */ if (cm->cm_complete != NULL) { cm->cm_complete(cm); } else { /* assume that someone is sleeping on this command */ wakeup(cm); } sc->flags &= ~AAC_QUEUE_FRZN; } sc->aac_common->ac_host_rrq[index++] = 0; if (index == (vector_no + 1) * sc->aac_vector_cap) index = vector_no * sc->aac_vector_cap; sc->aac_host_rrq_idx[vector_no] = index; if ((isAif && !noMoreAif) || sc->aif_pending) aac_request_aif(sc); } } if (mode & AAC_INT_MODE_AIF) { aac_request_aif(sc); AAC_ACCESS_DEVREG(sc, AAC_CLEAR_AIF_BIT); mode = 0; } /* see if we can start some more I/O */ if ((sc->flags & AAC_QUEUE_FRZN) == 0) aacraid_startio(sc); mtx_unlock(&sc->aac_io_lock); } /* * Handle notification of one or more FIBs coming from the controller. */ static void aac_command_thread(struct aac_softc *sc) { int retval; fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); mtx_lock(&sc->aac_io_lock); sc->aifflags = AAC_AIFFLAGS_RUNNING; while ((sc->aifflags & AAC_AIFFLAGS_EXIT) == 0) { retval = 0; if ((sc->aifflags & AAC_AIFFLAGS_PENDING) == 0) retval = msleep(sc->aifthread, &sc->aac_io_lock, PRIBIO, "aacraid_aifthd", AAC_PERIODIC_INTERVAL * hz); /* * First see if any FIBs need to be allocated. This needs * to be called without the driver lock because contigmalloc * will grab Giant, and would result in an LOR. */ if ((sc->aifflags & AAC_AIFFLAGS_ALLOCFIBS) != 0) { aac_alloc_commands(sc); sc->aifflags &= ~AAC_AIFFLAGS_ALLOCFIBS; aacraid_startio(sc); } /* * While we're here, check to see if any commands are stuck. * This is pretty low-priority, so it's ok if it doesn't * always fire. */ if (retval == EWOULDBLOCK) aac_timeout(sc); /* Check the hardware printf message buffer */ if (sc->aac_common->ac_printf[0] != 0) aac_print_printf(sc); } sc->aifflags &= ~AAC_AIFFLAGS_RUNNING; mtx_unlock(&sc->aac_io_lock); wakeup(sc->aac_dev); aac_kthread_exit(0); } /* * Submit a command to the controller, return when it completes. * XXX This is very dangerous! If the card has gone out to lunch, we could * be stuck here forever. At the same time, signals are not caught * because there is a risk that a signal could wakeup the sleep before * the card has a chance to complete the command. Since there is no way * to cancel a command that is in progress, we can't protect against the * card completing a command late and spamming the command and data * memory. So, we are held hostage until the command completes. */ int aacraid_wait_command(struct aac_command *cm) { struct aac_softc *sc; int error; sc = cm->cm_sc; fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); mtx_assert(&sc->aac_io_lock, MA_OWNED); /* Put the command on the ready queue and get things going */ aac_enqueue_ready(cm); aacraid_startio(sc); error = msleep(cm, &sc->aac_io_lock, PRIBIO, "aacraid_wait", 0); return(error); } /* *Command Buffer Management */ /* * Allocate a command. 
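 *
 * Returns EBUSY when the free list is empty; if the adapter can still
 * take more FIBs, the AIF thread is woken to allocate another block.
 * Callers are expected to hold aac_io_lock and to hand the command back
 * with aacraid_release_command().  A minimal (hypothetical) caller:
 *
 *	mtx_lock(&sc->aac_io_lock);
 *	if (aacraid_alloc_command(sc, &cm) == 0) {
 *		... build cm->cm_fib ...
 *		aacraid_release_command(cm);
 *	}
 *	mtx_unlock(&sc->aac_io_lock);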
*/ int aacraid_alloc_command(struct aac_softc *sc, struct aac_command **cmp) { struct aac_command *cm; fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); if ((cm = aac_dequeue_free(sc)) == NULL) { if (sc->total_fibs < sc->aac_max_fibs) { sc->aifflags |= AAC_AIFFLAGS_ALLOCFIBS; wakeup(sc->aifthread); } return (EBUSY); } *cmp = cm; return(0); } /* * Release a command back to the freelist. */ void aacraid_release_command(struct aac_command *cm) { struct aac_event *event; struct aac_softc *sc; sc = cm->cm_sc; fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); mtx_assert(&sc->aac_io_lock, MA_OWNED); /* (re)initialize the command/FIB */ cm->cm_sgtable = NULL; cm->cm_flags = 0; cm->cm_complete = NULL; cm->cm_ccb = NULL; cm->cm_passthr_dmat = 0; cm->cm_fib->Header.XferState = AAC_FIBSTATE_EMPTY; cm->cm_fib->Header.StructType = AAC_FIBTYPE_TFIB; cm->cm_fib->Header.Unused = 0; cm->cm_fib->Header.SenderSize = cm->cm_sc->aac_max_fib_size; /* * These are duplicated in aac_start to cover the case where an * intermediate stage may have destroyed them. They're left * initialized here for debugging purposes only. */ cm->cm_fib->Header.u.ReceiverFibAddress = (u_int32_t)cm->cm_fibphys; cm->cm_fib->Header.Handle = 0; aac_enqueue_free(cm); /* * Dequeue all events so that there's no risk of events getting * stranded. */ while ((event = TAILQ_FIRST(&sc->aac_ev_cmfree)) != NULL) { TAILQ_REMOVE(&sc->aac_ev_cmfree, event, ev_links); event->ev_callback(sc, event, event->ev_arg); } } /* * Map helper for command/FIB allocation. */ static void aac_map_command_helper(void *arg, bus_dma_segment_t *segs, int nseg, int error) { uint64_t *fibphys; fibphys = (uint64_t *)arg; *fibphys = segs[0].ds_addr; } /* * Allocate and initialize commands/FIBs for this adapter. */ static int aac_alloc_commands(struct aac_softc *sc) { struct aac_command *cm; struct aac_fibmap *fm; uint64_t fibphys; int i, error; u_int32_t maxsize; fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); mtx_assert(&sc->aac_io_lock, MA_OWNED); if (sc->total_fibs + sc->aac_max_fibs_alloc > sc->aac_max_fibs) return (ENOMEM); fm = malloc(sizeof(struct aac_fibmap), M_AACRAIDBUF, M_NOWAIT|M_ZERO); if (fm == NULL) return (ENOMEM); mtx_unlock(&sc->aac_io_lock); /* allocate the FIBs in DMAable memory and load them */ if (bus_dmamem_alloc(sc->aac_fib_dmat, (void **)&fm->aac_fibs, BUS_DMA_NOWAIT, &fm->aac_fibmap)) { device_printf(sc->aac_dev, "Not enough contiguous memory available.\n"); free(fm, M_AACRAIDBUF); mtx_lock(&sc->aac_io_lock); return (ENOMEM); } maxsize = sc->aac_max_fib_size + 31; if (sc->flags & AAC_FLAGS_NEW_COMM_TYPE1) maxsize += sizeof(struct aac_fib_xporthdr); /* Ignore errors since this doesn't bounce */ (void)bus_dmamap_load(sc->aac_fib_dmat, fm->aac_fibmap, fm->aac_fibs, sc->aac_max_fibs_alloc * maxsize, aac_map_command_helper, &fibphys, 0); mtx_lock(&sc->aac_io_lock); /* initialize constant fields in the command structure */ bzero(fm->aac_fibs, sc->aac_max_fibs_alloc * maxsize); for (i = 0; i < sc->aac_max_fibs_alloc; i++) { cm = sc->aac_commands + sc->total_fibs; fm->aac_commands = cm; cm->cm_sc = sc; cm->cm_fib = (struct aac_fib *) ((u_int8_t *)fm->aac_fibs + i * maxsize); cm->cm_fibphys = fibphys + i * maxsize; if (sc->flags & AAC_FLAGS_NEW_COMM_TYPE1) { u_int64_t fibphys_aligned; fibphys_aligned = (cm->cm_fibphys + sizeof(struct aac_fib_xporthdr) + 31) & ~31; cm->cm_fib = (struct aac_fib *) ((u_int8_t *)cm->cm_fib + (fibphys_aligned - cm->cm_fibphys)); cm->cm_fibphys = fibphys_aligned; } else { u_int64_t fibphys_aligned; fibphys_aligned = 
(cm->cm_fibphys + 31) & ~31; cm->cm_fib = (struct aac_fib *) ((u_int8_t *)cm->cm_fib + (fibphys_aligned - cm->cm_fibphys)); cm->cm_fibphys = fibphys_aligned; } cm->cm_index = sc->total_fibs; if ((error = bus_dmamap_create(sc->aac_buffer_dmat, 0, &cm->cm_datamap)) != 0) break; if (sc->aac_max_fibs <= 1 || sc->aac_max_fibs - sc->total_fibs > 1) aacraid_release_command(cm); sc->total_fibs++; } if (i > 0) { TAILQ_INSERT_TAIL(&sc->aac_fibmap_tqh, fm, fm_link); fwprintf(sc, HBA_FLAGS_DBG_COMM_B, "total_fibs= %d\n", sc->total_fibs); return (0); } bus_dmamap_unload(sc->aac_fib_dmat, fm->aac_fibmap); bus_dmamem_free(sc->aac_fib_dmat, fm->aac_fibs, fm->aac_fibmap); free(fm, M_AACRAIDBUF); return (ENOMEM); } /* * Free FIBs owned by this adapter. */ static void aac_free_commands(struct aac_softc *sc) { struct aac_fibmap *fm; struct aac_command *cm; int i; fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); while ((fm = TAILQ_FIRST(&sc->aac_fibmap_tqh)) != NULL) { TAILQ_REMOVE(&sc->aac_fibmap_tqh, fm, fm_link); /* * We check against total_fibs to handle partially * allocated blocks. */ for (i = 0; i < sc->aac_max_fibs_alloc && sc->total_fibs--; i++) { cm = fm->aac_commands + i; bus_dmamap_destroy(sc->aac_buffer_dmat, cm->cm_datamap); } bus_dmamap_unload(sc->aac_fib_dmat, fm->aac_fibmap); bus_dmamem_free(sc->aac_fib_dmat, fm->aac_fibs, fm->aac_fibmap); free(fm, M_AACRAIDBUF); } } /* * Command-mapping helper function - populate this command's s/g table. */ void aacraid_map_command_sg(void *arg, bus_dma_segment_t *segs, int nseg, int error) { struct aac_softc *sc; struct aac_command *cm; struct aac_fib *fib; int i; cm = (struct aac_command *)arg; sc = cm->cm_sc; fib = cm->cm_fib; fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "nseg %d", nseg); mtx_assert(&sc->aac_io_lock, MA_OWNED); /* copy into the FIB */ if (cm->cm_sgtable != NULL) { if (fib->Header.Command == RawIo2) { struct aac_raw_io2 *raw; struct aac_sge_ieee1212 *sg; u_int32_t min_size = PAGE_SIZE, cur_size; int conformable = TRUE; raw = (struct aac_raw_io2 *)&fib->data[0]; sg = (struct aac_sge_ieee1212 *)cm->cm_sgtable; raw->sgeCnt = nseg; for (i = 0; i < nseg; i++) { cur_size = segs[i].ds_len; sg[i].addrHigh = 0; *(bus_addr_t *)&sg[i].addrLow = segs[i].ds_addr; sg[i].length = cur_size; sg[i].flags = 0; if (i == 0) { raw->sgeFirstSize = cur_size; } else if (i == 1) { raw->sgeNominalSize = cur_size; min_size = cur_size; } else if ((i+1) < nseg && cur_size != raw->sgeNominalSize) { conformable = FALSE; if (cur_size < min_size) min_size = cur_size; } } /* not conformable: evaluate required sg elements */ if (!conformable) { int j, err_found, nseg_new = nseg; for (i = min_size / PAGE_SIZE; i >= 1; --i) { err_found = FALSE; nseg_new = 2; for (j = 1; j < nseg - 1; ++j) { if (sg[j].length % (i*PAGE_SIZE)) { err_found = TRUE; break; } nseg_new += (sg[j].length / (i*PAGE_SIZE)); } if (!err_found) break; } if (i>0 && nseg_new<=sc->aac_sg_tablesize && !(sc->hint_flags & 4)) nseg = aac_convert_sgraw2(sc, raw, i, nseg, nseg_new); } else { raw->flags |= RIO2_SGL_CONFORMANT; } /* update the FIB size for the s/g count */ fib->Header.Size += nseg * sizeof(struct aac_sge_ieee1212); } else if (fib->Header.Command == RawIo) { struct aac_sg_tableraw *sg; sg = (struct aac_sg_tableraw *)cm->cm_sgtable; sg->SgCount = nseg; for (i = 0; i < nseg; i++) { sg->SgEntryRaw[i].SgAddress = segs[i].ds_addr; sg->SgEntryRaw[i].SgByteCount = segs[i].ds_len; sg->SgEntryRaw[i].Next = 0; sg->SgEntryRaw[i].Prev = 0; sg->SgEntryRaw[i].Flags = 0; } /* update the FIB size for the s/g count */ 
			fib->Header.Size += nseg*sizeof(struct aac_sg_entryraw);
		} else if ((cm->cm_sc->flags & AAC_FLAGS_SG_64BIT) == 0) {
			struct aac_sg_table *sg;
			sg = cm->cm_sgtable;
			sg->SgCount = nseg;
			for (i = 0; i < nseg; i++) {
				sg->SgEntry[i].SgAddress = segs[i].ds_addr;
				sg->SgEntry[i].SgByteCount = segs[i].ds_len;
			}
			/* update the FIB size for the s/g count */
			fib->Header.Size += nseg*sizeof(struct aac_sg_entry);
		} else {
			struct aac_sg_table64 *sg;
			sg = (struct aac_sg_table64 *)cm->cm_sgtable;
			sg->SgCount = nseg;
			for (i = 0; i < nseg; i++) {
				sg->SgEntry64[i].SgAddress = segs[i].ds_addr;
				sg->SgEntry64[i].SgByteCount = segs[i].ds_len;
			}
			/* update the FIB size for the s/g count */
			fib->Header.Size += nseg*sizeof(struct aac_sg_entry64);
		}
	}

	/* Fix up the address values in the FIB.  Use the command array index
	 * instead of a pointer since these fields are only 32 bits.  Shift
	 * the SenderFibAddress over to make room for the fast response bit
	 * and for the AIF bit
	 */
	cm->cm_fib->Header.SenderFibAddress = (cm->cm_index << 2);
	cm->cm_fib->Header.u.ReceiverFibAddress = (u_int32_t)cm->cm_fibphys;

	/* save a pointer to the command for speedy reverse-lookup */
	cm->cm_fib->Header.Handle += cm->cm_index + 1;

	if (cm->cm_passthr_dmat == 0) {
		if (cm->cm_flags & AAC_CMD_DATAIN)
			bus_dmamap_sync(sc->aac_buffer_dmat, cm->cm_datamap,
			    BUS_DMASYNC_PREREAD);
		if (cm->cm_flags & AAC_CMD_DATAOUT)
			bus_dmamap_sync(sc->aac_buffer_dmat, cm->cm_datamap,
			    BUS_DMASYNC_PREWRITE);
	}

	cm->cm_flags |= AAC_CMD_MAPPED;

	if (sc->flags & AAC_FLAGS_SYNC_MODE) {
		u_int32_t wait = 0;
		aacraid_sync_command(sc, AAC_MONKER_SYNCFIB,
		    cm->cm_fibphys, 0, 0, 0, &wait, NULL);
	} else if (cm->cm_flags & AAC_CMD_WAIT) {
		aacraid_sync_command(sc, AAC_MONKER_SYNCFIB,
		    cm->cm_fibphys, 0, 0, 0, NULL, NULL);
	} else {
		int count = 10000000L;
		while (AAC_SEND_COMMAND(sc, cm) != 0) {
			if (--count == 0) {
				aac_unmap_command(cm);
				sc->flags |= AAC_QUEUE_FRZN;
				aac_requeue_ready(cm);
			}
			DELAY(5);			/* wait 5 usec. */
		}
	}
}

static int
aac_convert_sgraw2(struct aac_softc *sc, struct aac_raw_io2 *raw,
		   int pages, int nseg, int nseg_new)
{
	struct aac_sge_ieee1212 *sge;
	int i, j, pos;
	u_int32_t addr_low;

-	sge = malloc(nseg_new * sizeof(struct aac_sge_ieee1212),
+	sge = mallocarray(nseg_new, sizeof(struct aac_sge_ieee1212),
	    M_AACRAIDBUF, M_NOWAIT|M_ZERO);
	if (sge == NULL)
		return nseg;

	for (i = 1, pos = 1; i < nseg - 1; ++i) {
		for (j = 0; j < raw->sge[i].length / (pages*PAGE_SIZE); ++j) {
			addr_low = raw->sge[i].addrLow + j * pages * PAGE_SIZE;
			sge[pos].addrLow = addr_low;
			sge[pos].addrHigh = raw->sge[i].addrHigh;
			if (addr_low < raw->sge[i].addrLow)
				sge[pos].addrHigh++;
			sge[pos].length = pages * PAGE_SIZE;
			sge[pos].flags = 0;
			pos++;
		}
	}
	sge[pos] = raw->sge[nseg-1];
	for (i = 1; i < nseg_new; ++i)
		raw->sge[i] = sge[i];

	free(sge, M_AACRAIDBUF);
	raw->sgeCnt = nseg_new;
	raw->flags |= RIO2_SGL_CONFORMANT;
	raw->sgeNominalSize = pages * PAGE_SIZE;
	return nseg_new;
}

/*
 * Unmap a command from controller-visible space.
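 *
 * This undoes aacraid_map_command_sg(): if the command carried data and
 * was not a pass-through mapping, the data map is synced with
 * BUS_DMASYNC_POSTREAD/POSTWRITE (matching the PREREAD/PREWRITE syncs
 * done at map time) and then unloaded, and AAC_CMD_MAPPED is cleared.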
*/ static void aac_unmap_command(struct aac_command *cm) { struct aac_softc *sc; sc = cm->cm_sc; fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); if (!(cm->cm_flags & AAC_CMD_MAPPED)) return; if (cm->cm_datalen != 0 && cm->cm_passthr_dmat == 0) { if (cm->cm_flags & AAC_CMD_DATAIN) bus_dmamap_sync(sc->aac_buffer_dmat, cm->cm_datamap, BUS_DMASYNC_POSTREAD); if (cm->cm_flags & AAC_CMD_DATAOUT) bus_dmamap_sync(sc->aac_buffer_dmat, cm->cm_datamap, BUS_DMASYNC_POSTWRITE); bus_dmamap_unload(sc->aac_buffer_dmat, cm->cm_datamap); } cm->cm_flags &= ~AAC_CMD_MAPPED; } /* * Hardware Interface */ /* * Initialize the adapter. */ static void aac_common_map(void *arg, bus_dma_segment_t *segs, int nseg, int error) { struct aac_softc *sc; sc = (struct aac_softc *)arg; fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); sc->aac_common_busaddr = segs[0].ds_addr; } static int aac_check_firmware(struct aac_softc *sc) { u_int32_t code, major, minor, maxsize; u_int32_t options = 0, atu_size = 0, status, waitCount; time_t then; fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); /* check if flash update is running */ if (AAC_GET_FWSTATUS(sc) & AAC_FLASH_UPD_PENDING) { then = time_uptime; do { code = AAC_GET_FWSTATUS(sc); if (time_uptime > (then + AAC_FWUPD_TIMEOUT)) { device_printf(sc->aac_dev, "FATAL: controller not coming ready, " "status %x\n", code); return(ENXIO); } } while (!(code & AAC_FLASH_UPD_SUCCESS) && !(code & AAC_FLASH_UPD_FAILED)); /* * Delay 10 seconds. Because right now FW is doing a soft reset, * do not read scratch pad register at this time */ waitCount = 10 * 10000; while (waitCount) { DELAY(100); /* delay 100 microseconds */ waitCount--; } } /* * Wait for the adapter to come ready. */ then = time_uptime; do { code = AAC_GET_FWSTATUS(sc); if (time_uptime > (then + AAC_BOOT_TIMEOUT)) { device_printf(sc->aac_dev, "FATAL: controller not coming ready, " "status %x\n", code); return(ENXIO); } } while (!(code & AAC_UP_AND_RUNNING) || code == 0xffffffff); /* * Retrieve the firmware version numbers. Dell PERC2/QC cards with * firmware version 1.x are not compatible with this driver. */ if (sc->flags & AAC_FLAGS_PERC2QC) { if (aacraid_sync_command(sc, AAC_MONKER_GETKERNVER, 0, 0, 0, 0, NULL, NULL)) { device_printf(sc->aac_dev, "Error reading firmware version\n"); return (EIO); } /* These numbers are stored as ASCII! */ major = (AAC_GET_MAILBOX(sc, 1) & 0xff) - 0x30; minor = (AAC_GET_MAILBOX(sc, 2) & 0xff) - 0x30; if (major == 1) { device_printf(sc->aac_dev, "Firmware version %d.%d is not supported.\n", major, minor); return (EINVAL); } } /* * Retrieve the capabilities/supported options word so we know what * work-arounds to enable. Some firmware revs don't support this * command. 
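 *
 * On success the options word comes back in mailbox 1 and the required
 * ATU window size in mailbox 2; an AAC_SRB_STS_INVALID_REQUEST status is
 * tolerated and simply leaves both values at their zero defaults.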
*/ if (aacraid_sync_command(sc, AAC_MONKER_GETINFO, 0, 0, 0, 0, &status, NULL)) { if (status != AAC_SRB_STS_INVALID_REQUEST) { device_printf(sc->aac_dev, "RequestAdapterInfo failed\n"); return (EIO); } } else { options = AAC_GET_MAILBOX(sc, 1); atu_size = AAC_GET_MAILBOX(sc, 2); sc->supported_options = options; if ((options & AAC_SUPPORTED_4GB_WINDOW) != 0 && (sc->flags & AAC_FLAGS_NO4GB) == 0) sc->flags |= AAC_FLAGS_4GB_WINDOW; if (options & AAC_SUPPORTED_NONDASD) sc->flags |= AAC_FLAGS_ENABLE_CAM; if ((options & AAC_SUPPORTED_SGMAP_HOST64) != 0 && (sizeof(bus_addr_t) > 4) && (sc->hint_flags & 0x1)) { device_printf(sc->aac_dev, "Enabling 64-bit address support\n"); sc->flags |= AAC_FLAGS_SG_64BIT; } if (sc->aac_if.aif_send_command) { if ((options & AAC_SUPPORTED_NEW_COMM_TYPE3) || (options & AAC_SUPPORTED_NEW_COMM_TYPE4)) sc->flags |= AAC_FLAGS_NEW_COMM | AAC_FLAGS_NEW_COMM_TYPE34; else if (options & AAC_SUPPORTED_NEW_COMM_TYPE1) sc->flags |= AAC_FLAGS_NEW_COMM | AAC_FLAGS_NEW_COMM_TYPE1; else if (options & AAC_SUPPORTED_NEW_COMM_TYPE2) sc->flags |= AAC_FLAGS_NEW_COMM | AAC_FLAGS_NEW_COMM_TYPE2; } if (options & AAC_SUPPORTED_64BIT_ARRAYSIZE) sc->flags |= AAC_FLAGS_ARRAY_64BIT; } if (!(sc->flags & AAC_FLAGS_NEW_COMM)) { device_printf(sc->aac_dev, "Communication interface not supported!\n"); return (ENXIO); } if (sc->hint_flags & 2) { device_printf(sc->aac_dev, "Sync. mode enforced by driver parameter. This will cause a significant performance decrease!\n"); sc->flags |= AAC_FLAGS_SYNC_MODE; } else if (sc->flags & AAC_FLAGS_NEW_COMM_TYPE34) { device_printf(sc->aac_dev, "Async. mode not supported by current driver, sync. mode enforced.\nPlease update driver to get full performance.\n"); sc->flags |= AAC_FLAGS_SYNC_MODE; } /* Check for broken hardware that does a lower number of commands */ sc->aac_max_fibs = (sc->flags & AAC_FLAGS_256FIBS ? 256:512); /* Remap mem. 
resource, if required */ if (atu_size > rman_get_size(sc->aac_regs_res0)) { bus_release_resource( sc->aac_dev, SYS_RES_MEMORY, sc->aac_regs_rid0, sc->aac_regs_res0); sc->aac_regs_res0 = bus_alloc_resource_anywhere( sc->aac_dev, SYS_RES_MEMORY, &sc->aac_regs_rid0, atu_size, RF_ACTIVE); if (sc->aac_regs_res0 == NULL) { sc->aac_regs_res0 = bus_alloc_resource_any( sc->aac_dev, SYS_RES_MEMORY, &sc->aac_regs_rid0, RF_ACTIVE); if (sc->aac_regs_res0 == NULL) { device_printf(sc->aac_dev, "couldn't allocate register window\n"); return (ENXIO); } } sc->aac_btag0 = rman_get_bustag(sc->aac_regs_res0); sc->aac_bhandle0 = rman_get_bushandle(sc->aac_regs_res0); } /* Read preferred settings */ sc->aac_max_fib_size = sizeof(struct aac_fib); sc->aac_max_sectors = 128; /* 64KB */ sc->aac_max_aif = 1; if (sc->flags & AAC_FLAGS_SG_64BIT) sc->aac_sg_tablesize = (AAC_FIB_DATASIZE - sizeof(struct aac_blockwrite64)) / sizeof(struct aac_sg_entry64); else sc->aac_sg_tablesize = (AAC_FIB_DATASIZE - sizeof(struct aac_blockwrite)) / sizeof(struct aac_sg_entry); if (!aacraid_sync_command(sc, AAC_MONKER_GETCOMMPREF, 0, 0, 0, 0, NULL, NULL)) { options = AAC_GET_MAILBOX(sc, 1); sc->aac_max_fib_size = (options & 0xFFFF); sc->aac_max_sectors = (options >> 16) << 1; options = AAC_GET_MAILBOX(sc, 2); sc->aac_sg_tablesize = (options >> 16); options = AAC_GET_MAILBOX(sc, 3); sc->aac_max_fibs = ((options >> 16) & 0xFFFF); if (sc->aac_max_fibs == 0 || sc->aac_hwif != AAC_HWIF_SRCV) sc->aac_max_fibs = (options & 0xFFFF); options = AAC_GET_MAILBOX(sc, 4); sc->aac_max_aif = (options & 0xFFFF); options = AAC_GET_MAILBOX(sc, 5); sc->aac_max_msix =(sc->flags & AAC_FLAGS_NEW_COMM_TYPE2) ? options : 0; } maxsize = sc->aac_max_fib_size + 31; if (sc->flags & AAC_FLAGS_NEW_COMM_TYPE1) maxsize += sizeof(struct aac_fib_xporthdr); if (maxsize > PAGE_SIZE) { sc->aac_max_fib_size -= (maxsize - PAGE_SIZE); maxsize = PAGE_SIZE; } sc->aac_max_fibs_alloc = PAGE_SIZE / maxsize; if (sc->aac_max_fib_size > sizeof(struct aac_fib)) { sc->flags |= AAC_FLAGS_RAW_IO; device_printf(sc->aac_dev, "Enable Raw I/O\n"); } if ((sc->flags & AAC_FLAGS_RAW_IO) && (sc->flags & AAC_FLAGS_ARRAY_64BIT)) { sc->flags |= AAC_FLAGS_LBA_64BIT; device_printf(sc->aac_dev, "Enable 64-bit array\n"); } #ifdef AACRAID_DEBUG aacraid_get_fw_debug_buffer(sc); #endif return (0); } static int aac_init(struct aac_softc *sc) { struct aac_adapter_init *ip; int i, error; fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); /* reset rrq index */ sc->aac_fibs_pushed_no = 0; for (i = 0; i < sc->aac_max_msix; i++) sc->aac_host_rrq_idx[i] = i * sc->aac_vector_cap; /* * Fill in the init structure. This tells the adapter about the * physical location of various important shared data structures. 
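 *
 * The addresses below are bus addresses offset from
 * sc->aac_common_busaddr: the adapter FIB area, the firmware printf
 * buffer and the host RRQ all live inside struct aac_common.  The
 * completed structure is handed to the firmware further down via the
 * AAC_MONKER_INITSTRUCT synchronous command.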
*/ ip = &sc->aac_common->ac_init; ip->InitStructRevision = AAC_INIT_STRUCT_REVISION; if (sc->aac_max_fib_size > sizeof(struct aac_fib)) { ip->InitStructRevision = AAC_INIT_STRUCT_REVISION_4; sc->flags |= AAC_FLAGS_RAW_IO; } ip->NoOfMSIXVectors = sc->aac_max_msix; ip->AdapterFibsPhysicalAddress = sc->aac_common_busaddr + offsetof(struct aac_common, ac_fibs); ip->AdapterFibsVirtualAddress = 0; ip->AdapterFibsSize = AAC_ADAPTER_FIBS * sizeof(struct aac_fib); ip->AdapterFibAlign = sizeof(struct aac_fib); ip->PrintfBufferAddress = sc->aac_common_busaddr + offsetof(struct aac_common, ac_printf); ip->PrintfBufferSize = AAC_PRINTF_BUFSIZE; /* * The adapter assumes that pages are 4K in size, except on some * broken firmware versions that do the page->byte conversion twice, * therefore 'assuming' that this value is in 16MB units (2^24). * Round up since the granularity is so high. */ ip->HostPhysMemPages = ctob(physmem) / AAC_PAGE_SIZE; if (sc->flags & AAC_FLAGS_BROKEN_MEMMAP) { ip->HostPhysMemPages = (ip->HostPhysMemPages + AAC_PAGE_SIZE) / AAC_PAGE_SIZE; } ip->HostElapsedSeconds = time_uptime; /* reset later if invalid */ ip->InitFlags = AAC_INITFLAGS_NEW_COMM_SUPPORTED; if (sc->flags & AAC_FLAGS_NEW_COMM_TYPE1) { ip->InitStructRevision = AAC_INIT_STRUCT_REVISION_6; ip->InitFlags |= (AAC_INITFLAGS_NEW_COMM_TYPE1_SUPPORTED | AAC_INITFLAGS_FAST_JBOD_SUPPORTED); device_printf(sc->aac_dev, "New comm. interface type1 enabled\n"); } else if (sc->flags & AAC_FLAGS_NEW_COMM_TYPE2) { ip->InitStructRevision = AAC_INIT_STRUCT_REVISION_7; ip->InitFlags |= (AAC_INITFLAGS_NEW_COMM_TYPE2_SUPPORTED | AAC_INITFLAGS_FAST_JBOD_SUPPORTED); device_printf(sc->aac_dev, "New comm. interface type2 enabled\n"); } ip->MaxNumAif = sc->aac_max_aif; ip->HostRRQ_AddrLow = sc->aac_common_busaddr + offsetof(struct aac_common, ac_host_rrq); /* always 32-bit address */ ip->HostRRQ_AddrHigh = 0; if (sc->aac_support_opt2 & AAC_SUPPORTED_POWER_MANAGEMENT) { ip->InitFlags |= AAC_INITFLAGS_DRIVER_SUPPORTS_PM; ip->InitFlags |= AAC_INITFLAGS_DRIVER_USES_UTC_TIME; device_printf(sc->aac_dev, "Power Management enabled\n"); } ip->MaxIoCommands = sc->aac_max_fibs; ip->MaxIoSize = sc->aac_max_sectors << 9; ip->MaxFibSize = sc->aac_max_fib_size; /* * Do controller-type-specific initialisation */ AAC_MEM0_SETREG4(sc, AAC_SRC_ODBR_C, ~0); /* * Give the init structure to the controller. */ if (aacraid_sync_command(sc, AAC_MONKER_INITSTRUCT, sc->aac_common_busaddr + offsetof(struct aac_common, ac_init), 0, 0, 0, NULL, NULL)) { device_printf(sc->aac_dev, "error establishing init structure\n"); error = EIO; goto out; } /* * Check configuration issues */ if ((error = aac_check_config(sc)) != 0) goto out; error = 0; out: return(error); } static void aac_define_int_mode(struct aac_softc *sc) { device_t dev; int cap, msi_count, error = 0; uint32_t val; dev = sc->aac_dev; /* max. 
vectors from AAC_MONKER_GETCOMMPREF */ if (sc->aac_max_msix == 0) { sc->aac_max_msix = 1; sc->aac_vector_cap = sc->aac_max_fibs; return; } /* OS capability */ msi_count = pci_msix_count(dev); if (msi_count > AAC_MAX_MSIX) msi_count = AAC_MAX_MSIX; if (msi_count > sc->aac_max_msix) msi_count = sc->aac_max_msix; if (msi_count == 0 || (error = pci_alloc_msix(dev, &msi_count)) != 0) { device_printf(dev, "alloc msix failed - msi_count=%d, err=%d; " "will try MSI\n", msi_count, error); pci_release_msi(dev); } else { sc->msi_enabled = TRUE; device_printf(dev, "using MSI-X interrupts (%u vectors)\n", msi_count); } if (!sc->msi_enabled) { msi_count = 1; if ((error = pci_alloc_msi(dev, &msi_count)) != 0) { device_printf(dev, "alloc msi failed - err=%d; " "will use INTx\n", error); pci_release_msi(dev); } else { sc->msi_enabled = TRUE; device_printf(dev, "using MSI interrupts\n"); } } if (sc->msi_enabled) { /* now read controller capability from PCI config. space */ cap = aac_find_pci_capability(sc, PCIY_MSIX); val = (cap != 0 ? pci_read_config(dev, cap + 2, 2) : 0); if (!(val & AAC_PCI_MSI_ENABLE)) { pci_release_msi(dev); sc->msi_enabled = FALSE; } } if (!sc->msi_enabled) { device_printf(dev, "using legacy interrupts\n"); sc->aac_max_msix = 1; } else { AAC_ACCESS_DEVREG(sc, AAC_ENABLE_MSIX); if (sc->aac_max_msix > msi_count) sc->aac_max_msix = msi_count; } sc->aac_vector_cap = sc->aac_max_fibs / sc->aac_max_msix; fwprintf(sc, HBA_FLAGS_DBG_DEBUG_B, "msi_enabled %d vector_cap %d max_fibs %d max_msix %d", sc->msi_enabled,sc->aac_vector_cap, sc->aac_max_fibs, sc->aac_max_msix); } static int aac_find_pci_capability(struct aac_softc *sc, int cap) { device_t dev; uint32_t status; uint8_t ptr; dev = sc->aac_dev; status = pci_read_config(dev, PCIR_STATUS, 2); if (!(status & PCIM_STATUS_CAPPRESENT)) return (0); status = pci_read_config(dev, PCIR_HDRTYPE, 1); switch (status & PCIM_HDRTYPE) { case 0: case 1: ptr = PCIR_CAP_PTR; break; case 2: ptr = PCIR_CAP_PTR_2; break; default: return (0); break; } ptr = pci_read_config(dev, ptr, 1); while (ptr != 0) { int next, val; next = pci_read_config(dev, ptr + PCICAP_NEXTPTR, 1); val = pci_read_config(dev, ptr + PCICAP_ID, 1); if (val == cap) return (ptr); ptr = next; } return (0); } static int aac_setup_intr(struct aac_softc *sc) { int i, msi_count, rid; struct resource *res; void *tag; msi_count = sc->aac_max_msix; rid = (sc->msi_enabled ? 
1:0); for (i = 0; i < msi_count; i++, rid++) { if ((res = bus_alloc_resource_any(sc->aac_dev,SYS_RES_IRQ, &rid, RF_SHAREABLE | RF_ACTIVE)) == NULL) { device_printf(sc->aac_dev,"can't allocate interrupt\n"); return (EINVAL); } sc->aac_irq_rid[i] = rid; sc->aac_irq[i] = res; if (aac_bus_setup_intr(sc->aac_dev, res, INTR_MPSAFE | INTR_TYPE_BIO, NULL, aacraid_new_intr_type1, &sc->aac_msix[i], &tag)) { device_printf(sc->aac_dev, "can't set up interrupt\n"); return (EINVAL); } sc->aac_msix[i].vector_no = i; sc->aac_msix[i].sc = sc; sc->aac_intr[i] = tag; } return (0); } static int aac_check_config(struct aac_softc *sc) { struct aac_fib *fib; struct aac_cnt_config *ccfg; struct aac_cf_status_hdr *cf_shdr; int rval; mtx_lock(&sc->aac_io_lock); aac_alloc_sync_fib(sc, &fib); ccfg = (struct aac_cnt_config *)&fib->data[0]; bzero(ccfg, sizeof (*ccfg) - CT_PACKET_SIZE); ccfg->Command = VM_ContainerConfig; ccfg->CTCommand.command = CT_GET_CONFIG_STATUS; ccfg->CTCommand.param[CNT_SIZE] = sizeof(struct aac_cf_status_hdr); rval = aac_sync_fib(sc, ContainerCommand, 0, fib, sizeof (struct aac_cnt_config)); cf_shdr = (struct aac_cf_status_hdr *)ccfg->CTCommand.data; if (rval == 0 && ccfg->Command == ST_OK && ccfg->CTCommand.param[0] == CT_OK) { if (cf_shdr->action <= CFACT_PAUSE) { bzero(ccfg, sizeof (*ccfg) - CT_PACKET_SIZE); ccfg->Command = VM_ContainerConfig; ccfg->CTCommand.command = CT_COMMIT_CONFIG; rval = aac_sync_fib(sc, ContainerCommand, 0, fib, sizeof (struct aac_cnt_config)); if (rval == 0 && ccfg->Command == ST_OK && ccfg->CTCommand.param[0] == CT_OK) { /* successful completion */ rval = 0; } else { /* auto commit aborted due to error(s) */ rval = -2; } } else { /* auto commit aborted due to adapter indicating config. issues too dangerous to auto commit */ rval = -3; } } else { /* error */ rval = -1; } aac_release_sync_fib(sc); mtx_unlock(&sc->aac_io_lock); return(rval); } /* * Send a synchronous command to the controller and wait for a result. * Indicate if the controller completed the command with an error status. 
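 *
 * The handshake is: write the command and four arguments into the
 * mailbox registers, ring the AAC_DB_SYNC_COMMAND doorbell, spin until
 * the controller raises the same bit in the interrupt status register
 * (bounded by AAC_SYNC_TIMEOUT), then read the completion status from
 * mailbox 0 and an optional extra return value from mailbox 1.  A
 * typical call, as used elsewhere in this file:
 *
 *	if (aacraid_sync_command(sc, AAC_MONKER_GETINFO,
 *	    0, 0, 0, 0, &status, NULL) == 0)
 *		options = AAC_GET_MAILBOX(sc, 1);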
*/ int aacraid_sync_command(struct aac_softc *sc, u_int32_t command, u_int32_t arg0, u_int32_t arg1, u_int32_t arg2, u_int32_t arg3, u_int32_t *sp, u_int32_t *r1) { time_t then; u_int32_t status; fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); /* populate the mailbox */ AAC_SET_MAILBOX(sc, command, arg0, arg1, arg2, arg3); /* ensure the sync command doorbell flag is cleared */ if (!sc->msi_enabled) AAC_CLEAR_ISTATUS(sc, AAC_DB_SYNC_COMMAND); /* then set it to signal the adapter */ AAC_QNOTIFY(sc, AAC_DB_SYNC_COMMAND); if ((command != AAC_MONKER_SYNCFIB) || (sp == NULL) || (*sp != 0)) { /* spin waiting for the command to complete */ then = time_uptime; do { if (time_uptime > (then + AAC_SYNC_TIMEOUT)) { fwprintf(sc, HBA_FLAGS_DBG_ERROR_B, "timed out"); return(EIO); } } while (!(AAC_GET_ISTATUS(sc) & AAC_DB_SYNC_COMMAND)); /* clear the completion flag */ AAC_CLEAR_ISTATUS(sc, AAC_DB_SYNC_COMMAND); /* get the command status */ status = AAC_GET_MAILBOX(sc, 0); if (sp != NULL) *sp = status; /* return parameter */ if (r1 != NULL) *r1 = AAC_GET_MAILBOX(sc, 1); if (status != AAC_SRB_STS_SUCCESS) return (-1); } return(0); } static int aac_sync_fib(struct aac_softc *sc, u_int32_t command, u_int32_t xferstate, struct aac_fib *fib, u_int16_t datasize) { fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); mtx_assert(&sc->aac_io_lock, MA_OWNED); if (datasize > AAC_FIB_DATASIZE) return(EINVAL); /* * Set up the sync FIB */ fib->Header.XferState = AAC_FIBSTATE_HOSTOWNED | AAC_FIBSTATE_INITIALISED | AAC_FIBSTATE_EMPTY; fib->Header.XferState |= xferstate; fib->Header.Command = command; fib->Header.StructType = AAC_FIBTYPE_TFIB; fib->Header.Size = sizeof(struct aac_fib_header) + datasize; fib->Header.SenderSize = sizeof(struct aac_fib); fib->Header.SenderFibAddress = 0; /* Not needed */ fib->Header.u.ReceiverFibAddress = sc->aac_common_busaddr + offsetof(struct aac_common, ac_sync_fib); /* * Give the FIB to the controller, wait for a response. */ if (aacraid_sync_command(sc, AAC_MONKER_SYNCFIB, fib->Header.u.ReceiverFibAddress, 0, 0, 0, NULL, NULL)) { fwprintf(sc, HBA_FLAGS_DBG_ERROR_B, "IO error"); return(EIO); } return (0); } /* * Check for commands that have been outstanding for a suspiciously long time, * and complain about them. */ static void aac_timeout(struct aac_softc *sc) { struct aac_command *cm; time_t deadline; int timedout; fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); /* * Traverse the busy command list, bitch about late commands once * only. */ timedout = 0; deadline = time_uptime - AAC_CMD_TIMEOUT; TAILQ_FOREACH(cm, &sc->aac_busy, cm_link) { if (cm->cm_timestamp < deadline) { device_printf(sc->aac_dev, "COMMAND %p TIMEOUT AFTER %d SECONDS\n", cm, (int)(time_uptime-cm->cm_timestamp)); AAC_PRINT_FIB(sc, cm->cm_fib); timedout++; } } if (timedout) aac_reset_adapter(sc); aacraid_print_queues(sc); } /* * Interface Function Vectors */ /* * Read the current firmware status word. 
*/ static int aac_src_get_fwstatus(struct aac_softc *sc) { fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); return(AAC_MEM0_GETREG4(sc, AAC_SRC_OMR)); } /* * Notify the controller of a change in a given queue */ static void aac_src_qnotify(struct aac_softc *sc, int qbit) { fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); AAC_MEM0_SETREG4(sc, AAC_SRC_IDBR, qbit << AAC_SRC_IDR_SHIFT); } /* * Get the interrupt reason bits */ static int aac_src_get_istatus(struct aac_softc *sc) { int val; fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); if (sc->msi_enabled) { val = AAC_MEM0_GETREG4(sc, AAC_SRC_ODBR_MSI); if (val & AAC_MSI_SYNC_STATUS) val = AAC_DB_SYNC_COMMAND; else val = 0; } else { val = AAC_MEM0_GETREG4(sc, AAC_SRC_ODBR_R) >> AAC_SRC_ODR_SHIFT; } return(val); } /* * Clear some interrupt reason bits */ static void aac_src_clear_istatus(struct aac_softc *sc, int mask) { fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); if (sc->msi_enabled) { if (mask == AAC_DB_SYNC_COMMAND) AAC_ACCESS_DEVREG(sc, AAC_CLEAR_SYNC_BIT); } else { AAC_MEM0_SETREG4(sc, AAC_SRC_ODBR_C, mask << AAC_SRC_ODR_SHIFT); } } /* * Populate the mailbox and set the command word */ static void aac_src_set_mailbox(struct aac_softc *sc, u_int32_t command, u_int32_t arg0, u_int32_t arg1, u_int32_t arg2, u_int32_t arg3) { fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); AAC_MEM0_SETREG4(sc, AAC_SRC_MAILBOX, command); AAC_MEM0_SETREG4(sc, AAC_SRC_MAILBOX + 4, arg0); AAC_MEM0_SETREG4(sc, AAC_SRC_MAILBOX + 8, arg1); AAC_MEM0_SETREG4(sc, AAC_SRC_MAILBOX + 12, arg2); AAC_MEM0_SETREG4(sc, AAC_SRC_MAILBOX + 16, arg3); } static void aac_srcv_set_mailbox(struct aac_softc *sc, u_int32_t command, u_int32_t arg0, u_int32_t arg1, u_int32_t arg2, u_int32_t arg3) { fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); AAC_MEM0_SETREG4(sc, AAC_SRCV_MAILBOX, command); AAC_MEM0_SETREG4(sc, AAC_SRCV_MAILBOX + 4, arg0); AAC_MEM0_SETREG4(sc, AAC_SRCV_MAILBOX + 8, arg1); AAC_MEM0_SETREG4(sc, AAC_SRCV_MAILBOX + 12, arg2); AAC_MEM0_SETREG4(sc, AAC_SRCV_MAILBOX + 16, arg3); } /* * Fetch the immediate command status word */ static int aac_src_get_mailbox(struct aac_softc *sc, int mb) { fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); return(AAC_MEM0_GETREG4(sc, AAC_SRC_MAILBOX + (mb * 4))); } static int aac_srcv_get_mailbox(struct aac_softc *sc, int mb) { fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); return(AAC_MEM0_GETREG4(sc, AAC_SRCV_MAILBOX + (mb * 4))); } /* * Set/clear interrupt masks */ static void aac_src_access_devreg(struct aac_softc *sc, int mode) { u_int32_t val; fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); switch (mode) { case AAC_ENABLE_INTERRUPT: AAC_MEM0_SETREG4(sc, AAC_SRC_OIMR, (sc->msi_enabled ? AAC_INT_ENABLE_TYPE1_MSIX : AAC_INT_ENABLE_TYPE1_INTX)); break; case AAC_DISABLE_INTERRUPT: AAC_MEM0_SETREG4(sc, AAC_SRC_OIMR, AAC_INT_DISABLE_ALL); break; case AAC_ENABLE_MSIX: /* set bit 6 */ val = AAC_MEM0_GETREG4(sc, AAC_SRC_IDBR); val |= 0x40; AAC_MEM0_SETREG4(sc, AAC_SRC_IDBR, val); AAC_MEM0_GETREG4(sc, AAC_SRC_IDBR); /* unmask int. 
*/ val = PMC_ALL_INTERRUPT_BITS; AAC_MEM0_SETREG4(sc, AAC_SRC_IOAR, val); val = AAC_MEM0_GETREG4(sc, AAC_SRC_OIMR); AAC_MEM0_SETREG4(sc, AAC_SRC_OIMR, val & (~(PMC_GLOBAL_INT_BIT2 | PMC_GLOBAL_INT_BIT0))); break; case AAC_DISABLE_MSIX: /* reset bit 6 */ val = AAC_MEM0_GETREG4(sc, AAC_SRC_IDBR); val &= ~0x40; AAC_MEM0_SETREG4(sc, AAC_SRC_IDBR, val); AAC_MEM0_GETREG4(sc, AAC_SRC_IDBR); break; case AAC_CLEAR_AIF_BIT: /* set bit 5 */ val = AAC_MEM0_GETREG4(sc, AAC_SRC_IDBR); val |= 0x20; AAC_MEM0_SETREG4(sc, AAC_SRC_IDBR, val); AAC_MEM0_GETREG4(sc, AAC_SRC_IDBR); break; case AAC_CLEAR_SYNC_BIT: /* set bit 4 */ val = AAC_MEM0_GETREG4(sc, AAC_SRC_IDBR); val |= 0x10; AAC_MEM0_SETREG4(sc, AAC_SRC_IDBR, val); AAC_MEM0_GETREG4(sc, AAC_SRC_IDBR); break; case AAC_ENABLE_INTX: /* set bit 7 */ val = AAC_MEM0_GETREG4(sc, AAC_SRC_IDBR); val |= 0x80; AAC_MEM0_SETREG4(sc, AAC_SRC_IDBR, val); AAC_MEM0_GETREG4(sc, AAC_SRC_IDBR); /* unmask int. */ val = PMC_ALL_INTERRUPT_BITS; AAC_MEM0_SETREG4(sc, AAC_SRC_IOAR, val); val = AAC_MEM0_GETREG4(sc, AAC_SRC_OIMR); AAC_MEM0_SETREG4(sc, AAC_SRC_OIMR, val & (~(PMC_GLOBAL_INT_BIT2))); break; default: break; } } /* * New comm. interface: Send command functions */ static int aac_src_send_command(struct aac_softc *sc, struct aac_command *cm) { struct aac_fib_xporthdr *pFibX; u_int32_t fibsize, high_addr; u_int64_t address; fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "send command (new comm. type1)"); if (sc->msi_enabled && cm->cm_fib->Header.Command != AifRequest && sc->aac_max_msix > 1) { u_int16_t vector_no, first_choice = 0xffff; vector_no = sc->aac_fibs_pushed_no % sc->aac_max_msix; do { vector_no += 1; if (vector_no == sc->aac_max_msix) vector_no = 1; if (sc->aac_rrq_outstanding[vector_no] < sc->aac_vector_cap) break; if (0xffff == first_choice) first_choice = vector_no; else if (vector_no == first_choice) break; } while (1); if (vector_no == first_choice) vector_no = 0; sc->aac_rrq_outstanding[vector_no]++; if (sc->aac_fibs_pushed_no == 0xffffffff) sc->aac_fibs_pushed_no = 0; else sc->aac_fibs_pushed_no++; cm->cm_fib->Header.Handle += (vector_no << 16); } if (sc->flags & AAC_FLAGS_NEW_COMM_TYPE2) { /* Calculate the amount to the fibsize bits */ fibsize = (cm->cm_fib->Header.Size + 127) / 128 - 1; /* Fill new FIB header */ address = cm->cm_fibphys; high_addr = (u_int32_t)(address >> 32); if (high_addr == 0L) { cm->cm_fib->Header.StructType = AAC_FIBTYPE_TFIB2; cm->cm_fib->Header.u.TimeStamp = 0L; } else { cm->cm_fib->Header.StructType = AAC_FIBTYPE_TFIB2_64; cm->cm_fib->Header.u.SenderFibAddressHigh = high_addr; } cm->cm_fib->Header.SenderFibAddress = (u_int32_t)address; } else { /* Calculate the amount to the fibsize bits */ fibsize = (sizeof(struct aac_fib_xporthdr) + cm->cm_fib->Header.Size + 127) / 128 - 1; /* Fill XPORT header */ pFibX = (struct aac_fib_xporthdr *) ((unsigned char *)cm->cm_fib - sizeof(struct aac_fib_xporthdr)); pFibX->Handle = cm->cm_fib->Header.Handle; pFibX->HostAddress = cm->cm_fibphys; pFibX->Size = cm->cm_fib->Header.Size; address = cm->cm_fibphys - sizeof(struct aac_fib_xporthdr); high_addr = (u_int32_t)(address >> 32); } if (fibsize > 31) fibsize = 31; aac_enqueue_busy(cm); if (high_addr) { AAC_MEM0_SETREG4(sc, AAC_SRC_IQUE64_H, high_addr); AAC_MEM0_SETREG4(sc, AAC_SRC_IQUE64_L, (u_int32_t)address + fibsize); } else { AAC_MEM0_SETREG4(sc, AAC_SRC_IQUE32, (u_int32_t)address + fibsize); } return 0; } /* * New comm. 
interface: get, set outbound queue index */ static int aac_src_get_outb_queue(struct aac_softc *sc) { fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); return(-1); } static void aac_src_set_outb_queue(struct aac_softc *sc, int index) { fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); } /* * Debugging and Diagnostics */ /* * Print some information about the controller. */ static void aac_describe_controller(struct aac_softc *sc) { struct aac_fib *fib; struct aac_adapter_info *info; char *adapter_type = "Adaptec RAID controller"; fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); mtx_lock(&sc->aac_io_lock); aac_alloc_sync_fib(sc, &fib); if (sc->supported_options & AAC_SUPPORTED_SUPPLEMENT_ADAPTER_INFO) { fib->data[0] = 0; if (aac_sync_fib(sc, RequestSupplementAdapterInfo, 0, fib, 1)) device_printf(sc->aac_dev, "RequestSupplementAdapterInfo failed\n"); else { struct aac_supplement_adapter_info *supp_info; supp_info = ((struct aac_supplement_adapter_info *)&fib->data[0]); adapter_type = (char *)supp_info->AdapterTypeText; sc->aac_feature_bits = supp_info->FeatureBits; sc->aac_support_opt2 = supp_info->SupportedOptions2; } } device_printf(sc->aac_dev, "%s, aacraid driver %d.%d.%d-%d\n", adapter_type, AAC_DRIVER_MAJOR_VERSION, AAC_DRIVER_MINOR_VERSION, AAC_DRIVER_BUGFIX_LEVEL, AAC_DRIVER_BUILD); fib->data[0] = 0; if (aac_sync_fib(sc, RequestAdapterInfo, 0, fib, 1)) { device_printf(sc->aac_dev, "RequestAdapterInfo failed\n"); aac_release_sync_fib(sc); mtx_unlock(&sc->aac_io_lock); return; } /* save the kernel revision structure for later use */ info = (struct aac_adapter_info *)&fib->data[0]; sc->aac_revision = info->KernelRevision; if (bootverbose) { device_printf(sc->aac_dev, "%s %dMHz, %dMB memory " "(%dMB cache, %dMB execution), %s\n", aac_describe_code(aac_cpu_variant, info->CpuVariant), info->ClockSpeed, info->TotalMem / (1024 * 1024), info->BufferMem / (1024 * 1024), info->ExecutionMem / (1024 * 1024), aac_describe_code(aac_battery_platform, info->batteryPlatform)); device_printf(sc->aac_dev, "Kernel %d.%d-%d, Build %d, S/N %6X\n", info->KernelRevision.external.comp.major, info->KernelRevision.external.comp.minor, info->KernelRevision.external.comp.dash, info->KernelRevision.buildNumber, (u_int32_t)(info->SerialNumber & 0xffffff)); device_printf(sc->aac_dev, "Supported Options=%b\n", sc->supported_options, "\20" "\1SNAPSHOT" "\2CLUSTERS" "\3WCACHE" "\4DATA64" "\5HOSTTIME" "\6RAID50" "\7WINDOW4GB" "\10SCSIUPGD" "\11SOFTERR" "\12NORECOND" "\13SGMAP64" "\14ALARM" "\15NONDASD" "\16SCSIMGT" "\17RAIDSCSI" "\21ADPTINFO" "\22NEWCOMM" "\23ARRAY64BIT" "\24HEATSENSOR"); } aac_release_sync_fib(sc); mtx_unlock(&sc->aac_io_lock); } /* * Look up a text description of a numeric error code and return a pointer to * same. 
*/ static char * aac_describe_code(struct aac_code_lookup *table, u_int32_t code) { int i; for (i = 0; table[i].string != NULL; i++) if (table[i].code == code) return(table[i].string); return(table[i + 1].string); } /* * Management Interface */ static int aac_open(struct cdev *dev, int flags, int fmt, struct thread *td) { struct aac_softc *sc; sc = dev->si_drv1; fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); #if __FreeBSD_version >= 702000 device_busy(sc->aac_dev); devfs_set_cdevpriv(sc, aac_cdevpriv_dtor); #endif return 0; } static int aac_ioctl(struct cdev *dev, u_long cmd, caddr_t arg, int flag, struct thread *td) { union aac_statrequest *as; struct aac_softc *sc; int error = 0; as = (union aac_statrequest *)arg; sc = dev->si_drv1; fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); switch (cmd) { case AACIO_STATS: switch (as->as_item) { case AACQ_FREE: case AACQ_READY: case AACQ_BUSY: bcopy(&sc->aac_qstat[as->as_item], &as->as_qstat, sizeof(struct aac_qstat)); break; default: error = ENOENT; break; } break; case FSACTL_SENDFIB: case FSACTL_SEND_LARGE_FIB: arg = *(caddr_t*)arg; case FSACTL_LNX_SENDFIB: case FSACTL_LNX_SEND_LARGE_FIB: fwprintf(sc, HBA_FLAGS_DBG_IOCTL_COMMANDS_B, "FSACTL_SENDFIB"); error = aac_ioctl_sendfib(sc, arg); break; case FSACTL_SEND_RAW_SRB: arg = *(caddr_t*)arg; case FSACTL_LNX_SEND_RAW_SRB: fwprintf(sc, HBA_FLAGS_DBG_IOCTL_COMMANDS_B, "FSACTL_SEND_RAW_SRB"); error = aac_ioctl_send_raw_srb(sc, arg); break; case FSACTL_AIF_THREAD: case FSACTL_LNX_AIF_THREAD: fwprintf(sc, HBA_FLAGS_DBG_IOCTL_COMMANDS_B, "FSACTL_AIF_THREAD"); error = EINVAL; break; case FSACTL_OPEN_GET_ADAPTER_FIB: arg = *(caddr_t*)arg; case FSACTL_LNX_OPEN_GET_ADAPTER_FIB: fwprintf(sc, HBA_FLAGS_DBG_IOCTL_COMMANDS_B, "FSACTL_OPEN_GET_ADAPTER_FIB"); error = aac_open_aif(sc, arg); break; case FSACTL_GET_NEXT_ADAPTER_FIB: arg = *(caddr_t*)arg; case FSACTL_LNX_GET_NEXT_ADAPTER_FIB: fwprintf(sc, HBA_FLAGS_DBG_IOCTL_COMMANDS_B, "FSACTL_GET_NEXT_ADAPTER_FIB"); error = aac_getnext_aif(sc, arg); break; case FSACTL_CLOSE_GET_ADAPTER_FIB: arg = *(caddr_t*)arg; case FSACTL_LNX_CLOSE_GET_ADAPTER_FIB: fwprintf(sc, HBA_FLAGS_DBG_IOCTL_COMMANDS_B, "FSACTL_CLOSE_GET_ADAPTER_FIB"); error = aac_close_aif(sc, arg); break; case FSACTL_MINIPORT_REV_CHECK: arg = *(caddr_t*)arg; case FSACTL_LNX_MINIPORT_REV_CHECK: fwprintf(sc, HBA_FLAGS_DBG_IOCTL_COMMANDS_B, "FSACTL_MINIPORT_REV_CHECK"); error = aac_rev_check(sc, arg); break; case FSACTL_QUERY_DISK: arg = *(caddr_t*)arg; case FSACTL_LNX_QUERY_DISK: fwprintf(sc, HBA_FLAGS_DBG_IOCTL_COMMANDS_B, "FSACTL_QUERY_DISK"); error = aac_query_disk(sc, arg); break; case FSACTL_DELETE_DISK: case FSACTL_LNX_DELETE_DISK: /* * We don't trust the userland to tell us when to delete a * container, rather we rely on an AIF coming from the * controller */ error = 0; break; case FSACTL_GET_PCI_INFO: arg = *(caddr_t*)arg; case FSACTL_LNX_GET_PCI_INFO: fwprintf(sc, HBA_FLAGS_DBG_IOCTL_COMMANDS_B, "FSACTL_GET_PCI_INFO"); error = aac_get_pci_info(sc, arg); break; case FSACTL_GET_FEATURES: arg = *(caddr_t*)arg; case FSACTL_LNX_GET_FEATURES: fwprintf(sc, HBA_FLAGS_DBG_IOCTL_COMMANDS_B, "FSACTL_GET_FEATURES"); error = aac_supported_features(sc, arg); break; default: fwprintf(sc, HBA_FLAGS_DBG_IOCTL_COMMANDS_B, "unsupported cmd 0x%lx\n", cmd); error = EINVAL; break; } return(error); } static int aac_poll(struct cdev *dev, int poll_events, struct thread *td) { struct aac_softc *sc; struct aac_fib_context *ctx; int revents; sc = dev->si_drv1; revents = 0; mtx_lock(&sc->aac_io_lock); if ((poll_events &
(POLLRDNORM | POLLIN)) != 0) { for (ctx = sc->fibctx; ctx; ctx = ctx->next) { if (ctx->ctx_idx != sc->aifq_idx || ctx->ctx_wrap) { revents |= poll_events & (POLLIN | POLLRDNORM); break; } } } mtx_unlock(&sc->aac_io_lock); if (revents == 0) { if (poll_events & (POLLIN | POLLRDNORM)) selrecord(td, &sc->rcv_select); } return (revents); } static void aac_ioctl_event(struct aac_softc *sc, struct aac_event *event, void *arg) { switch (event->ev_type) { case AAC_EVENT_CMFREE: mtx_assert(&sc->aac_io_lock, MA_OWNED); if (aacraid_alloc_command(sc, (struct aac_command **)arg)) { aacraid_add_event(sc, event); return; } free(event, M_AACRAIDBUF); wakeup(arg); break; default: break; } } /* * Send a FIB supplied from userspace */ static int aac_ioctl_sendfib(struct aac_softc *sc, caddr_t ufib) { struct aac_command *cm; int size, error; fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); cm = NULL; /* * Get a command */ mtx_lock(&sc->aac_io_lock); if (aacraid_alloc_command(sc, &cm)) { struct aac_event *event; event = malloc(sizeof(struct aac_event), M_AACRAIDBUF, M_NOWAIT | M_ZERO); if (event == NULL) { error = EBUSY; mtx_unlock(&sc->aac_io_lock); goto out; } event->ev_type = AAC_EVENT_CMFREE; event->ev_callback = aac_ioctl_event; event->ev_arg = &cm; aacraid_add_event(sc, event); msleep(cm, &sc->aac_io_lock, 0, "aacraid_ctlsfib", 0); } mtx_unlock(&sc->aac_io_lock); /* * Fetch the FIB header, then re-copy to get data as well. */ if ((error = copyin(ufib, cm->cm_fib, sizeof(struct aac_fib_header))) != 0) goto out; size = cm->cm_fib->Header.Size + sizeof(struct aac_fib_header); if (size > sc->aac_max_fib_size) { device_printf(sc->aac_dev, "incoming FIB oversized (%d > %d)\n", size, sc->aac_max_fib_size); size = sc->aac_max_fib_size; } if ((error = copyin(ufib, cm->cm_fib, size)) != 0) goto out; cm->cm_fib->Header.Size = size; cm->cm_timestamp = time_uptime; cm->cm_datalen = 0; /* * Pass the FIB to the controller, wait for it to complete. */ mtx_lock(&sc->aac_io_lock); error = aacraid_wait_command(cm); mtx_unlock(&sc->aac_io_lock); if (error != 0) { device_printf(sc->aac_dev, "aacraid_wait_command return %d\n", error); goto out; } /* * Copy the FIB and data back out to the caller. 
*/ size = cm->cm_fib->Header.Size; if (size > sc->aac_max_fib_size) { device_printf(sc->aac_dev, "outbound FIB oversized (%d > %d)\n", size, sc->aac_max_fib_size); size = sc->aac_max_fib_size; } error = copyout(cm->cm_fib, ufib, size); out: if (cm != NULL) { mtx_lock(&sc->aac_io_lock); aacraid_release_command(cm); mtx_unlock(&sc->aac_io_lock); } return(error); } /* * Send a passthrough FIB supplied from userspace */ static int aac_ioctl_send_raw_srb(struct aac_softc *sc, caddr_t arg) { struct aac_command *cm; struct aac_fib *fib; struct aac_srb *srbcmd; struct aac_srb *user_srb = (struct aac_srb *)arg; void *user_reply; int error, transfer_data = 0; bus_dmamap_t orig_map = 0; u_int32_t fibsize = 0; u_int64_t srb_sg_address; u_int32_t srb_sg_bytecount; fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); cm = NULL; mtx_lock(&sc->aac_io_lock); if (aacraid_alloc_command(sc, &cm)) { struct aac_event *event; event = malloc(sizeof(struct aac_event), M_AACRAIDBUF, M_NOWAIT | M_ZERO); if (event == NULL) { error = EBUSY; mtx_unlock(&sc->aac_io_lock); goto out; } event->ev_type = AAC_EVENT_CMFREE; event->ev_callback = aac_ioctl_event; event->ev_arg = &cm; aacraid_add_event(sc, event); msleep(cm, &sc->aac_io_lock, 0, "aacraid_ctlsraw", 0); } mtx_unlock(&sc->aac_io_lock); cm->cm_data = NULL; /* save original dma map */ orig_map = cm->cm_datamap; fib = cm->cm_fib; srbcmd = (struct aac_srb *)fib->data; if ((error = copyin((void *)&user_srb->data_len, &fibsize, sizeof (u_int32_t)) != 0)) goto out; if (fibsize > (sc->aac_max_fib_size-sizeof(struct aac_fib_header))) { error = EINVAL; goto out; } if ((error = copyin((void *)user_srb, srbcmd, fibsize) != 0)) goto out; srbcmd->function = 0; /* SRBF_ExecuteScsi */ srbcmd->retry_limit = 0; /* obsolete */ /* only one sg element from userspace supported */ if (srbcmd->sg_map.SgCount > 1) { error = EINVAL; goto out; } /* check fibsize */ if (fibsize == (sizeof(struct aac_srb) + srbcmd->sg_map.SgCount * sizeof(struct aac_sg_entry))) { struct aac_sg_entry *sgp = srbcmd->sg_map.SgEntry; struct aac_sg_entry sg; if ((error = copyin(sgp, &sg, sizeof(sg))) != 0) goto out; srb_sg_bytecount = sg.SgByteCount; srb_sg_address = (u_int64_t)sg.SgAddress; } else if (fibsize == (sizeof(struct aac_srb) + srbcmd->sg_map.SgCount * sizeof(struct aac_sg_entry64))) { #ifdef __LP64__ struct aac_sg_entry64 *sgp = (struct aac_sg_entry64 *)srbcmd->sg_map.SgEntry; struct aac_sg_entry64 sg; if ((error = copyin(sgp, &sg, sizeof(sg))) != 0) goto out; srb_sg_bytecount = sg.SgByteCount; srb_sg_address = sg.SgAddress; if (srb_sg_address > 0xffffffffull && !(sc->flags & AAC_FLAGS_SG_64BIT)) #endif { error = EINVAL; goto out; } } else { error = EINVAL; goto out; } user_reply = (char *)arg + fibsize; srbcmd->data_len = srb_sg_bytecount; if (srbcmd->sg_map.SgCount == 1) transfer_data = 1; if (transfer_data) { /* * Create DMA tag for the passthr. data buffer and allocate it. */ if (bus_dma_tag_create(sc->aac_parent_dmat, /* parent */ 1, 0, /* algnmnt, boundary */ (sc->flags & AAC_FLAGS_SG_64BIT) ? 
BUS_SPACE_MAXADDR_32BIT : 0x7fffffff, /* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filter, filterarg */ srb_sg_bytecount, /* size */ sc->aac_sg_tablesize, /* nsegments */ srb_sg_bytecount, /* maxsegsize */ 0, /* flags */ NULL, NULL, /* No locking needed */ &cm->cm_passthr_dmat)) { error = ENOMEM; goto out; } if (bus_dmamem_alloc(cm->cm_passthr_dmat, (void **)&cm->cm_data, BUS_DMA_NOWAIT, &cm->cm_datamap)) { error = ENOMEM; goto out; } /* fill some cm variables */ cm->cm_datalen = srb_sg_bytecount; if (srbcmd->flags & AAC_SRB_FLAGS_DATA_IN) cm->cm_flags |= AAC_CMD_DATAIN; if (srbcmd->flags & AAC_SRB_FLAGS_DATA_OUT) cm->cm_flags |= AAC_CMD_DATAOUT; if (srbcmd->flags & AAC_SRB_FLAGS_DATA_OUT) { if ((error = copyin((void *)(uintptr_t)srb_sg_address, cm->cm_data, cm->cm_datalen)) != 0) goto out; /* sync required for bus_dmamem_alloc() alloc. mem.? */ bus_dmamap_sync(cm->cm_passthr_dmat, cm->cm_datamap, BUS_DMASYNC_PREWRITE); } } /* build the FIB */ fib->Header.Size = sizeof(struct aac_fib_header) + sizeof(struct aac_srb); fib->Header.XferState = AAC_FIBSTATE_HOSTOWNED | AAC_FIBSTATE_INITIALISED | AAC_FIBSTATE_EMPTY | AAC_FIBSTATE_FROMHOST | AAC_FIBSTATE_REXPECTED | AAC_FIBSTATE_NORM | AAC_FIBSTATE_ASYNC; fib->Header.Command = (sc->flags & AAC_FLAGS_SG_64BIT) ? ScsiPortCommandU64 : ScsiPortCommand; cm->cm_sgtable = (struct aac_sg_table *)&srbcmd->sg_map; /* send command */ if (transfer_data) { bus_dmamap_load(cm->cm_passthr_dmat, cm->cm_datamap, cm->cm_data, cm->cm_datalen, aacraid_map_command_sg, cm, 0); } else { aacraid_map_command_sg(cm, NULL, 0, 0); } /* wait for completion */ mtx_lock(&sc->aac_io_lock); while (!(cm->cm_flags & AAC_CMD_COMPLETED)) msleep(cm, &sc->aac_io_lock, 0, "aacraid_ctlsrw2", 0); mtx_unlock(&sc->aac_io_lock); /* copy data */ if (transfer_data && (srbcmd->flags & AAC_SRB_FLAGS_DATA_IN)) { if ((error = copyout(cm->cm_data, (void *)(uintptr_t)srb_sg_address, cm->cm_datalen)) != 0) goto out; /* sync required for bus_dmamem_alloc() allocated mem.? */ bus_dmamap_sync(cm->cm_passthr_dmat, cm->cm_datamap, BUS_DMASYNC_POSTREAD); } /* status */ error = copyout(fib->data, user_reply, sizeof(struct aac_srb_response)); out: if (cm && cm->cm_data) { if (transfer_data) bus_dmamap_unload(cm->cm_passthr_dmat, cm->cm_datamap); bus_dmamem_free(cm->cm_passthr_dmat, cm->cm_data, cm->cm_datamap); cm->cm_datamap = orig_map; } if (cm && cm->cm_passthr_dmat) bus_dma_tag_destroy(cm->cm_passthr_dmat); if (cm) { mtx_lock(&sc->aac_io_lock); aacraid_release_command(cm); mtx_unlock(&sc->aac_io_lock); } return(error); } /* * Request an AIF from the controller (new comm. type1) */ static void aac_request_aif(struct aac_softc *sc) { struct aac_command *cm; struct aac_fib *fib; fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); if (aacraid_alloc_command(sc, &cm)) { sc->aif_pending = 1; return; } sc->aif_pending = 0; /* build the FIB */ fib = cm->cm_fib; fib->Header.Size = sizeof(struct aac_fib); fib->Header.XferState = AAC_FIBSTATE_HOSTOWNED | AAC_FIBSTATE_INITIALISED | AAC_FIBSTATE_EMPTY | AAC_FIBSTATE_FROMHOST | AAC_FIBSTATE_REXPECTED | AAC_FIBSTATE_NORM | AAC_FIBSTATE_ASYNC; /* set AIF marker */ fib->Header.Handle = 0x00800000; fib->Header.Command = AifRequest; ((struct aac_aif_command *)fib->data)->command = AifReqEvent; aacraid_map_command_sg(cm, NULL, 0, 0); } #if __FreeBSD_version >= 702000 /* * cdevpriv interface private destructor. 
*/ static void aac_cdevpriv_dtor(void *arg) { struct aac_softc *sc; sc = arg; fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); mtx_lock(&Giant); device_unbusy(sc->aac_dev); mtx_unlock(&Giant); } #else static int aac_close(struct cdev *dev, int flags, int fmt, struct thread *td) { struct aac_softc *sc; sc = dev->si_drv1; fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); return 0; } #endif /* * Handle an AIF sent to us by the controller; queue it for later reference. * If the queue fills up, then drop the older entries. */ static void aac_handle_aif(struct aac_softc *sc, struct aac_fib *fib) { struct aac_aif_command *aif; struct aac_container *co, *co_next; struct aac_fib_context *ctx; struct aac_fib *sync_fib; struct aac_mntinforesp mir; int next, current, found; int count = 0, changed = 0, i = 0; u_int32_t channel, uid; fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); aif = (struct aac_aif_command*)&fib->data[0]; aacraid_print_aif(sc, aif); /* Is it an event that we should care about? */ switch (aif->command) { case AifCmdEventNotify: switch (aif->data.EN.type) { case AifEnAddContainer: case AifEnDeleteContainer: /* * A container was added or deleted, but the message * doesn't tell us anything else! Re-enumerate the * containers and sort things out. */ aac_alloc_sync_fib(sc, &sync_fib); do { /* * Ask the controller for its containers one at * a time. * XXX What if the controller's list changes * midway through this enumeration? * XXX This should be done async. */ if (aac_get_container_info(sc, sync_fib, i, &mir, &uid) != 0) continue; if (i == 0) count = mir.MntRespCount; /* * Check the container against our list. * co->co_found was already set to 0 in a * previous run. */ if ((mir.Status == ST_OK) && (mir.MntTable[0].VolType != CT_NONE)) { found = 0; TAILQ_FOREACH(co, &sc->aac_container_tqh, co_link) { if (co->co_mntobj.ObjectId == mir.MntTable[0].ObjectId) { co->co_found = 1; found = 1; break; } } /* * If the container matched, continue * in the list. */ if (found) { i++; continue; } /* * This is a new container. Do all the * appropriate things to set it up. */ aac_add_container(sc, &mir, 1, uid); changed = 1; } i++; } while ((i < count) && (i < AAC_MAX_CONTAINERS)); aac_release_sync_fib(sc); /* * Go through our list of containers and see which ones * were not marked 'found'. Since the controller didn't * list them they must have been deleted. Do the * appropriate steps to destroy the device. Also reset * the co->co_found field.
*/ co = TAILQ_FIRST(&sc->aac_container_tqh); while (co != NULL) { if (co->co_found == 0) { co_next = TAILQ_NEXT(co, co_link); TAILQ_REMOVE(&sc->aac_container_tqh, co, co_link); free(co, M_AACRAIDBUF); changed = 1; co = co_next; } else { co->co_found = 0; co = TAILQ_NEXT(co, co_link); } } /* Attach the newly created containers */ if (changed) { if (sc->cam_rescan_cb != NULL) sc->cam_rescan_cb(sc, 0, AAC_CAM_TARGET_WILDCARD); } break; case AifEnEnclosureManagement: switch (aif->data.EN.data.EEE.eventType) { case AIF_EM_DRIVE_INSERTION: case AIF_EM_DRIVE_REMOVAL: channel = aif->data.EN.data.EEE.unitID; if (sc->cam_rescan_cb != NULL) sc->cam_rescan_cb(sc, ((channel>>24) & 0xF) + 1, (channel & 0xFFFF)); break; } break; case AifEnAddJBOD: case AifEnDeleteJBOD: case AifRawDeviceRemove: channel = aif->data.EN.data.ECE.container; if (sc->cam_rescan_cb != NULL) sc->cam_rescan_cb(sc, ((channel>>24) & 0xF) + 1, AAC_CAM_TARGET_WILDCARD); break; default: break; } default: break; } /* Copy the AIF data to the AIF queue for ioctl retrieval */ current = sc->aifq_idx; next = (current + 1) % AAC_AIFQ_LENGTH; if (next == 0) sc->aifq_filled = 1; bcopy(fib, &sc->aac_aifq[current], sizeof(struct aac_fib)); /* modify AIF contexts */ if (sc->aifq_filled) { for (ctx = sc->fibctx; ctx; ctx = ctx->next) { if (next == ctx->ctx_idx) ctx->ctx_wrap = 1; else if (current == ctx->ctx_idx && ctx->ctx_wrap) ctx->ctx_idx = next; } } sc->aifq_idx = next; /* On the off chance that someone is sleeping for an aif... */ if (sc->aac_state & AAC_STATE_AIF_SLEEPER) wakeup(sc->aac_aifq); /* Wakeup any poll()ers */ selwakeuppri(&sc->rcv_select, PRIBIO); return; } /* * Return the Revision of the driver to userspace and check to see if the * userspace app is possibly compatible. This is extremely bogus since * our driver doesn't follow Adaptec's versioning system. Cheat by just * returning what the card reported. */ static int aac_rev_check(struct aac_softc *sc, caddr_t udata) { struct aac_rev_check rev_check; struct aac_rev_check_resp rev_check_resp; int error = 0; fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); /* * Copyin the revision struct from userspace */ if ((error = copyin(udata, (caddr_t)&rev_check, sizeof(struct aac_rev_check))) != 0) { return error; } fwprintf(sc, HBA_FLAGS_DBG_IOCTL_COMMANDS_B, "Userland revision= %d\n", rev_check.callingRevision.buildNumber); /* * Doctor up the response struct. 
*/ rev_check_resp.possiblyCompatible = 1; rev_check_resp.adapterSWRevision.external.comp.major = AAC_DRIVER_MAJOR_VERSION; rev_check_resp.adapterSWRevision.external.comp.minor = AAC_DRIVER_MINOR_VERSION; rev_check_resp.adapterSWRevision.external.comp.type = AAC_DRIVER_TYPE; rev_check_resp.adapterSWRevision.external.comp.dash = AAC_DRIVER_BUGFIX_LEVEL; rev_check_resp.adapterSWRevision.buildNumber = AAC_DRIVER_BUILD; return(copyout((caddr_t)&rev_check_resp, udata, sizeof(struct aac_rev_check_resp))); } /* * Pass the fib context to the caller */ static int aac_open_aif(struct aac_softc *sc, caddr_t arg) { struct aac_fib_context *fibctx, *ctx; int error = 0; fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); fibctx = malloc(sizeof(struct aac_fib_context), M_AACRAIDBUF, M_NOWAIT|M_ZERO); if (fibctx == NULL) return (ENOMEM); mtx_lock(&sc->aac_io_lock); /* all elements are already 0, add to queue */ if (sc->fibctx == NULL) sc->fibctx = fibctx; else { for (ctx = sc->fibctx; ctx->next; ctx = ctx->next) ; ctx->next = fibctx; fibctx->prev = ctx; } /* evaluate unique value */ fibctx->unique = (*(u_int32_t *)&fibctx & 0xffffffff); ctx = sc->fibctx; while (ctx != fibctx) { if (ctx->unique == fibctx->unique) { fibctx->unique++; ctx = sc->fibctx; } else { ctx = ctx->next; } } error = copyout(&fibctx->unique, (void *)arg, sizeof(u_int32_t)); mtx_unlock(&sc->aac_io_lock); if (error) aac_close_aif(sc, (caddr_t)ctx); return error; } /* * Close the caller's fib context */ static int aac_close_aif(struct aac_softc *sc, caddr_t arg) { struct aac_fib_context *ctx; fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); mtx_lock(&sc->aac_io_lock); for (ctx = sc->fibctx; ctx; ctx = ctx->next) { if (ctx->unique == *(uint32_t *)&arg) { if (ctx == sc->fibctx) sc->fibctx = NULL; else { ctx->prev->next = ctx->next; if (ctx->next) ctx->next->prev = ctx->prev; } break; } } if (ctx) free(ctx, M_AACRAIDBUF); mtx_unlock(&sc->aac_io_lock); return 0; } /* * Pass the caller the next AIF in their queue */ static int aac_getnext_aif(struct aac_softc *sc, caddr_t arg) { struct get_adapter_fib_ioctl agf; struct aac_fib_context *ctx; int error; fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); mtx_lock(&sc->aac_io_lock); if ((error = copyin(arg, &agf, sizeof(agf))) == 0) { for (ctx = sc->fibctx; ctx; ctx = ctx->next) { if (agf.AdapterFibContext == ctx->unique) break; } if (!ctx) { mtx_unlock(&sc->aac_io_lock); return (EFAULT); } error = aac_return_aif(sc, ctx, agf.AifFib); if (error == EAGAIN && agf.Wait) { fwprintf(sc, HBA_FLAGS_DBG_AIF_B, "aac_getnext_aif(): waiting for AIF"); sc->aac_state |= AAC_STATE_AIF_SLEEPER; while (error == EAGAIN) { mtx_unlock(&sc->aac_io_lock); error = tsleep(sc->aac_aifq, PRIBIO | PCATCH, "aacaif", 0); mtx_lock(&sc->aac_io_lock); if (error == 0) error = aac_return_aif(sc, ctx, agf.AifFib); } sc->aac_state &= ~AAC_STATE_AIF_SLEEPER; } } mtx_unlock(&sc->aac_io_lock); return(error); } /* * Hand the next AIF off the top of the queue out to userspace. 
*/ static int aac_return_aif(struct aac_softc *sc, struct aac_fib_context *ctx, caddr_t uptr) { int current, error; fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); current = ctx->ctx_idx; if (current == sc->aifq_idx && !ctx->ctx_wrap) { /* empty */ return (EAGAIN); } error = copyout(&sc->aac_aifq[current], (void *)uptr, sizeof(struct aac_fib)); if (error) device_printf(sc->aac_dev, "aac_return_aif: copyout returned %d\n", error); else { ctx->ctx_wrap = 0; ctx->ctx_idx = (current + 1) % AAC_AIFQ_LENGTH; } return(error); } static int aac_get_pci_info(struct aac_softc *sc, caddr_t uptr) { struct aac_pci_info { u_int32_t bus; u_int32_t slot; } pciinf; int error; fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); pciinf.bus = pci_get_bus(sc->aac_dev); pciinf.slot = pci_get_slot(sc->aac_dev); error = copyout((caddr_t)&pciinf, uptr, sizeof(struct aac_pci_info)); return (error); } static int aac_supported_features(struct aac_softc *sc, caddr_t uptr) { struct aac_features f; int error; fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); if ((error = copyin(uptr, &f, sizeof (f))) != 0) return (error); /* * When the management driver receives FSACTL_GET_FEATURES ioctl with * ALL zero in the featuresState, the driver will return the current * state of all the supported features; the data field will not be * valid. * When the management driver receives FSACTL_GET_FEATURES ioctl with * a specific bit set in the featuresState, the driver will return the * current state of this specific feature and whatever data is * associated with the feature in the data field, or perform whatever * action is needed as indicated in the data field. */ if (f.feat.fValue == 0) { f.feat.fBits.largeLBA = (sc->flags & AAC_FLAGS_LBA_64BIT) ? 1 : 0; f.feat.fBits.JBODSupport = 1; /* TODO: In the future, add other features state here as well */ } else { if (f.feat.fBits.largeLBA) f.feat.fBits.largeLBA = (sc->flags & AAC_FLAGS_LBA_64BIT) ? 1 : 0; /* TODO: Add other features state and data in the future */ } error = copyout(&f, uptr, sizeof (f)); return (error); } /* * Give the userland some information about the container. The AAC arch * expects the driver to be a SCSI passthrough type driver, so it expects * the containers to have b:t:l numbers. Fake it. */ static int aac_query_disk(struct aac_softc *sc, caddr_t uptr) { struct aac_query_disk query_disk; struct aac_container *co; int error, id; fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); mtx_lock(&sc->aac_io_lock); error = copyin(uptr, (caddr_t)&query_disk, sizeof(struct aac_query_disk)); if (error) { mtx_unlock(&sc->aac_io_lock); return (error); } id = query_disk.ContainerNumber; if (id == -1) { mtx_unlock(&sc->aac_io_lock); return (EINVAL); } TAILQ_FOREACH(co, &sc->aac_container_tqh, co_link) { if (co->co_mntobj.ObjectId == id) break; } if (co == NULL) { query_disk.Valid = 0; query_disk.Locked = 0; query_disk.Deleted = 1; /* XXX is this right?
*/ } else { query_disk.Valid = 1; query_disk.Locked = 1; query_disk.Deleted = 0; query_disk.Bus = device_get_unit(sc->aac_dev); query_disk.Target = 0; query_disk.Lun = 0; query_disk.UnMapped = 0; } error = copyout((caddr_t)&query_disk, uptr, sizeof(struct aac_query_disk)); mtx_unlock(&sc->aac_io_lock); return (error); } static void aac_container_bus(struct aac_softc *sc) { struct aac_sim *sim; device_t child; sim =(struct aac_sim *)malloc(sizeof(struct aac_sim), M_AACRAIDBUF, M_NOWAIT | M_ZERO); if (sim == NULL) { device_printf(sc->aac_dev, "No memory to add container bus\n"); panic("Out of memory?!"); } child = device_add_child(sc->aac_dev, "aacraidp", -1); if (child == NULL) { device_printf(sc->aac_dev, "device_add_child failed for container bus\n"); free(sim, M_AACRAIDBUF); panic("Out of memory?!"); } sim->TargetsPerBus = AAC_MAX_CONTAINERS; sim->BusNumber = 0; sim->BusType = CONTAINER_BUS; sim->InitiatorBusId = -1; sim->aac_sc = sc; sim->sim_dev = child; sim->aac_cam = NULL; device_set_ivars(child, sim); device_set_desc(child, "Container Bus"); TAILQ_INSERT_TAIL(&sc->aac_sim_tqh, sim, sim_link); /* device_set_desc(child, aac_describe_code(aac_container_types, mir->MntTable[0].VolType)); */ bus_generic_attach(sc->aac_dev); } static void aac_get_bus_info(struct aac_softc *sc) { struct aac_fib *fib; struct aac_ctcfg *c_cmd; struct aac_ctcfg_resp *c_resp; struct aac_vmioctl *vmi; struct aac_vmi_businf_resp *vmi_resp; struct aac_getbusinf businfo; struct aac_sim *caminf; device_t child; int i, error; mtx_lock(&sc->aac_io_lock); aac_alloc_sync_fib(sc, &fib); c_cmd = (struct aac_ctcfg *)&fib->data[0]; bzero(c_cmd, sizeof(struct aac_ctcfg)); c_cmd->Command = VM_ContainerConfig; c_cmd->cmd = CT_GET_SCSI_METHOD; c_cmd->param = 0; error = aac_sync_fib(sc, ContainerCommand, 0, fib, sizeof(struct aac_ctcfg)); if (error) { device_printf(sc->aac_dev, "Error %d sending " "VM_ContainerConfig command\n", error); aac_release_sync_fib(sc); mtx_unlock(&sc->aac_io_lock); return; } c_resp = (struct aac_ctcfg_resp *)&fib->data[0]; if (c_resp->Status != ST_OK) { device_printf(sc->aac_dev, "VM_ContainerConfig returned 0x%x\n", c_resp->Status); aac_release_sync_fib(sc); mtx_unlock(&sc->aac_io_lock); return; } sc->scsi_method_id = c_resp->param; vmi = (struct aac_vmioctl *)&fib->data[0]; bzero(vmi, sizeof(struct aac_vmioctl)); vmi->Command = VM_Ioctl; vmi->ObjType = FT_DRIVE; vmi->MethId = sc->scsi_method_id; vmi->ObjId = 0; vmi->IoctlCmd = GetBusInfo; error = aac_sync_fib(sc, ContainerCommand, 0, fib, sizeof(struct aac_vmi_businf_resp)); if (error) { device_printf(sc->aac_dev, "Error %d sending VMIoctl command\n", error); aac_release_sync_fib(sc); mtx_unlock(&sc->aac_io_lock); return; } vmi_resp = (struct aac_vmi_businf_resp *)&fib->data[0]; if (vmi_resp->Status != ST_OK) { device_printf(sc->aac_dev, "VM_Ioctl returned %d\n", vmi_resp->Status); aac_release_sync_fib(sc); mtx_unlock(&sc->aac_io_lock); return; } bcopy(&vmi_resp->BusInf, &businfo, sizeof(struct aac_getbusinf)); aac_release_sync_fib(sc); mtx_unlock(&sc->aac_io_lock); for (i = 0; i < businfo.BusCount; i++) { if (businfo.BusValid[i] != AAC_BUS_VALID) continue; caminf = (struct aac_sim *)malloc( sizeof(struct aac_sim), M_AACRAIDBUF, M_NOWAIT | M_ZERO); if (caminf == NULL) { device_printf(sc->aac_dev, "No memory to add passthrough bus %d\n", i); break; } child = device_add_child(sc->aac_dev, "aacraidp", -1); if (child == NULL) { device_printf(sc->aac_dev, "device_add_child failed for passthrough bus %d\n", i); free(caminf, M_AACRAIDBUF); break; } 
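/* Each valid physical bus reported by GetBusInfo gets its own "aacraidp" SIM child; these passthrough buses are numbered starting at 1, since bus number 0 is used by the logical container bus created in aac_container_bus(). */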
caminf->TargetsPerBus = businfo.TargetsPerBus; caminf->BusNumber = i+1; caminf->BusType = PASSTHROUGH_BUS; caminf->InitiatorBusId = businfo.InitiatorBusId[i]; caminf->aac_sc = sc; caminf->sim_dev = child; caminf->aac_cam = NULL; device_set_ivars(child, caminf); device_set_desc(child, "SCSI Passthrough Bus"); TAILQ_INSERT_TAIL(&sc->aac_sim_tqh, caminf, sim_link); } } /* * Check to see if the kernel is up and running. If we are in a * BlinkLED state, return the BlinkLED code. */ static u_int32_t aac_check_adapter_health(struct aac_softc *sc, u_int8_t *bled) { u_int32_t ret; ret = AAC_GET_FWSTATUS(sc); if (ret & AAC_UP_AND_RUNNING) ret = 0; else if (ret & AAC_KERNEL_PANIC && bled) *bled = (ret >> 16) & 0xff; return (ret); } /* * Once do an IOP reset, basically have to re-initialize the card as * if coming up from a cold boot, and the driver is responsible for * any IO that was outstanding to the adapter at the time of the IOP * RESET. And prepare the driver for IOP RESET by making the init code * modular with the ability to call it from multiple places. */ static int aac_reset_adapter(struct aac_softc *sc) { struct aac_command *cm; struct aac_fib *fib; struct aac_pause_command *pc; u_int32_t status, reset_mask, waitCount, max_msix_orig; int msi_enabled_orig; fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); mtx_assert(&sc->aac_io_lock, MA_OWNED); if (sc->aac_state & AAC_STATE_RESET) { device_printf(sc->aac_dev, "aac_reset_adapter() already in progress\n"); return (EINVAL); } sc->aac_state |= AAC_STATE_RESET; /* disable interrupt */ AAC_ACCESS_DEVREG(sc, AAC_DISABLE_INTERRUPT); /* * Abort all pending commands: * a) on the controller */ while ((cm = aac_dequeue_busy(sc)) != NULL) { cm->cm_flags |= AAC_CMD_RESET; /* is there a completion handler? */ if (cm->cm_complete != NULL) { cm->cm_complete(cm); } else { /* assume that someone is sleeping on this * command */ wakeup(cm); } } /* b) in the waiting queues */ while ((cm = aac_dequeue_ready(sc)) != NULL) { cm->cm_flags |= AAC_CMD_RESET; /* is there a completion handler? 
*/ if (cm->cm_complete != NULL) { cm->cm_complete(cm); } else { /* assume that someone is sleeping on this * command */ wakeup(cm); } } /* flush drives */ if (aac_check_adapter_health(sc, NULL) == 0) { mtx_unlock(&sc->aac_io_lock); (void) aacraid_shutdown(sc->aac_dev); mtx_lock(&sc->aac_io_lock); } /* execute IOP reset */ if (sc->aac_support_opt2 & AAC_SUPPORTED_MU_RESET) { AAC_MEM0_SETREG4(sc, AAC_IRCSR, AAC_IRCSR_CORES_RST); /* We need to wait for 5 seconds before accessing the MU again * 10000 * 100us = 1000,000us = 1000ms = 1s */ waitCount = 5 * 10000; while (waitCount) { DELAY(100); /* delay 100 microseconds */ waitCount--; } } else if ((aacraid_sync_command(sc, AAC_IOP_RESET_ALWAYS, 0, 0, 0, 0, &status, &reset_mask)) != 0) { /* call IOP_RESET for older firmware */ if ((aacraid_sync_command(sc, AAC_IOP_RESET, 0, 0, 0, 0, &status, NULL)) != 0) { if (status == AAC_SRB_STS_INVALID_REQUEST) device_printf(sc->aac_dev, "IOP_RESET not supported\n"); else /* probably timeout */ device_printf(sc->aac_dev, "IOP_RESET failed\n"); /* unwind aac_shutdown() */ aac_alloc_sync_fib(sc, &fib); pc = (struct aac_pause_command *)&fib->data[0]; pc->Command = VM_ContainerConfig; pc->Type = CT_PAUSE_IO; pc->Timeout = 1; pc->Min = 1; pc->NoRescan = 1; (void) aac_sync_fib(sc, ContainerCommand, 0, fib, sizeof (struct aac_pause_command)); aac_release_sync_fib(sc); goto finish; } } else if (sc->aac_support_opt2 & AAC_SUPPORTED_DOORBELL_RESET) { AAC_MEM0_SETREG4(sc, AAC_SRC_IDBR, reset_mask); /* * We need to wait for 5 seconds before accessing the doorbell * again, 10000 * 100us = 1000,000us = 1000ms = 1s */ waitCount = 5 * 10000; while (waitCount) { DELAY(100); /* delay 100 microseconds */ waitCount--; } } /* * Initialize the adapter. */ max_msix_orig = sc->aac_max_msix; msi_enabled_orig = sc->msi_enabled; sc->msi_enabled = FALSE; if (aac_check_firmware(sc) != 0) goto finish; if (!(sc->flags & AAC_FLAGS_SYNC_MODE)) { sc->aac_max_msix = max_msix_orig; if (msi_enabled_orig) { sc->msi_enabled = msi_enabled_orig; AAC_ACCESS_DEVREG(sc, AAC_ENABLE_MSIX); } mtx_unlock(&sc->aac_io_lock); aac_init(sc); mtx_lock(&sc->aac_io_lock); } finish: sc->aac_state &= ~AAC_STATE_RESET; AAC_ACCESS_DEVREG(sc, AAC_ENABLE_INTERRUPT); aacraid_startio(sc); return (0); } Index: head/sys/dev/advansys/advansys.c =================================================================== --- head/sys/dev/advansys/advansys.c (revision 327948) +++ head/sys/dev/advansys/advansys.c (revision 327949) @@ -1,1404 +1,1404 @@ /*- * Generic driver for the Advanced Systems Inc. SCSI controllers * Product specific probe and attach routines can be found in: * * i386/isa/adv_isa.c ABP5140, ABP542, ABP5150, ABP842, ABP852 * pci/adv_pci.c ABP920, ABP930, ABP930U, ABP930UA, ABP940, ABP940U, * ABP940UA, ABP950, ABP960, ABP960U, ABP960UA, * ABP970, ABP970U * * SPDX-License-Identifier: BSD-2-Clause-FreeBSD * * Copyright (c) 1996-2000 Justin Gibbs. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions, and the following disclaimer, * without modification, immediately at the beginning of the file. * 2. The name of the author may not be used to endorse or promote products * derived from this software without specific prior written permission. 
* * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ /*- * Ported from: * advansys.c - Linux Host Driver for AdvanSys SCSI Adapters * * Copyright (c) 1995-1997 Advanced System Products, Inc. * All Rights Reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that redistributions of source * code retain the above copyright notice and this comment without * modification. */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include static void adv_action(struct cam_sim *sim, union ccb *ccb); static void adv_execute_ccb(void *arg, bus_dma_segment_t *dm_segs, int nsegments, int error); static void adv_intr_locked(struct adv_softc *adv); static void adv_poll(struct cam_sim *sim); static void adv_run_doneq(struct adv_softc *adv); static struct adv_ccb_info * adv_alloc_ccb_info(struct adv_softc *adv); static void adv_destroy_ccb_info(struct adv_softc *adv, struct adv_ccb_info *cinfo); static __inline struct adv_ccb_info * adv_get_ccb_info(struct adv_softc *adv); static __inline void adv_free_ccb_info(struct adv_softc *adv, struct adv_ccb_info *cinfo); static __inline void adv_set_state(struct adv_softc *adv, adv_state state); static __inline void adv_clear_state(struct adv_softc *adv, union ccb* ccb); static void adv_clear_state_really(struct adv_softc *adv, union ccb* ccb); static __inline struct adv_ccb_info * adv_get_ccb_info(struct adv_softc *adv) { struct adv_ccb_info *cinfo; if (!dumping) mtx_assert(&adv->lock, MA_OWNED); if ((cinfo = SLIST_FIRST(&adv->free_ccb_infos)) != NULL) { SLIST_REMOVE_HEAD(&adv->free_ccb_infos, links); } else { cinfo = adv_alloc_ccb_info(adv); } return (cinfo); } static __inline void adv_free_ccb_info(struct adv_softc *adv, struct adv_ccb_info *cinfo) { if (!dumping) mtx_assert(&adv->lock, MA_OWNED); cinfo->state = ACCB_FREE; SLIST_INSERT_HEAD(&adv->free_ccb_infos, cinfo, links); } static __inline void adv_set_state(struct adv_softc *adv, adv_state state) { if (adv->state == 0) xpt_freeze_simq(adv->sim, /*count*/1); adv->state |= state; } static __inline void adv_clear_state(struct adv_softc *adv, union ccb* ccb) { if (adv->state != 0) adv_clear_state_really(adv, ccb); } static void adv_clear_state_really(struct adv_softc *adv, union ccb* ccb) { if (!dumping) mtx_assert(&adv->lock, MA_OWNED); if ((adv->state & ADV_BUSDMA_BLOCK_CLEARED) != 0) adv->state &= ~(ADV_BUSDMA_BLOCK_CLEARED|ADV_BUSDMA_BLOCK); if ((adv->state & ADV_RESOURCE_SHORTAGE) != 0) { int openings; openings = adv->max_openings - adv->cur_active - ADV_MIN_FREE_Q; if (openings >= adv->openings_needed) { adv->state &= ~ADV_RESOURCE_SHORTAGE; adv->openings_needed = 0; 
} } if ((adv->state & ADV_IN_TIMEOUT) != 0) { struct adv_ccb_info *cinfo; cinfo = (struct adv_ccb_info *)ccb->ccb_h.ccb_cinfo_ptr; if ((cinfo->state & ACCB_RECOVERY_CCB) != 0) { struct ccb_hdr *ccb_h; /* * We now traverse our list of pending CCBs * and reinstate their timeouts. */ ccb_h = LIST_FIRST(&adv->pending_ccbs); while (ccb_h != NULL) { cinfo = ccb_h->ccb_cinfo_ptr; callout_reset_sbt(&cinfo->timer, SBT_1MS * ccb_h->timeout, 0, adv_timeout, ccb_h, 0); ccb_h = LIST_NEXT(ccb_h, sim_links.le); } adv->state &= ~ADV_IN_TIMEOUT; device_printf(adv->dev, "No longer in timeout\n"); } } if (adv->state == 0) ccb->ccb_h.status |= CAM_RELEASE_SIMQ; } void adv_map(void *arg, bus_dma_segment_t *segs, int nseg, int error) { bus_addr_t* physaddr; physaddr = (bus_addr_t*)arg; *physaddr = segs->ds_addr; } static void adv_action(struct cam_sim *sim, union ccb *ccb) { struct adv_softc *adv; CAM_DEBUG(ccb->ccb_h.path, CAM_DEBUG_TRACE, ("adv_action\n")); adv = (struct adv_softc *)cam_sim_softc(sim); mtx_assert(&adv->lock, MA_OWNED); switch (ccb->ccb_h.func_code) { /* Common cases first */ case XPT_SCSI_IO: /* Execute the requested I/O operation */ { struct ccb_hdr *ccb_h; struct ccb_scsiio *csio; struct adv_ccb_info *cinfo; int error; ccb_h = &ccb->ccb_h; csio = &ccb->csio; cinfo = adv_get_ccb_info(adv); if (cinfo == NULL) panic("XXX Handle CCB info error!!!"); ccb_h->ccb_cinfo_ptr = cinfo; cinfo->ccb = ccb; error = bus_dmamap_load_ccb(adv->buffer_dmat, cinfo->dmamap, ccb, adv_execute_ccb, csio, /*flags*/0); if (error == EINPROGRESS) { /* * So as to maintain ordering, freeze the controller * queue until our mapping is returned. */ adv_set_state(adv, ADV_BUSDMA_BLOCK); } break; } case XPT_RESET_DEV: /* Bus Device Reset the specified SCSI device */ case XPT_ABORT: /* Abort the specified CCB */ /* XXX Implement */ ccb->ccb_h.status = CAM_REQ_INVALID; xpt_done(ccb); break; #define IS_CURRENT_SETTINGS(c) (c->type == CTS_TYPE_CURRENT_SETTINGS) #define IS_USER_SETTINGS(c) (c->type == CTS_TYPE_USER_SETTINGS) case XPT_SET_TRAN_SETTINGS: { struct ccb_trans_settings_scsi *scsi; struct ccb_trans_settings_spi *spi; struct ccb_trans_settings *cts; target_bit_vector targ_mask; struct adv_transinfo *tconf; u_int update_type; cts = &ccb->cts; targ_mask = ADV_TID_TO_TARGET_MASK(cts->ccb_h.target_id); update_type = 0; /* * The user must specify which type of settings he wishes * to change. 
*/ if (IS_CURRENT_SETTINGS(cts) && !IS_USER_SETTINGS(cts)) { tconf = &adv->tinfo[cts->ccb_h.target_id].current; update_type |= ADV_TRANS_GOAL; } else if (IS_USER_SETTINGS(cts) && !IS_CURRENT_SETTINGS(cts)) { tconf = &adv->tinfo[cts->ccb_h.target_id].user; update_type |= ADV_TRANS_USER; } else { ccb->ccb_h.status = CAM_REQ_INVALID; break; } scsi = &cts->proto_specific.scsi; spi = &cts->xport_specific.spi; if ((update_type & ADV_TRANS_GOAL) != 0) { if ((spi->valid & CTS_SPI_VALID_DISC) != 0) { if ((spi->flags & CTS_SPI_FLAGS_DISC_ENB) != 0) adv->disc_enable |= targ_mask; else adv->disc_enable &= ~targ_mask; adv_write_lram_8(adv, ADVV_DISC_ENABLE_B, adv->disc_enable); } if ((scsi->valid & CTS_SCSI_VALID_TQ) != 0) { if ((scsi->flags & CTS_SCSI_FLAGS_TAG_ENB) != 0) adv->cmd_qng_enabled |= targ_mask; else adv->cmd_qng_enabled &= ~targ_mask; } } if ((update_type & ADV_TRANS_USER) != 0) { if ((spi->valid & CTS_SPI_VALID_DISC) != 0) { if ((spi->flags & CTS_SPI_VALID_DISC) != 0) adv->user_disc_enable |= targ_mask; else adv->user_disc_enable &= ~targ_mask; } if ((scsi->valid & CTS_SCSI_VALID_TQ) != 0) { if ((scsi->flags & CTS_SCSI_FLAGS_TAG_ENB) != 0) adv->user_cmd_qng_enabled |= targ_mask; else adv->user_cmd_qng_enabled &= ~targ_mask; } } /* * If the user specifies either the sync rate, or offset, * but not both, the unspecified parameter defaults to its * current value in transfer negotiations. */ if (((spi->valid & CTS_SPI_VALID_SYNC_RATE) != 0) || ((spi->valid & CTS_SPI_VALID_SYNC_OFFSET) != 0)) { /* * If the user provided a sync rate but no offset, * use the current offset. */ if ((spi->valid & CTS_SPI_VALID_SYNC_OFFSET) == 0) spi->sync_offset = tconf->offset; /* * If the user provided an offset but no sync rate, * use the current sync rate. */ if ((spi->valid & CTS_SPI_VALID_SYNC_RATE) == 0) spi->sync_period = tconf->period; adv_period_offset_to_sdtr(adv, &spi->sync_period, &spi->sync_offset, cts->ccb_h.target_id); adv_set_syncrate(adv, /*struct cam_path */NULL, cts->ccb_h.target_id, spi->sync_period, spi->sync_offset, update_type); } ccb->ccb_h.status = CAM_REQ_CMP; xpt_done(ccb); break; } case XPT_GET_TRAN_SETTINGS: /* Get default/user set transfer settings for the target */ { struct ccb_trans_settings_scsi *scsi; struct ccb_trans_settings_spi *spi; struct ccb_trans_settings *cts; struct adv_transinfo *tconf; target_bit_vector target_mask; cts = &ccb->cts; target_mask = ADV_TID_TO_TARGET_MASK(cts->ccb_h.target_id); scsi = &cts->proto_specific.scsi; spi = &cts->xport_specific.spi; cts->protocol = PROTO_SCSI; cts->protocol_version = SCSI_REV_2; cts->transport = XPORT_SPI; cts->transport_version = 2; scsi->flags &= ~CTS_SCSI_FLAGS_TAG_ENB; spi->flags &= ~CTS_SPI_FLAGS_DISC_ENB; if (cts->type == CTS_TYPE_CURRENT_SETTINGS) { tconf = &adv->tinfo[cts->ccb_h.target_id].current; if ((adv->disc_enable & target_mask) != 0) spi->flags |= CTS_SPI_FLAGS_DISC_ENB; if ((adv->cmd_qng_enabled & target_mask) != 0) scsi->flags |= CTS_SCSI_FLAGS_TAG_ENB; } else { tconf = &adv->tinfo[cts->ccb_h.target_id].user; if ((adv->user_disc_enable & target_mask) != 0) spi->flags |= CTS_SPI_FLAGS_DISC_ENB; if ((adv->user_cmd_qng_enabled & target_mask) != 0) scsi->flags |= CTS_SCSI_FLAGS_TAG_ENB; } spi->sync_period = tconf->period; spi->sync_offset = tconf->offset; spi->bus_width = MSG_EXT_WDTR_BUS_8_BIT; spi->valid = CTS_SPI_VALID_SYNC_RATE | CTS_SPI_VALID_SYNC_OFFSET | CTS_SPI_VALID_BUS_WIDTH | CTS_SPI_VALID_DISC; scsi->valid = CTS_SCSI_VALID_TQ; ccb->ccb_h.status = CAM_REQ_CMP; xpt_done(ccb); break; } case 
XPT_CALC_GEOMETRY: { int extended; extended = (adv->control & ADV_CNTL_BIOS_GT_1GB) != 0; cam_calc_geometry(&ccb->ccg, extended); xpt_done(ccb); break; } case XPT_RESET_BUS: /* Reset the specified SCSI bus */ { adv_stop_execution(adv); adv_reset_bus(adv, /*initiate_reset*/TRUE); adv_start_execution(adv); ccb->ccb_h.status = CAM_REQ_CMP; xpt_done(ccb); break; } case XPT_TERM_IO: /* Terminate the I/O process */ /* XXX Implement */ ccb->ccb_h.status = CAM_REQ_INVALID; xpt_done(ccb); break; case XPT_PATH_INQ: /* Path routing inquiry */ { struct ccb_pathinq *cpi = &ccb->cpi; cpi->version_num = 1; /* XXX??? */ cpi->hba_inquiry = PI_SDTR_ABLE|PI_TAG_ABLE; cpi->target_sprt = 0; cpi->hba_misc = 0; cpi->hba_eng_cnt = 0; cpi->max_target = 7; cpi->max_lun = 7; cpi->initiator_id = adv->scsi_id; cpi->bus_id = cam_sim_bus(sim); cpi->base_transfer_speed = 3300; strlcpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN); strlcpy(cpi->hba_vid, "Advansys", HBA_IDLEN); strlcpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN); cpi->unit_number = cam_sim_unit(sim); cpi->ccb_h.status = CAM_REQ_CMP; cpi->transport = XPORT_SPI; cpi->transport_version = 2; cpi->protocol = PROTO_SCSI; cpi->protocol_version = SCSI_REV_2; xpt_done(ccb); break; } default: ccb->ccb_h.status = CAM_REQ_INVALID; xpt_done(ccb); break; } } /* * Currently, the output of bus_dmammap_load suits our needs just * fine, but should it change, we'd need to do something here. */ #define adv_fixup_dmasegs(adv, dm_segs) (struct adv_sg_entry *)(dm_segs) static void adv_execute_ccb(void *arg, bus_dma_segment_t *dm_segs, int nsegments, int error) { struct ccb_scsiio *csio; struct ccb_hdr *ccb_h; struct cam_sim *sim; struct adv_softc *adv; struct adv_ccb_info *cinfo; struct adv_scsi_q scsiq; struct adv_sg_head sghead; csio = (struct ccb_scsiio *)arg; ccb_h = &csio->ccb_h; sim = xpt_path_sim(ccb_h->path); adv = (struct adv_softc *)cam_sim_softc(sim); cinfo = (struct adv_ccb_info *)csio->ccb_h.ccb_cinfo_ptr; if (!dumping) mtx_assert(&adv->lock, MA_OWNED); /* * Setup our done routine to release the simq on * the next ccb that completes. */ if ((adv->state & ADV_BUSDMA_BLOCK) != 0) adv->state |= ADV_BUSDMA_BLOCK_CLEARED; if ((ccb_h->flags & CAM_CDB_POINTER) != 0) { if ((ccb_h->flags & CAM_CDB_PHYS) == 0) { /* XXX Need phystovirt!!!! */ /* How about pmap_kenter??? 
*/ scsiq.cdbptr = csio->cdb_io.cdb_ptr; } else { scsiq.cdbptr = csio->cdb_io.cdb_ptr; } } else { scsiq.cdbptr = csio->cdb_io.cdb_bytes; } /* * Build up the request */ scsiq.q1.status = 0; scsiq.q1.q_no = 0; scsiq.q1.cntl = 0; scsiq.q1.sg_queue_cnt = 0; scsiq.q1.target_id = ADV_TID_TO_TARGET_MASK(ccb_h->target_id); scsiq.q1.target_lun = ccb_h->target_lun; scsiq.q1.sense_len = csio->sense_len; scsiq.q1.extra_bytes = 0; scsiq.q2.ccb_index = cinfo - adv->ccb_infos; scsiq.q2.target_ix = ADV_TIDLUN_TO_IX(ccb_h->target_id, ccb_h->target_lun); scsiq.q2.flag = 0; scsiq.q2.cdb_len = csio->cdb_len; if ((ccb_h->flags & CAM_TAG_ACTION_VALID) != 0) scsiq.q2.tag_code = csio->tag_action; else scsiq.q2.tag_code = 0; scsiq.q2.vm_id = 0; if (nsegments != 0) { bus_dmasync_op_t op; scsiq.q1.data_addr = dm_segs->ds_addr; scsiq.q1.data_cnt = dm_segs->ds_len; if (nsegments > 1) { scsiq.q1.cntl |= QC_SG_HEAD; sghead.entry_cnt = sghead.entry_to_copy = nsegments; sghead.res = 0; sghead.sg_list = adv_fixup_dmasegs(adv, dm_segs); scsiq.sg_head = &sghead; } else { scsiq.sg_head = NULL; } if ((ccb_h->flags & CAM_DIR_MASK) == CAM_DIR_IN) op = BUS_DMASYNC_PREREAD; else op = BUS_DMASYNC_PREWRITE; bus_dmamap_sync(adv->buffer_dmat, cinfo->dmamap, op); } else { scsiq.q1.data_addr = 0; scsiq.q1.data_cnt = 0; scsiq.sg_head = NULL; } /* * Last time we need to check if this SCB needs to * be aborted. */ if (ccb_h->status != CAM_REQ_INPROG) { if (nsegments != 0) bus_dmamap_unload(adv->buffer_dmat, cinfo->dmamap); adv_clear_state(adv, (union ccb *)csio); adv_free_ccb_info(adv, cinfo); xpt_done((union ccb *)csio); return; } if (adv_execute_scsi_queue(adv, &scsiq, csio->dxfer_len) != 0) { /* Temporary resource shortage */ adv_set_state(adv, ADV_RESOURCE_SHORTAGE); if (nsegments != 0) bus_dmamap_unload(adv->buffer_dmat, cinfo->dmamap); csio->ccb_h.status = CAM_REQUEUE_REQ; adv_clear_state(adv, (union ccb *)csio); adv_free_ccb_info(adv, cinfo); xpt_done((union ccb *)csio); return; } cinfo->state |= ACCB_ACTIVE; ccb_h->status |= CAM_SIM_QUEUED; LIST_INSERT_HEAD(&adv->pending_ccbs, ccb_h, sim_links.le); /* Schedule our timeout */ callout_reset_sbt(&cinfo->timer, SBT_1MS * ccb_h->timeout, 0, adv_timeout, csio, 0); } static struct adv_ccb_info * adv_alloc_ccb_info(struct adv_softc *adv) { int error; struct adv_ccb_info *cinfo; cinfo = &adv->ccb_infos[adv->ccb_infos_allocated]; cinfo->state = ACCB_FREE; callout_init_mtx(&cinfo->timer, &adv->lock, 0); error = bus_dmamap_create(adv->buffer_dmat, /*flags*/0, &cinfo->dmamap); if (error != 0) { device_printf(adv->dev, "Unable to allocate CCB info " "dmamap - error %d\n", error); return (NULL); } adv->ccb_infos_allocated++; return (cinfo); } static void adv_destroy_ccb_info(struct adv_softc *adv, struct adv_ccb_info *cinfo) { callout_drain(&cinfo->timer); bus_dmamap_destroy(adv->buffer_dmat, cinfo->dmamap); } void adv_timeout(void *arg) { union ccb *ccb; struct adv_softc *adv; struct adv_ccb_info *cinfo, *cinfo2; ccb = (union ccb *)arg; adv = (struct adv_softc *)xpt_path_sim(ccb->ccb_h.path)->softc; cinfo = (struct adv_ccb_info *)ccb->ccb_h.ccb_cinfo_ptr; mtx_assert(&adv->lock, MA_OWNED); xpt_print_path(ccb->ccb_h.path); printf("Timed out\n"); /* Have we been taken care of already?? 
*/ if (cinfo == NULL || cinfo->state == ACCB_FREE) { return; } adv_stop_execution(adv); if ((cinfo->state & ACCB_ABORT_QUEUED) == 0) { struct ccb_hdr *ccb_h; /* * In order to simplify the recovery process, we ask the XPT * layer to halt the queue of new transactions and we traverse * the list of pending CCBs and remove their timeouts. This * means that the driver attempts to clear only one error * condition at a time. In general, timeouts that occur * close together are related anyway, so there is no benefit * in attempting to handle errors in parallel. Timeouts will * be reinstated when the recovery process ends. */ adv_set_state(adv, ADV_IN_TIMEOUT); /* This CCB is the CCB representing our recovery actions */ cinfo->state |= ACCB_RECOVERY_CCB|ACCB_ABORT_QUEUED; ccb_h = LIST_FIRST(&adv->pending_ccbs); while (ccb_h != NULL) { cinfo2 = ccb_h->ccb_cinfo_ptr; callout_stop(&cinfo2->timer); ccb_h = LIST_NEXT(ccb_h, sim_links.le); } /* XXX Should send a BDR */ /* Attempt an abort as our first tack */ xpt_print_path(ccb->ccb_h.path); printf("Attempting abort\n"); adv_abort_ccb(adv, ccb->ccb_h.target_id, ccb->ccb_h.target_lun, ccb, CAM_CMD_TIMEOUT, /*queued_only*/FALSE); callout_reset(&cinfo->timer, 2 * hz, adv_timeout, ccb); } else { /* Our attempt to perform an abort failed, go for a reset */ xpt_print_path(ccb->ccb_h.path); printf("Resetting bus\n"); ccb->ccb_h.status &= ~CAM_STATUS_MASK; ccb->ccb_h.status |= CAM_CMD_TIMEOUT; adv_reset_bus(adv, /*initiate_reset*/TRUE); } adv_start_execution(adv); } struct adv_softc * adv_alloc(device_t dev, struct resource *res, long offset) { struct adv_softc *adv = device_get_softc(dev); /* * Allocate a storage area for us */ LIST_INIT(&adv->pending_ccbs); SLIST_INIT(&adv->free_ccb_infos); adv->dev = dev; adv->res = res; adv->reg_off = offset; mtx_init(&adv->lock, "adv", NULL, MTX_DEF); return(adv); } void adv_free(struct adv_softc *adv) { switch (adv->init_level) { case 6: { struct adv_ccb_info *cinfo; while ((cinfo = SLIST_FIRST(&adv->free_ccb_infos)) != NULL) { SLIST_REMOVE_HEAD(&adv->free_ccb_infos, links); adv_destroy_ccb_info(adv, cinfo); } bus_dmamap_unload(adv->sense_dmat, adv->sense_dmamap); } case 5: bus_dmamem_free(adv->sense_dmat, adv->sense_buffers, adv->sense_dmamap); case 4: bus_dma_tag_destroy(adv->sense_dmat); case 3: bus_dma_tag_destroy(adv->buffer_dmat); case 2: bus_dma_tag_destroy(adv->parent_dmat); case 1: if (adv->ccb_infos != NULL) free(adv->ccb_infos, M_DEVBUF); case 0: mtx_destroy(&adv->lock); break; } } int adv_init(struct adv_softc *adv) { struct adv_eeprom_config eeprom_config; int checksum, i; int max_sync; u_int16_t config_lsw; u_int16_t config_msw; mtx_lock(&adv->lock); adv_lib_init(adv); /* * Stop script execution. */ adv_write_lram_16(adv, ADV_HALTCODE_W, 0x00FE); adv_stop_execution(adv); if (adv_stop_chip(adv) == 0 || adv_is_chip_halted(adv) == 0) { mtx_unlock(&adv->lock); device_printf(adv->dev, "Unable to halt adapter. Initialization failed\n"); return (1); } ADV_OUTW(adv, ADV_REG_PROG_COUNTER, ADV_MCODE_START_ADDR); if (ADV_INW(adv, ADV_REG_PROG_COUNTER) != ADV_MCODE_START_ADDR) { mtx_unlock(&adv->lock); device_printf(adv->dev, "Unable to set program counter. Initialization failed\n"); return (1); } config_msw = ADV_INW(adv, ADV_CONFIG_MSW); config_lsw = ADV_INW(adv, ADV_CONFIG_LSW); if ((config_msw & ADV_CFG_MSW_CLR_MASK) != 0) { config_msw &= ~ADV_CFG_MSW_CLR_MASK; /* * XXX The Linux code flags this as an error, * but what should we report to the user???
* It seems that clearing the config register * makes this error recoverable. */ ADV_OUTW(adv, ADV_CONFIG_MSW, config_msw); } /* Suck in the configuration from the EEProm */ checksum = adv_get_eeprom_config(adv, &eeprom_config); if (ADV_INW(adv, ADV_CHIP_STATUS) & ADV_CSW_AUTO_CONFIG) { /* * XXX The Linux code sets a warning level for this * condition, yet nothing of meaning is printed to * the user. What does this mean??? */ if (adv->chip_version == 3) { if (eeprom_config.cfg_lsw != config_lsw) eeprom_config.cfg_lsw = config_lsw; if (eeprom_config.cfg_msw != config_msw) { eeprom_config.cfg_msw = config_msw; } } } if (checksum == eeprom_config.chksum) { /* Range/Sanity checking */ if (eeprom_config.max_total_qng < ADV_MIN_TOTAL_QNG) { eeprom_config.max_total_qng = ADV_MIN_TOTAL_QNG; } if (eeprom_config.max_total_qng > ADV_MAX_TOTAL_QNG) { eeprom_config.max_total_qng = ADV_MAX_TOTAL_QNG; } if (eeprom_config.max_tag_qng > eeprom_config.max_total_qng) { eeprom_config.max_tag_qng = eeprom_config.max_total_qng; } if (eeprom_config.max_tag_qng < ADV_MIN_TAG_Q_PER_DVC) { eeprom_config.max_tag_qng = ADV_MIN_TAG_Q_PER_DVC; } adv->max_openings = eeprom_config.max_total_qng; adv->user_disc_enable = eeprom_config.disc_enable; adv->user_cmd_qng_enabled = eeprom_config.use_cmd_qng; adv->isa_dma_speed = EEPROM_DMA_SPEED(eeprom_config); adv->scsi_id = EEPROM_SCSIID(eeprom_config) & ADV_MAX_TID; EEPROM_SET_SCSIID(eeprom_config, adv->scsi_id); adv->control = eeprom_config.cntl; for (i = 0; i <= ADV_MAX_TID; i++) { u_int8_t sync_data; if ((eeprom_config.init_sdtr & (0x1 << i)) == 0) sync_data = 0; else sync_data = eeprom_config.sdtr_data[i]; adv_sdtr_to_period_offset(adv, sync_data, &adv->tinfo[i].user.period, &adv->tinfo[i].user.offset, i); } config_lsw = eeprom_config.cfg_lsw; eeprom_config.cfg_msw = config_msw; } else { u_int8_t sync_data; device_printf(adv->dev, "Warning EEPROM Checksum mismatch. 
" "Using default device parameters\n"); /* Set reasonable defaults since we can't read the EEPROM */ adv->isa_dma_speed = /*ADV_DEF_ISA_DMA_SPEED*/1; adv->max_openings = ADV_DEF_MAX_TOTAL_QNG; adv->disc_enable = TARGET_BIT_VECTOR_SET; adv->user_disc_enable = TARGET_BIT_VECTOR_SET; adv->cmd_qng_enabled = TARGET_BIT_VECTOR_SET; adv->user_cmd_qng_enabled = TARGET_BIT_VECTOR_SET; adv->scsi_id = 7; adv->control = 0xFFFF; if (adv->chip_version == ADV_CHIP_VER_PCI_ULTRA_3050) /* Default to no Ultra to support the 3030 */ adv->control &= ~ADV_CNTL_SDTR_ENABLE_ULTRA; sync_data = ADV_DEF_SDTR_OFFSET | (ADV_DEF_SDTR_INDEX << 4); for (i = 0; i <= ADV_MAX_TID; i++) { adv_sdtr_to_period_offset(adv, sync_data, &adv->tinfo[i].user.period, &adv->tinfo[i].user.offset, i); } config_lsw |= ADV_CFG_LSW_SCSI_PARITY_ON; } config_msw &= ~ADV_CFG_MSW_CLR_MASK; config_lsw |= ADV_CFG_LSW_HOST_INT_ON; if ((adv->type & (ADV_PCI|ADV_ULTRA)) == (ADV_PCI|ADV_ULTRA) && (adv->control & ADV_CNTL_SDTR_ENABLE_ULTRA) == 0) /* 25ns or 10MHz */ max_sync = 25; else /* Unlimited */ max_sync = 0; for (i = 0; i <= ADV_MAX_TID; i++) { if (adv->tinfo[i].user.period < max_sync) adv->tinfo[i].user.period = max_sync; } if (adv_test_external_lram(adv) == 0) { if ((adv->type & (ADV_PCI|ADV_ULTRA)) == (ADV_PCI|ADV_ULTRA)) { eeprom_config.max_total_qng = ADV_MAX_PCI_ULTRA_INRAM_TOTAL_QNG; eeprom_config.max_tag_qng = ADV_MAX_PCI_ULTRA_INRAM_TAG_QNG; } else { eeprom_config.cfg_msw |= 0x0800; config_msw |= 0x0800; eeprom_config.max_total_qng = ADV_MAX_PCI_INRAM_TOTAL_QNG; eeprom_config.max_tag_qng = ADV_MAX_INRAM_TAG_QNG; } adv->max_openings = eeprom_config.max_total_qng; } ADV_OUTW(adv, ADV_CONFIG_MSW, config_msw); ADV_OUTW(adv, ADV_CONFIG_LSW, config_lsw); #if 0 /* * Don't write the eeprom data back for now. * I'd rather not mess up the user's card. We also don't * fully sanitize the eeprom settings above for the write-back * to be 100% correct. */ if (adv_set_eeprom_config(adv, &eeprom_config) != 0) device_printf(adv->dev, "WARNING! Failure writing to EEPROM.\n"); #endif adv_set_chip_scsiid(adv, adv->scsi_id); if (adv_init_lram_and_mcode(adv)) { mtx_unlock(&adv->lock); return (1); } adv->disc_enable = adv->user_disc_enable; adv_write_lram_8(adv, ADVV_DISC_ENABLE_B, adv->disc_enable); for (i = 0; i <= ADV_MAX_TID; i++) { /* * Start off in async mode. */ adv_set_syncrate(adv, /*struct cam_path */NULL, i, /*period*/0, /*offset*/0, ADV_TRANS_CUR); /* * Enable the use of tagged commands on all targets. * This allows the kernel driver to make up its own mind * as it sees fit to tag queue instead of having the * firmware try and second guess the tag_code settings. */ adv_write_lram_8(adv, ADVV_MAX_DVC_QNG_BEG + i, adv->max_openings); } adv_write_lram_8(adv, ADVV_USE_TAGGED_QNG_B, TARGET_BIT_VECTOR_SET); adv_write_lram_8(adv, ADVV_CAN_TAGGED_QNG_B, TARGET_BIT_VECTOR_SET); device_printf(adv->dev, "AdvanSys %s Host Adapter, SCSI ID %d, queue depth %d\n", (adv->type & ADV_ULTRA) && (max_sync == 0) ? "Ultra SCSI" : "SCSI", adv->scsi_id, adv->max_openings); mtx_unlock(&adv->lock); return (0); } void adv_intr(void *arg) { struct adv_softc *adv; adv = arg; mtx_lock(&adv->lock); adv_intr_locked(adv); mtx_unlock(&adv->lock); } void adv_intr_locked(struct adv_softc *adv) { u_int16_t chipstat; u_int16_t saved_ram_addr; u_int8_t ctrl_reg; u_int8_t saved_ctrl_reg; u_int8_t host_flag; if (!dumping) mtx_assert(&adv->lock, MA_OWNED); chipstat = ADV_INW(adv, ADV_CHIP_STATUS); /* Is it for us?
*/ if ((chipstat & (ADV_CSW_INT_PENDING|ADV_CSW_SCSI_RESET_LATCH)) == 0) return; ctrl_reg = ADV_INB(adv, ADV_CHIP_CTRL); saved_ctrl_reg = ctrl_reg & (~(ADV_CC_SCSI_RESET | ADV_CC_CHIP_RESET | ADV_CC_SINGLE_STEP | ADV_CC_DIAG | ADV_CC_TEST)); if ((chipstat & (ADV_CSW_SCSI_RESET_LATCH|ADV_CSW_SCSI_RESET_ACTIVE))) { device_printf(adv->dev, "Detected Bus Reset\n"); adv_reset_bus(adv, /*initiate_reset*/FALSE); return; } if ((chipstat & ADV_CSW_INT_PENDING) != 0) { saved_ram_addr = ADV_INW(adv, ADV_LRAM_ADDR); host_flag = adv_read_lram_8(adv, ADVV_HOST_FLAG_B); adv_write_lram_8(adv, ADVV_HOST_FLAG_B, host_flag | ADV_HOST_FLAG_IN_ISR); adv_ack_interrupt(adv); if ((chipstat & ADV_CSW_HALTED) != 0 && (ctrl_reg & ADV_CC_SINGLE_STEP) != 0) { adv_isr_chip_halted(adv); saved_ctrl_reg &= ~ADV_CC_HALT; } else { adv_run_doneq(adv); } ADV_OUTW(adv, ADV_LRAM_ADDR, saved_ram_addr); #ifdef DIAGNOSTIC if (ADV_INW(adv, ADV_LRAM_ADDR) != saved_ram_addr) panic("adv_intr: Unable to set LRAM addr"); #endif adv_write_lram_8(adv, ADVV_HOST_FLAG_B, host_flag); } ADV_OUTB(adv, ADV_CHIP_CTRL, saved_ctrl_reg); } static void adv_run_doneq(struct adv_softc *adv) { struct adv_q_done_info scsiq; u_int doneq_head; u_int done_qno; doneq_head = adv_read_lram_16(adv, ADVV_DONE_Q_TAIL_W) & 0xFF; done_qno = adv_read_lram_8(adv, ADV_QNO_TO_QADDR(doneq_head) + ADV_SCSIQ_B_FWD); while (done_qno != ADV_QLINK_END) { union ccb* ccb; struct adv_ccb_info *cinfo; u_int done_qaddr; u_int sg_queue_cnt; done_qaddr = ADV_QNO_TO_QADDR(done_qno); /* Pull status from this request */ sg_queue_cnt = adv_copy_lram_doneq(adv, done_qaddr, &scsiq, adv->max_dma_count); /* Mark it as free */ adv_write_lram_8(adv, done_qaddr + ADV_SCSIQ_B_STATUS, scsiq.q_status & ~(QS_READY|QS_ABORTED)); /* Process request based on retrieved info */ if ((scsiq.cntl & QC_SG_HEAD) != 0) { u_int i; /* * S/G based request. Free all of the queue * structures that contained S/G information. 
*/ for (i = 0; i < sg_queue_cnt; i++) { done_qno = adv_read_lram_8(adv, done_qaddr + ADV_SCSIQ_B_FWD); #ifdef DIAGNOSTIC if (done_qno == ADV_QLINK_END) { panic("adv_qdone: Corrupted SG " "list encountered"); } #endif done_qaddr = ADV_QNO_TO_QADDR(done_qno); /* Mark SG queue as free */ adv_write_lram_8(adv, done_qaddr + ADV_SCSIQ_B_STATUS, QS_FREE); } } else sg_queue_cnt = 0; #ifdef DIAGNOSTIC if (adv->cur_active < (sg_queue_cnt + 1)) panic("adv_qdone: Attempting to free more " "queues than are active"); #endif adv->cur_active -= sg_queue_cnt + 1; if ((scsiq.q_status != QS_DONE) && (scsiq.q_status & QS_ABORTED) == 0) panic("adv_qdone: completed scsiq with unknown status"); scsiq.remain_bytes += scsiq.extra_bytes; if ((scsiq.d3.done_stat == QD_WITH_ERROR) && (scsiq.d3.host_stat == QHSTA_M_DATA_OVER_RUN)) { if ((scsiq.cntl & (QC_DATA_IN|QC_DATA_OUT)) == 0) { scsiq.d3.done_stat = QD_NO_ERROR; scsiq.d3.host_stat = QHSTA_NO_ERROR; } } cinfo = &adv->ccb_infos[scsiq.d2.ccb_index]; ccb = cinfo->ccb; ccb->csio.resid = scsiq.remain_bytes; adv_done(adv, ccb, scsiq.d3.done_stat, scsiq.d3.host_stat, scsiq.d3.scsi_stat, scsiq.q_no); doneq_head = done_qno; done_qno = adv_read_lram_8(adv, done_qaddr + ADV_SCSIQ_B_FWD); } adv_write_lram_16(adv, ADVV_DONE_Q_TAIL_W, doneq_head); } void adv_done(struct adv_softc *adv, union ccb *ccb, u_int done_stat, u_int host_stat, u_int scsi_status, u_int q_no) { struct adv_ccb_info *cinfo; if (!dumping) mtx_assert(&adv->lock, MA_OWNED); cinfo = (struct adv_ccb_info *)ccb->ccb_h.ccb_cinfo_ptr; LIST_REMOVE(&ccb->ccb_h, sim_links.le); callout_stop(&cinfo->timer); if ((ccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE) { bus_dmasync_op_t op; if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) op = BUS_DMASYNC_POSTREAD; else op = BUS_DMASYNC_POSTWRITE; bus_dmamap_sync(adv->buffer_dmat, cinfo->dmamap, op); bus_dmamap_unload(adv->buffer_dmat, cinfo->dmamap); } switch (done_stat) { case QD_NO_ERROR: if (host_stat == QHSTA_NO_ERROR) { ccb->ccb_h.status = CAM_REQ_CMP; break; } xpt_print_path(ccb->ccb_h.path); printf("adv_done - queue done without error, " "but host status non-zero(%x)\n", host_stat); /*FALLTHROUGH*/ case QD_WITH_ERROR: switch (host_stat) { case QHSTA_M_TARGET_STATUS_BUSY: case QHSTA_M_BAD_QUEUE_FULL_OR_BUSY: /* * Assume that if we were a tagged transaction * the target reported queue full. Otherwise, * report busy. The firmware really should just * pass the original status back up to us even * if it thinks the target was in error for * returning this status as no other transactions * from this initiator are in effect, but this * ignores multi-initiator setups and there is * evidence that the firmware gets its per-device * transaction counts screwed up occasionally. 
*/ ccb->ccb_h.status |= CAM_SCSI_STATUS_ERROR; if ((ccb->ccb_h.flags & CAM_TAG_ACTION_VALID) != 0 && host_stat != QHSTA_M_TARGET_STATUS_BUSY) scsi_status = SCSI_STATUS_QUEUE_FULL; else scsi_status = SCSI_STATUS_BUSY; adv_abort_ccb(adv, ccb->ccb_h.target_id, ccb->ccb_h.target_lun, /*ccb*/NULL, CAM_REQUEUE_REQ, /*queued_only*/TRUE); /*FALLTHROUGH*/ case QHSTA_M_NO_AUTO_REQ_SENSE: case QHSTA_NO_ERROR: ccb->csio.scsi_status = scsi_status; switch (scsi_status) { case SCSI_STATUS_CHECK_COND: case SCSI_STATUS_CMD_TERMINATED: ccb->ccb_h.status |= CAM_AUTOSNS_VALID; /* Structure copy */ ccb->csio.sense_data = adv->sense_buffers[q_no - 1]; /* FALLTHROUGH */ case SCSI_STATUS_BUSY: case SCSI_STATUS_RESERV_CONFLICT: case SCSI_STATUS_QUEUE_FULL: case SCSI_STATUS_COND_MET: case SCSI_STATUS_INTERMED: case SCSI_STATUS_INTERMED_COND_MET: ccb->ccb_h.status |= CAM_SCSI_STATUS_ERROR; break; case SCSI_STATUS_OK: ccb->ccb_h.status |= CAM_REQ_CMP; break; } break; case QHSTA_M_SEL_TIMEOUT: ccb->ccb_h.status = CAM_SEL_TIMEOUT; break; case QHSTA_M_DATA_OVER_RUN: ccb->ccb_h.status = CAM_DATA_RUN_ERR; break; case QHSTA_M_UNEXPECTED_BUS_FREE: ccb->ccb_h.status = CAM_UNEXP_BUSFREE; break; case QHSTA_M_BAD_BUS_PHASE_SEQ: ccb->ccb_h.status = CAM_SEQUENCE_FAIL; break; case QHSTA_M_BAD_CMPL_STATUS_IN: /* No command complete after a status message */ ccb->ccb_h.status = CAM_SEQUENCE_FAIL; break; case QHSTA_D_EXE_SCSI_Q_BUSY_TIMEOUT: case QHSTA_M_WTM_TIMEOUT: case QHSTA_M_HUNG_REQ_SCSI_BUS_RESET: /* The SCSI bus hung in a phase */ ccb->ccb_h.status = CAM_SEQUENCE_FAIL; adv_reset_bus(adv, /*initiate_reset*/TRUE); break; case QHSTA_M_AUTO_REQ_SENSE_FAIL: ccb->ccb_h.status = CAM_AUTOSENSE_FAIL; break; case QHSTA_D_QDONE_SG_LIST_CORRUPTED: case QHSTA_D_ASC_DVC_ERROR_CODE_SET: case QHSTA_D_HOST_ABORT_FAILED: case QHSTA_D_EXE_SCSI_Q_FAILED: case QHSTA_D_ASPI_NO_BUF_POOL: case QHSTA_M_BAD_TAG_CODE: case QHSTA_D_LRAM_CMP_ERROR: case QHSTA_M_MICRO_CODE_ERROR_HALT: default: panic("%s: Unhandled Host status error %x", device_get_nameunit(adv->dev), host_stat); /* NOTREACHED */ } break; case QD_ABORTED_BY_HOST: /* Don't clobber any, more explicit, error codes we've set */ if ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_INPROG) ccb->ccb_h.status = CAM_REQ_ABORTED; break; default: xpt_print_path(ccb->ccb_h.path); printf("adv_done - queue done with unknown status %x:%x\n", done_stat, host_stat); ccb->ccb_h.status = CAM_REQ_CMP_ERR; break; } adv_clear_state(adv, ccb); if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP && (ccb->ccb_h.status & CAM_DEV_QFRZN) == 0) { xpt_freeze_devq(ccb->ccb_h.path, /*count*/1); ccb->ccb_h.status |= CAM_DEV_QFRZN; } adv_free_ccb_info(adv, cinfo); /* * Null this out so that we catch driver bugs that cause a * ccb to be completed twice. */ ccb->ccb_h.ccb_cinfo_ptr = NULL; ccb->ccb_h.status &= ~CAM_SIM_QUEUED; xpt_done(ccb); } /* * Function to poll for command completion when * interrupts are disabled (crash dumps) */ static void adv_poll(struct cam_sim *sim) { adv_intr_locked(cam_sim_softc(sim)); } /* * Attach all the sub-devices we can find */ int adv_attach(adv) struct adv_softc *adv; { struct ccb_setasync csa; struct cam_devq *devq; int max_sg; /* * Allocate an array of ccb mapping structures. We put the * index of the ccb_info structure into the queue representing * a transaction and use it for mapping the queue to the * upper level SCSI transaction it represents. 
*/ - adv->ccb_infos = malloc(sizeof(*adv->ccb_infos) * adv->max_openings, + adv->ccb_infos = mallocarray(adv->max_openings, sizeof(*adv->ccb_infos), M_DEVBUF, M_NOWAIT); if (adv->ccb_infos == NULL) return (ENOMEM); adv->init_level++; /* * Create our DMA tags. These tags define the kinds of device * accessible memory allocations and memory mappings we will * need to perform during normal operation. * * Unless we need to further restrict the allocation, we rely * on the restrictions of the parent dmat, hence the common * use of MAXADDR and MAXSIZE. * * The ASC boards use chains of "queues" (the transactional * resources on the board) to represent long S/G lists. * The first queue represents the command and holds a * single address and data pair. The queues that follow * can each hold ADV_SG_LIST_PER_Q entries. Given the * total number of queues, we can express the largest * transaction we can map. We reserve a few queues for * error recovery. Take those into account as well. * * There is a way to take an interrupt to download the * next batch of S/G entries if there are more than 255 * of them (the counter in the queue structure is a u_int8_t). * We don't use this feature, so limit the S/G list size * accordingly. */ max_sg = (adv->max_openings - ADV_MIN_FREE_Q - 1) * ADV_SG_LIST_PER_Q; if (max_sg > 255) max_sg = 255; /* DMA tag for mapping buffers into device visible space. */ if (bus_dma_tag_create( /* parent */ adv->parent_dmat, /* alignment */ 1, /* boundary */ 0, /* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ BUS_SPACE_MAXADDR, /* filter */ NULL, /* filterarg */ NULL, /* maxsize */ ADV_MAXPHYS, /* nsegments */ max_sg, /* maxsegsz */ BUS_SPACE_MAXSIZE_32BIT, /* flags */ BUS_DMA_ALLOCNOW, /* lockfunc */ busdma_lock_mutex, /* lockarg */ &adv->lock, &adv->buffer_dmat) != 0) { return (ENXIO); } adv->init_level++; /* DMA tag for our sense buffers */ if (bus_dma_tag_create( /* parent */ adv->parent_dmat, /* alignment */ 1, /* boundary */ 0, /* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ BUS_SPACE_MAXADDR, /* filter */ NULL, /* filterarg */ NULL, /* maxsize */ sizeof(struct scsi_sense_data) * adv->max_openings, /* nsegments */ 1, /* maxsegsz */ BUS_SPACE_MAXSIZE_32BIT, /* flags */ 0, /* lockfunc */ busdma_lock_mutex, /* lockarg */ &adv->lock, &adv->sense_dmat) != 0) { return (ENXIO); } adv->init_level++; /* Allocation for our sense buffers */ if (bus_dmamem_alloc(adv->sense_dmat, (void **)&adv->sense_buffers, BUS_DMA_NOWAIT, &adv->sense_dmamap) != 0) { return (ENOMEM); } adv->init_level++; /* And permanently map them */ bus_dmamap_load(adv->sense_dmat, adv->sense_dmamap, adv->sense_buffers, sizeof(struct scsi_sense_data)*adv->max_openings, adv_map, &adv->sense_physbase, /*flags*/0); adv->init_level++; /* * Fire up the chip */ if (adv_start_chip(adv) != 1) { device_printf(adv->dev, "Unable to start on board processor. Aborting.\n"); return (ENXIO); } /* * Create the device queue for our SIM. */ devq = cam_simq_alloc(adv->max_openings); if (devq == NULL) return (ENOMEM); /* * Construct our SIM entry. */ adv->sim = cam_sim_alloc(adv_action, adv_poll, "adv", adv, device_get_unit(adv->dev), &adv->lock, 1, adv->max_openings, devq); if (adv->sim == NULL) return (ENOMEM); /* * Register the bus. 
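The ccb_infos conversion at the top of this hunk is the pattern this entire revision applies: an allocation whose size is formed as count * element-size becomes mallocarray(count, element-size, type, flags), so the multiplication is validated at the allocation site instead of being trusted. mallocarray() itself is not part of this diff, so the sketch below only illustrates the intent; checked_array_alloc() is a hypothetical stand-in, not the committed implementation.

	/*
	 * Sketch only -- not the real mallocarray().  The point is to refuse
	 * a nmemb * size product that would wrap size_t instead of returning
	 * a buffer shorter than the caller expects.  (A real implementation
	 * could just as well panic on overflow under M_WAITOK.)
	 */
	static void *
	checked_array_alloc(size_t nmemb, size_t size, struct malloc_type *type,
	    int flags)
	{
		if (size != 0 && nmemb > SIZE_MAX / size)
			return (NULL);	/* product would overflow */
		return (malloc(nmemb * size, type, flags));
	}

However the overflow is reported, adv_attach() can no longer be handed a ccb_infos array shorter than max_openings entries by a silently wrapped multiplication.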
*/ mtx_lock(&adv->lock); if (xpt_bus_register(adv->sim, adv->dev, 0) != CAM_SUCCESS) { cam_sim_free(adv->sim, /*free devq*/TRUE); mtx_unlock(&adv->lock); return (ENXIO); } if (xpt_create_path(&adv->path, /*periph*/NULL, cam_sim_path(adv->sim), CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) { xpt_bus_deregister(cam_sim_path(adv->sim)); cam_sim_free(adv->sim, /*free devq*/TRUE); mtx_unlock(&adv->lock); return (ENXIO); } xpt_setup_ccb(&csa.ccb_h, adv->path, /*priority*/5); csa.ccb_h.func_code = XPT_SASYNC_CB; csa.event_enable = AC_FOUND_DEVICE|AC_LOST_DEVICE; csa.callback = advasync; csa.callback_arg = adv; xpt_action((union ccb *)&csa); mtx_unlock(&adv->lock); return (0); } MODULE_DEPEND(adv, cam, 1, 1, 1); Index: head/sys/dev/ath/if_ath_rx_edma.c =================================================================== --- head/sys/dev/ath/if_ath_rx_edma.c (revision 327948) +++ head/sys/dev/ath/if_ath_rx_edma.c (revision 327949) @@ -1,1010 +1,1009 @@ /*- * SPDX-License-Identifier: BSD-2-Clause-FreeBSD * * Copyright (c) 2012 Adrian Chadd * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer, * without modification. * 2. Redistributions in binary form must reproduce at minimum a disclaimer * similar to the "NO WARRANTY" disclaimer below ("Disclaimer") and any * redistribution must be conditioned upon including a substantially * similar Disclaimer requirement for further binary redistribution. * * NO WARRANTY * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF NONINFRINGEMENT, MERCHANTIBILITY * AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL * THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, * OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF * THE POSSIBILITY OF SUCH DAMAGES. */ #include __FBSDID("$FreeBSD$"); /* * Driver for the Atheros Wireless LAN controller. * * This software is derived from work of Atsushi Onoe; his contribution * is greatly appreciated. */ #include "opt_inet.h" #include "opt_ath.h" /* * This is needed for register operations which are performed * by the driver - eg, calls to ath_hal_gettsf32(). * * It's also required for any AH_DEBUG checks in here, eg the * module dependencies. 
*/ #include "opt_ah.h" #include "opt_wlan.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include /* for mp_ncpus */ #include #include #include #include #include #include #include #include #include #include #include #ifdef IEEE80211_SUPPORT_SUPERG #include #endif #ifdef IEEE80211_SUPPORT_TDMA #include #endif #include #ifdef INET #include #include #endif #include #include /* XXX for softled */ #include #include #include #include #include #include #include #include #include #include #include #include #ifdef ATH_TX99_DIAG #include #endif #include #ifdef ATH_DEBUG_ALQ #include #endif /* * some general macros */ #define INCR(_l, _sz) (_l) ++; (_l) &= ((_sz) - 1) #define DECR(_l, _sz) (_l) --; (_l) &= ((_sz) - 1) MALLOC_DECLARE(M_ATHDEV); /* * XXX TODO: * * + Make sure the FIFO is correctly flushed and reinitialised * through a reset; * + Verify multi-descriptor frames work! * + There's a "memory use after free" which needs to be tracked down * and fixed ASAP. I've seen this in the legacy path too, so it * may be a generic RX path issue. */ /* * XXX shuffle the function orders so these pre-declarations aren't * required! */ static int ath_edma_rxfifo_alloc(struct ath_softc *sc, HAL_RX_QUEUE qtype, int nbufs); static int ath_edma_rxfifo_flush(struct ath_softc *sc, HAL_RX_QUEUE qtype); static void ath_edma_rxbuf_free(struct ath_softc *sc, struct ath_buf *bf); static void ath_edma_recv_proc_queue(struct ath_softc *sc, HAL_RX_QUEUE qtype, int dosched); static int ath_edma_recv_proc_deferred_queue(struct ath_softc *sc, HAL_RX_QUEUE qtype, int dosched); static void ath_edma_stoprecv(struct ath_softc *sc, int dodelay) { struct ath_hal *ah = sc->sc_ah; ATH_RX_LOCK(sc); ath_hal_stoppcurecv(ah); ath_hal_setrxfilter(ah, 0); /* * */ if (ath_hal_stopdmarecv(ah) == AH_TRUE) sc->sc_rx_stopped = 1; /* * Give the various bus FIFOs (not EDMA descriptor FIFO) * time to finish flushing out data. */ DELAY(3000); /* Flush RX pending for each queue */ /* XXX should generic-ify this */ if (sc->sc_rxedma[HAL_RX_QUEUE_HP].m_rxpending) { m_freem(sc->sc_rxedma[HAL_RX_QUEUE_HP].m_rxpending); sc->sc_rxedma[HAL_RX_QUEUE_HP].m_rxpending = NULL; } if (sc->sc_rxedma[HAL_RX_QUEUE_LP].m_rxpending) { m_freem(sc->sc_rxedma[HAL_RX_QUEUE_LP].m_rxpending); sc->sc_rxedma[HAL_RX_QUEUE_LP].m_rxpending = NULL; } ATH_RX_UNLOCK(sc); } /* * Re-initialise the FIFO given the current buffer contents. * Specifically, walk from head -> tail, pushing the FIFO contents * back into the FIFO. */ static void ath_edma_reinit_fifo(struct ath_softc *sc, HAL_RX_QUEUE qtype) { struct ath_rx_edma *re = &sc->sc_rxedma[qtype]; struct ath_buf *bf; int i, j; ATH_RX_LOCK_ASSERT(sc); i = re->m_fifo_head; for (j = 0; j < re->m_fifo_depth; j++) { bf = re->m_fifo[i]; DPRINTF(sc, ATH_DEBUG_EDMA_RX, "%s: Q%d: pos=%i, addr=0x%jx\n", __func__, qtype, i, (uintmax_t)bf->bf_daddr); ath_hal_putrxbuf(sc->sc_ah, bf->bf_daddr, qtype); INCR(i, re->m_fifolen); } /* Ensure this worked out right */ if (i != re->m_fifo_tail) { device_printf(sc->sc_dev, "%s: i (%d) != tail! (%d)\n", __func__, i, re->m_fifo_tail); } } /* * Start receive. */ static int ath_edma_startrecv(struct ath_softc *sc) { struct ath_hal *ah = sc->sc_ah; ATH_RX_LOCK(sc); /* * Sanity check - are we being called whilst RX * isn't stopped? If so, we may end up pushing * too many entries into the RX FIFO and * badness occurs. 
*/ /* Enable RX FIFO */ ath_hal_rxena(ah); /* * In theory the hardware has been initialised, right? */ if (sc->sc_rx_resetted == 1) { DPRINTF(sc, ATH_DEBUG_EDMA_RX, "%s: Re-initing HP FIFO\n", __func__); ath_edma_reinit_fifo(sc, HAL_RX_QUEUE_HP); DPRINTF(sc, ATH_DEBUG_EDMA_RX, "%s: Re-initing LP FIFO\n", __func__); ath_edma_reinit_fifo(sc, HAL_RX_QUEUE_LP); sc->sc_rx_resetted = 0; } else { device_printf(sc->sc_dev, "%s: called without resetting chip?\n", __func__); } /* Add up to m_fifolen entries in each queue */ /* * These must occur after the above write so the FIFO buffers * are pushed/tracked in the same order as the hardware will * process them. * * XXX TODO: is this really necessary? We should've stopped * the hardware already and reinitialised it, so it's a no-op. */ ath_edma_rxfifo_alloc(sc, HAL_RX_QUEUE_HP, sc->sc_rxedma[HAL_RX_QUEUE_HP].m_fifolen); ath_edma_rxfifo_alloc(sc, HAL_RX_QUEUE_LP, sc->sc_rxedma[HAL_RX_QUEUE_LP].m_fifolen); ath_mode_init(sc); ath_hal_startpcurecv(ah); /* * We're now doing RX DMA! */ sc->sc_rx_stopped = 0; ATH_RX_UNLOCK(sc); return (0); } static void ath_edma_recv_sched_queue(struct ath_softc *sc, HAL_RX_QUEUE qtype, int dosched) { ATH_LOCK(sc); ath_power_set_power_state(sc, HAL_PM_AWAKE); ATH_UNLOCK(sc); ath_edma_recv_proc_queue(sc, qtype, dosched); ATH_LOCK(sc); ath_power_restore_power_state(sc); ATH_UNLOCK(sc); taskqueue_enqueue(sc->sc_tq, &sc->sc_rxtask); } static void ath_edma_recv_sched(struct ath_softc *sc, int dosched) { ATH_LOCK(sc); ath_power_set_power_state(sc, HAL_PM_AWAKE); ATH_UNLOCK(sc); ath_edma_recv_proc_queue(sc, HAL_RX_QUEUE_HP, dosched); ath_edma_recv_proc_queue(sc, HAL_RX_QUEUE_LP, dosched); ATH_LOCK(sc); ath_power_restore_power_state(sc); ATH_UNLOCK(sc); taskqueue_enqueue(sc->sc_tq, &sc->sc_rxtask); } static void ath_edma_recv_flush(struct ath_softc *sc) { DPRINTF(sc, ATH_DEBUG_RECV, "%s: called\n", __func__); ATH_PCU_LOCK(sc); sc->sc_rxproc_cnt++; ATH_PCU_UNLOCK(sc); ATH_LOCK(sc); ath_power_set_power_state(sc, HAL_PM_AWAKE); ATH_UNLOCK(sc); /* * Flush any active frames from FIFO -> deferred list */ ath_edma_recv_proc_queue(sc, HAL_RX_QUEUE_HP, 0); ath_edma_recv_proc_queue(sc, HAL_RX_QUEUE_LP, 0); /* * Process what's in the deferred queue */ /* * XXX: If we read the tsf/channoise here and then pass it in, * we could restore the power state before processing * the deferred queue. */ ath_edma_recv_proc_deferred_queue(sc, HAL_RX_QUEUE_HP, 0); ath_edma_recv_proc_deferred_queue(sc, HAL_RX_QUEUE_LP, 0); ATH_LOCK(sc); ath_power_restore_power_state(sc); ATH_UNLOCK(sc); ATH_PCU_LOCK(sc); sc->sc_rxproc_cnt--; ATH_PCU_UNLOCK(sc); } /* * Process frames from the current queue into the deferred queue. */ static void ath_edma_recv_proc_queue(struct ath_softc *sc, HAL_RX_QUEUE qtype, int dosched) { struct ath_rx_edma *re = &sc->sc_rxedma[qtype]; struct ath_rx_status *rs; struct ath_desc *ds; struct ath_buf *bf; struct mbuf *m; struct ath_hal *ah = sc->sc_ah; uint64_t tsf; uint16_t nf; int npkts = 0; tsf = ath_hal_gettsf64(ah); nf = ath_hal_getchannoise(ah, sc->sc_curchan); sc->sc_stats.ast_rx_noise = nf; ATH_RX_LOCK(sc); #if 1 if (sc->sc_rx_resetted == 1) { /* * XXX We shouldn't ever be scheduled if * receive has been stopped - so complain * loudly! */ device_printf(sc->sc_dev, "%s: sc_rx_resetted=1! Bad!\n", __func__); ATH_RX_UNLOCK(sc); return; } #endif do { bf = re->m_fifo[re->m_fifo_head]; /* This shouldn't occur! 
*/ if (bf == NULL) { device_printf(sc->sc_dev, "%s: Q%d: NULL bf?\n", __func__, qtype); break; } m = bf->bf_m; ds = bf->bf_desc; /* * Sync descriptor memory - this also syncs the buffer for us. * EDMA descriptors are in cached memory. */ bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap, BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); rs = &bf->bf_status.ds_rxstat; bf->bf_rxstatus = ath_hal_rxprocdesc(ah, ds, bf->bf_daddr, NULL, rs); if (bf->bf_rxstatus == HAL_EINPROGRESS) break; #ifdef ATH_DEBUG if (sc->sc_debug & ATH_DEBUG_RECV_DESC) ath_printrxbuf(sc, bf, 0, bf->bf_rxstatus == HAL_OK); #endif /* ATH_DEBUG */ #ifdef ATH_DEBUG_ALQ if (if_ath_alq_checkdebug(&sc->sc_alq, ATH_ALQ_EDMA_RXSTATUS)) if_ath_alq_post(&sc->sc_alq, ATH_ALQ_EDMA_RXSTATUS, sc->sc_rx_statuslen, (char *) ds); #endif /* ATH_DEBUG */ /* * Completed descriptor. */ DPRINTF(sc, ATH_DEBUG_EDMA_RX, "%s: Q%d: completed!\n", __func__, qtype); npkts++; /* * We've been synced already, so unmap. */ bus_dmamap_unload(sc->sc_dmat, bf->bf_dmamap); /* * Remove the FIFO entry and place it on the completion * queue. */ re->m_fifo[re->m_fifo_head] = NULL; TAILQ_INSERT_TAIL(&sc->sc_rx_rxlist[qtype], bf, bf_list); /* Bump the descriptor FIFO stats */ INCR(re->m_fifo_head, re->m_fifolen); re->m_fifo_depth--; /* XXX check it doesn't fall below 0 */ } while (re->m_fifo_depth > 0); /* Append some more fresh frames to the FIFO */ if (dosched) ath_edma_rxfifo_alloc(sc, qtype, re->m_fifolen); ATH_RX_UNLOCK(sc); /* rx signal state monitoring */ ath_hal_rxmonitor(ah, &sc->sc_halstats, sc->sc_curchan); ATH_KTR(sc, ATH_KTR_INTERRUPTS, 1, "ath edma rx proc: npkts=%d\n", npkts); return; } /* * Flush the deferred queue. * * This destructively flushes the deferred queue - it doesn't * call the wireless stack on each mbuf. */ static void ath_edma_flush_deferred_queue(struct ath_softc *sc) { struct ath_buf *bf; ATH_RX_LOCK_ASSERT(sc); /* Free in one set, inside the lock */ while (! TAILQ_EMPTY(&sc->sc_rx_rxlist[HAL_RX_QUEUE_LP])) { bf = TAILQ_FIRST(&sc->sc_rx_rxlist[HAL_RX_QUEUE_LP]); TAILQ_REMOVE(&sc->sc_rx_rxlist[HAL_RX_QUEUE_LP], bf, bf_list); /* Free the buffer/mbuf */ ath_edma_rxbuf_free(sc, bf); } while (! TAILQ_EMPTY(&sc->sc_rx_rxlist[HAL_RX_QUEUE_HP])) { bf = TAILQ_FIRST(&sc->sc_rx_rxlist[HAL_RX_QUEUE_HP]); TAILQ_REMOVE(&sc->sc_rx_rxlist[HAL_RX_QUEUE_HP], bf, bf_list); /* Free the buffer/mbuf */ ath_edma_rxbuf_free(sc, bf); } } static int ath_edma_recv_proc_deferred_queue(struct ath_softc *sc, HAL_RX_QUEUE qtype, int dosched) { int ngood = 0; uint64_t tsf; struct ath_buf *bf, *next; struct ath_rx_status *rs; int16_t nf; ath_bufhead rxlist; struct mbuf *m; TAILQ_INIT(&rxlist); nf = ath_hal_getchannoise(sc->sc_ah, sc->sc_curchan); /* * XXX TODO: the NF/TSF should be stamped on the bufs themselves, * otherwise we may end up adding in the wrong values if this * is delayed too far.. */ tsf = ath_hal_gettsf64(sc->sc_ah); /* Copy the list over */ ATH_RX_LOCK(sc); TAILQ_CONCAT(&rxlist, &sc->sc_rx_rxlist[qtype], bf_list); ATH_RX_UNLOCK(sc); /* Handle the completed descriptors */ /* * XXX is this SAFE call needed? The ath_buf entries * aren't modified by ath_rx_pkt, right? 
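On the XXX question just above: the _SAFE form of the queue(3) iterators stashes the following element in next before the loop body runs, so the body may unlink or re-queue the current bf without derailing the walk; if ath_rx_pkt() really never touches the bf_list linkage, plain TAILQ_FOREACH would also be sufficient, which is exactly what the comment is asking. A minimal illustration of what the _SAFE variant permits (done_with() is a made-up predicate, not driver code):

	/* The current node may be removed inside the body without losing next. */
	TAILQ_FOREACH_SAFE(bf, &rxlist, bf_list, next) {
		if (done_with(bf))
			TAILQ_REMOVE(&rxlist, bf, bf_list);
	}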
*/ TAILQ_FOREACH_SAFE(bf, &rxlist, bf_list, next) { /* * Skip the RX descriptor status - start at the data offset */ m_adj(bf->bf_m, sc->sc_rx_statuslen); /* Handle the frame */ rs = &bf->bf_status.ds_rxstat; m = bf->bf_m; bf->bf_m = NULL; if (ath_rx_pkt(sc, rs, bf->bf_rxstatus, tsf, nf, qtype, bf, m)) ngood++; } if (ngood) { sc->sc_lastrx = tsf; } ATH_KTR(sc, ATH_KTR_INTERRUPTS, 1, "ath edma rx deferred proc: ngood=%d\n", ngood); /* Free in one set, inside the lock */ ATH_RX_LOCK(sc); while (! TAILQ_EMPTY(&rxlist)) { bf = TAILQ_FIRST(&rxlist); TAILQ_REMOVE(&rxlist, bf, bf_list); /* Free the buffer/mbuf */ ath_edma_rxbuf_free(sc, bf); } ATH_RX_UNLOCK(sc); return (ngood); } static void ath_edma_recv_tasklet(void *arg, int npending) { struct ath_softc *sc = (struct ath_softc *) arg; #ifdef IEEE80211_SUPPORT_SUPERG struct ieee80211com *ic = &sc->sc_ic; #endif DPRINTF(sc, ATH_DEBUG_EDMA_RX, "%s: called; npending=%d\n", __func__, npending); ATH_PCU_LOCK(sc); if (sc->sc_inreset_cnt > 0) { device_printf(sc->sc_dev, "%s: sc_inreset_cnt > 0; skipping\n", __func__); ATH_PCU_UNLOCK(sc); return; } sc->sc_rxproc_cnt++; ATH_PCU_UNLOCK(sc); ATH_LOCK(sc); ath_power_set_power_state(sc, HAL_PM_AWAKE); ATH_UNLOCK(sc); ath_edma_recv_proc_queue(sc, HAL_RX_QUEUE_HP, 1); ath_edma_recv_proc_queue(sc, HAL_RX_QUEUE_LP, 1); ath_edma_recv_proc_deferred_queue(sc, HAL_RX_QUEUE_HP, 1); ath_edma_recv_proc_deferred_queue(sc, HAL_RX_QUEUE_LP, 1); /* * XXX: If we read the tsf/channoise here and then pass it in, * we could restore the power state before processing * the deferred queue. */ ATH_LOCK(sc); ath_power_restore_power_state(sc); ATH_UNLOCK(sc); #ifdef IEEE80211_SUPPORT_SUPERG ieee80211_ff_age_all(ic, 100); #endif if (ath_dfs_tasklet_needed(sc, sc->sc_curchan)) taskqueue_enqueue(sc->sc_tq, &sc->sc_dfstask); ATH_PCU_LOCK(sc); sc->sc_rxproc_cnt--; ATH_PCU_UNLOCK(sc); } /* * Allocate an RX mbuf for the given ath_buf and initialise * it for EDMA. * * + Allocate a 4KB mbuf; * + Setup the DMA map for the given buffer; * + Return that. */ static int ath_edma_rxbuf_init(struct ath_softc *sc, struct ath_buf *bf) { struct mbuf *m; int error; int len; ATH_RX_LOCK_ASSERT(sc); m = m_getm(NULL, sc->sc_edma_bufsize, M_NOWAIT, MT_DATA); if (! m) return (ENOBUFS); /* XXX ?*/ /* XXX warn/enforce alignment */ len = m->m_ext.ext_size; #if 0 device_printf(sc->sc_dev, "%s: called: m=%p, size=%d, mtod=%p\n", __func__, m, len, mtod(m, char *)); #endif m->m_pkthdr.len = m->m_len = m->m_ext.ext_size; /* * Populate ath_buf fields. */ bf->bf_desc = mtod(m, struct ath_desc *); bf->bf_lastds = bf->bf_desc; /* XXX only really for TX? */ bf->bf_m = m; /* * Zero the descriptor and ensure it makes it out to the * bounce buffer if one is required. * * XXX PREWRITE will copy the whole buffer; we only needed it * to sync the first 32 DWORDS. Oh well. */ memset(bf->bf_desc, '\0', sc->sc_rx_statuslen); /* * Create DMA mapping. */ error = bus_dmamap_load_mbuf_sg(sc->sc_dmat, bf->bf_dmamap, m, bf->bf_segs, &bf->bf_nseg, BUS_DMA_NOWAIT); if (error != 0) { device_printf(sc->sc_dev, "%s: failed; error=%d\n", __func__, error); m_freem(m); return (error); } /* * Set daddr to the physical mapping page. */ bf->bf_daddr = bf->bf_segs[0].ds_addr; /* * Prepare for the upcoming read. * * We need to both sync some data into the buffer (the zero'ed * descriptor payload) and also prepare for the read that's going * to occur. */ bus_dmamap_sync(sc->sc_dmat, bf->bf_dmamap, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); /* Finish! */ return (0); } /* * Allocate a RX buffer. 
*/ static struct ath_buf * ath_edma_rxbuf_alloc(struct ath_softc *sc) { struct ath_buf *bf; int error; ATH_RX_LOCK_ASSERT(sc); /* Allocate buffer */ bf = TAILQ_FIRST(&sc->sc_rxbuf); /* XXX shouldn't happen upon startup? */ if (bf == NULL) { device_printf(sc->sc_dev, "%s: nothing on rxbuf?!\n", __func__); return (NULL); } /* Remove it from the free list */ TAILQ_REMOVE(&sc->sc_rxbuf, bf, bf_list); /* Assign RX mbuf to it */ error = ath_edma_rxbuf_init(sc, bf); if (error != 0) { device_printf(sc->sc_dev, "%s: bf=%p, rxbuf alloc failed! error=%d\n", __func__, bf, error); TAILQ_INSERT_TAIL(&sc->sc_rxbuf, bf, bf_list); return (NULL); } return (bf); } static void ath_edma_rxbuf_free(struct ath_softc *sc, struct ath_buf *bf) { ATH_RX_LOCK_ASSERT(sc); /* * Only unload the frame if we haven't consumed * the mbuf via ath_rx_pkt(). */ if (bf->bf_m) { bus_dmamap_unload(sc->sc_dmat, bf->bf_dmamap); m_freem(bf->bf_m); bf->bf_m = NULL; } /* XXX lock? */ TAILQ_INSERT_TAIL(&sc->sc_rxbuf, bf, bf_list); } /* * Allocate up to 'n' entries and push them onto the hardware FIFO. * * Return how many entries were successfully pushed onto the * FIFO. */ static int ath_edma_rxfifo_alloc(struct ath_softc *sc, HAL_RX_QUEUE qtype, int nbufs) { struct ath_rx_edma *re = &sc->sc_rxedma[qtype]; struct ath_buf *bf; int i; ATH_RX_LOCK_ASSERT(sc); /* * Allocate buffers until the FIFO is full or nbufs is reached. */ for (i = 0; i < nbufs && re->m_fifo_depth < re->m_fifolen; i++) { /* Ensure the FIFO is already blank, complain loudly! */ if (re->m_fifo[re->m_fifo_tail] != NULL) { device_printf(sc->sc_dev, "%s: Q%d: fifo[%d] != NULL (%p)\n", __func__, qtype, re->m_fifo_tail, re->m_fifo[re->m_fifo_tail]); /* Free the slot */ ath_edma_rxbuf_free(sc, re->m_fifo[re->m_fifo_tail]); re->m_fifo_depth--; /* XXX check it's not < 0 */ re->m_fifo[re->m_fifo_tail] = NULL; } bf = ath_edma_rxbuf_alloc(sc); /* XXX should ensure the FIFO is not NULL? */ if (bf == NULL) { device_printf(sc->sc_dev, "%s: Q%d: alloc failed: i=%d, nbufs=%d?\n", __func__, qtype, i, nbufs); break; } re->m_fifo[re->m_fifo_tail] = bf; /* Write to the RX FIFO */ DPRINTF(sc, ATH_DEBUG_EDMA_RX, "%s: Q%d: putrxbuf=%p (0x%jx)\n", __func__, qtype, bf->bf_desc, (uintmax_t) bf->bf_daddr); ath_hal_putrxbuf(sc->sc_ah, bf->bf_daddr, qtype); re->m_fifo_depth++; INCR(re->m_fifo_tail, re->m_fifolen); } /* * Return how many were allocated. */ DPRINTF(sc, ATH_DEBUG_EDMA_RX, "%s: Q%d: nbufs=%d, nalloced=%d\n", __func__, qtype, nbufs, i); return (i); } static int ath_edma_rxfifo_flush(struct ath_softc *sc, HAL_RX_QUEUE qtype) { struct ath_rx_edma *re = &sc->sc_rxedma[qtype]; int i; ATH_RX_LOCK_ASSERT(sc); for (i = 0; i < re->m_fifolen; i++) { if (re->m_fifo[i] != NULL) { #ifdef ATH_DEBUG struct ath_buf *bf = re->m_fifo[i]; if (sc->sc_debug & ATH_DEBUG_RECV_DESC) ath_printrxbuf(sc, bf, 0, HAL_OK); #endif ath_edma_rxbuf_free(sc, re->m_fifo[i]); re->m_fifo[i] = NULL; re->m_fifo_depth--; } } if (re->m_rxpending != NULL) { m_freem(re->m_rxpending); re->m_rxpending = NULL; } re->m_fifo_head = re->m_fifo_tail = re->m_fifo_depth = 0; return (0); } /* * Setup the initial RX FIFO structure. */ static int ath_edma_setup_rxfifo(struct ath_softc *sc, HAL_RX_QUEUE qtype) { struct ath_rx_edma *re = &sc->sc_rxedma[qtype]; ATH_RX_LOCK_ASSERT(sc); if (! 
ath_hal_getrxfifodepth(sc->sc_ah, qtype, &re->m_fifolen)) { device_printf(sc->sc_dev, "%s: qtype=%d, failed\n", __func__, qtype); return (-EINVAL); } if (bootverbose) device_printf(sc->sc_dev, "%s: type=%d, FIFO depth = %d entries\n", __func__, qtype, re->m_fifolen); /* Allocate ath_buf FIFO array, pre-zero'ed */ - re->m_fifo = malloc(sizeof(struct ath_buf *) * re->m_fifolen, - M_ATHDEV, - M_NOWAIT | M_ZERO); + re->m_fifo = mallocarray(re->m_fifolen, sizeof(struct ath_buf *), + M_ATHDEV, M_NOWAIT | M_ZERO); if (re->m_fifo == NULL) { device_printf(sc->sc_dev, "%s: malloc failed\n", __func__); return (-ENOMEM); } /* * Set initial "empty" state. */ re->m_rxpending = NULL; re->m_fifo_head = re->m_fifo_tail = re->m_fifo_depth = 0; return (0); } static int ath_edma_rxfifo_free(struct ath_softc *sc, HAL_RX_QUEUE qtype) { struct ath_rx_edma *re = &sc->sc_rxedma[qtype]; device_printf(sc->sc_dev, "%s: called; qtype=%d\n", __func__, qtype); free(re->m_fifo, M_ATHDEV); return (0); } static int ath_edma_dma_rxsetup(struct ath_softc *sc) { int error; /* * Create RX DMA tag and buffers. */ error = ath_descdma_setup_rx_edma(sc, &sc->sc_rxdma, &sc->sc_rxbuf, "rx", ath_rxbuf, sc->sc_rx_statuslen); if (error != 0) return error; ATH_RX_LOCK(sc); (void) ath_edma_setup_rxfifo(sc, HAL_RX_QUEUE_HP); (void) ath_edma_setup_rxfifo(sc, HAL_RX_QUEUE_LP); ATH_RX_UNLOCK(sc); return (0); } static int ath_edma_dma_rxteardown(struct ath_softc *sc) { ATH_RX_LOCK(sc); ath_edma_flush_deferred_queue(sc); ath_edma_rxfifo_flush(sc, HAL_RX_QUEUE_HP); ath_edma_rxfifo_free(sc, HAL_RX_QUEUE_HP); ath_edma_rxfifo_flush(sc, HAL_RX_QUEUE_LP); ath_edma_rxfifo_free(sc, HAL_RX_QUEUE_LP); ATH_RX_UNLOCK(sc); /* Free RX ath_buf */ /* Free RX DMA tag */ if (sc->sc_rxdma.dd_desc_len != 0) ath_descdma_cleanup(sc, &sc->sc_rxdma, &sc->sc_rxbuf); return (0); } void ath_recv_setup_edma(struct ath_softc *sc) { /* Set buffer size to 4k */ sc->sc_edma_bufsize = 4096; /* Fetch EDMA field and buffer sizes */ (void) ath_hal_getrxstatuslen(sc->sc_ah, &sc->sc_rx_statuslen); /* Configure the hardware with the RX buffer size */ (void) ath_hal_setrxbufsize(sc->sc_ah, sc->sc_edma_bufsize - sc->sc_rx_statuslen); if (bootverbose) { device_printf(sc->sc_dev, "RX status length: %d\n", sc->sc_rx_statuslen); device_printf(sc->sc_dev, "RX buffer size: %d\n", sc->sc_edma_bufsize); } sc->sc_rx.recv_stop = ath_edma_stoprecv; sc->sc_rx.recv_start = ath_edma_startrecv; sc->sc_rx.recv_flush = ath_edma_recv_flush; sc->sc_rx.recv_tasklet = ath_edma_recv_tasklet; sc->sc_rx.recv_rxbuf_init = ath_edma_rxbuf_init; sc->sc_rx.recv_setup = ath_edma_dma_rxsetup; sc->sc_rx.recv_teardown = ath_edma_dma_rxteardown; sc->sc_rx.recv_sched = ath_edma_recv_sched; sc->sc_rx.recv_sched_queue = ath_edma_recv_sched_queue; } Index: head/sys/dev/beri/virtio/virtio.c =================================================================== --- head/sys/dev/beri/virtio/virtio.c (revision 327948) +++ head/sys/dev/beri/virtio/virtio.c (revision 327949) @@ -1,260 +1,260 @@ /*- * Copyright (c) 2014 Ruslan Bukin * All rights reserved. * * This software was developed by SRI International and the University of * Cambridge Computer Laboratory under DARPA/AFRL contract (FA8750-10-C-0237) * ("CTSRD"), as part of the DARPA CRASH research programme. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. 
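Back to the m_fifo allocation converted just above: the rewrite is mechanical. The count (re->m_fifolen) and the element size (sizeof(struct ath_buf *)) become the first two arguments, in the same (nmemb, size) order calloc(3) uses, while the malloc type (M_ATHDEV) and the flags (M_NOWAIT | M_ZERO) are carried over unchanged, so the FIFO array still comes back zeroed on success. A generic before/after of the same conversion, with illustrative names (tbl, nentries) that are not from the driver:

	void **tbl;
	size_t nentries = 64;		/* illustrative count */

	/* Old style: the caller forms the byte count itself. */
	tbl = malloc(sizeof(void *) * nentries, M_DEVBUF, M_NOWAIT | M_ZERO);
	/* New style: calloc(3)-like (nmemb, size); type and flags unchanged. */
	tbl = mallocarray(nentries, sizeof(void *), M_DEVBUF, M_NOWAIT | M_ZERO);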
Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ /* * BERI virtio mmio backend common methods */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "pio_if.h" int vq_ring_ready(struct vqueue_info *vq) { return (vq->vq_flags & VQ_ALLOC); } int vq_has_descs(struct vqueue_info *vq) { return (vq_ring_ready(vq) && vq->vq_last_avail != be16toh(vq->vq_avail->idx)); } void * paddr_map(uint32_t offset, uint32_t phys, uint32_t size) { bus_space_handle_t bsh; if (bus_space_map(fdtbus_bs_tag, (phys + offset), size, 0, &bsh) != 0) { panic("Couldn't map 0x%08x\n", (phys + offset)); } return (void *)(bsh); } void paddr_unmap(void *phys, uint32_t size) { bus_space_unmap(fdtbus_bs_tag, (bus_space_handle_t)phys, size); } static inline void _vq_record(uint32_t offs, int i, volatile struct vring_desc *vd, struct iovec *iov, int n_iov, uint16_t *flags) { if (i >= n_iov) return; iov[i].iov_base = paddr_map(offs, be64toh(vd->addr), be32toh(vd->len)); iov[i].iov_len = be32toh(vd->len); if (flags != NULL) flags[i] = be16toh(vd->flags); } int vq_getchain(uint32_t offs, struct vqueue_info *vq, struct iovec *iov, int n_iov, uint16_t *flags) { volatile struct vring_desc *vdir, *vindir, *vp; int idx, ndesc, n_indir; int head, next; int i; idx = vq->vq_last_avail; ndesc = (be16toh(vq->vq_avail->idx) - idx); if (ndesc == 0) return (0); head = be16toh(vq->vq_avail->ring[idx & (vq->vq_qsize - 1)]); next = head; for (i = 0; i < VQ_MAX_DESCRIPTORS; next = be16toh(vdir->next)) { vdir = &vq->vq_desc[next]; if ((be16toh(vdir->flags) & VRING_DESC_F_INDIRECT) == 0) { _vq_record(offs, i, vdir, iov, n_iov, flags); i++; } else { n_indir = be32toh(vdir->len) / 16; vindir = paddr_map(offs, be64toh(vdir->addr), be32toh(vdir->len)); next = 0; for (;;) { vp = &vindir[next]; _vq_record(offs, i, vp, iov, n_iov, flags); i+=1; if ((be16toh(vp->flags) & \ VRING_DESC_F_NEXT) == 0) break; next = be16toh(vp->next); } paddr_unmap(__DEVOLATILE(void *, vindir), be32toh(vdir->len)); } if ((be16toh(vdir->flags) & VRING_DESC_F_NEXT) == 0) return (i); } return (i); } void vq_relchain(struct vqueue_info *vq, struct iovec *iov, int n, uint32_t iolen) { volatile struct vring_used_elem *vue; volatile struct vring_used *vu; uint16_t head, uidx, mask; int i; mask = 
vq->vq_qsize - 1; vu = vq->vq_used; head = be16toh(vq->vq_avail->ring[vq->vq_last_avail++ & mask]); uidx = be16toh(vu->idx); vue = &vu->ring[uidx++ & mask]; vue->id = htobe32(head); vue->len = htobe32(iolen); vu->idx = htobe16(uidx); /* Clean up */ for (i = 0; i < n; i++) { paddr_unmap((void *)iov[i].iov_base, iov[i].iov_len); } } int setup_pio(device_t dev, char *name, device_t *pio_dev) { phandle_t pio_node; struct fdt_ic *ic; phandle_t xref; phandle_t node; if ((node = ofw_bus_get_node(dev)) == -1) return (ENXIO); if (OF_searchencprop(node, name, &xref, sizeof(xref)) == -1) { return (ENXIO); } pio_node = OF_node_from_xref(xref); SLIST_FOREACH(ic, &fdt_ic_list_head, fdt_ics) { if (ic->iph == pio_node) { *pio_dev = ic->dev; return (0); } } return (ENXIO); } int setup_offset(device_t dev, uint32_t *offset) { pcell_t dts_value[2]; phandle_t mem_node; phandle_t xref; phandle_t node; int len; if ((node = ofw_bus_get_node(dev)) == -1) return (ENXIO); if (OF_searchencprop(node, "beri-mem", &xref, sizeof(xref)) == -1) { return (ENXIO); } mem_node = OF_node_from_xref(xref); if ((len = OF_getproplen(mem_node, "reg")) <= 0) return (ENXIO); OF_getencprop(mem_node, "reg", dts_value, len); *offset = dts_value[0]; return (0); } struct iovec * getcopy(struct iovec *iov, int n) { struct iovec *tiov; int i; - tiov = malloc(n * sizeof(struct iovec), M_DEVBUF, M_NOWAIT); + tiov = mallocarray(n, sizeof(struct iovec), M_DEVBUF, M_NOWAIT); for (i = 0; i < n; i++) { tiov[i].iov_base = iov[i].iov_base; tiov[i].iov_len = iov[i].iov_len; } return (tiov); } Index: head/sys/dev/bnxt/if_bnxt.c =================================================================== --- head/sys/dev/bnxt/if_bnxt.c (revision 327948) +++ head/sys/dev/bnxt/if_bnxt.c (revision 327949) @@ -1,2498 +1,2499 @@ /*- * Broadcom NetXtreme-C/E network driver. * * Copyright (c) 2016 Broadcom, All Rights Reserved. * The term Broadcom refers to Broadcom Limited and/or its subsidiaries * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS' * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF * THE POSSIBILITY OF SUCH DAMAGE. 
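One caveat worth keeping in mind while reading these conversions, using the getcopy() change in beri/virtio above as the example: an M_NOWAIT allocation can still return NULL under ordinary memory pressure, exactly as malloc() could, so switching to mallocarray() adds an overflow check but is not a guarantee of success, and callers still have to tolerate failure. A defensive variant might look like the sketch below; getcopy_checked() is a hypothetical name for illustration, not code from this commit:

	static struct iovec *
	getcopy_checked(struct iovec *iov, int n)
	{
		struct iovec *tiov;
		int i;

		tiov = mallocarray(n, sizeof(struct iovec), M_DEVBUF, M_NOWAIT);
		if (tiov == NULL)
			return (NULL);	/* overflow or transient shortage */
		for (i = 0; i < n; i++)
			tiov[i] = iov[i];
		return (tiov);
	}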
*/ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "opt_inet.h" #include "opt_inet6.h" #include "opt_rss.h" #include "ifdi_if.h" #include "bnxt.h" #include "bnxt_hwrm.h" #include "bnxt_ioctl.h" #include "bnxt_sysctl.h" #include "hsi_struct_def.h" /* * PCI Device ID Table */ static pci_vendor_info_t bnxt_vendor_info_array[] = { PVID(BROADCOM_VENDOR_ID, BCM57301, "Broadcom BCM57301 NetXtreme-C 10Gb Ethernet Controller"), PVID(BROADCOM_VENDOR_ID, BCM57302, "Broadcom BCM57302 NetXtreme-C 10Gb/25Gb Ethernet Controller"), PVID(BROADCOM_VENDOR_ID, BCM57304, "Broadcom BCM57304 NetXtreme-C 10Gb/25Gb/40Gb/50Gb Ethernet Controller"), PVID(BROADCOM_VENDOR_ID, BCM57311, "Broadcom BCM57311 NetXtreme-C 10Gb Ethernet"), PVID(BROADCOM_VENDOR_ID, BCM57312, "Broadcom BCM57312 NetXtreme-C 10Gb/25Gb Ethernet"), PVID(BROADCOM_VENDOR_ID, BCM57314, "Broadcom BCM57314 NetXtreme-C 10Gb/25Gb/40Gb/50Gb Ethernet"), PVID(BROADCOM_VENDOR_ID, BCM57402, "Broadcom BCM57402 NetXtreme-E 10Gb Ethernet Controller"), PVID(BROADCOM_VENDOR_ID, BCM57402_NPAR, "Broadcom BCM57402 NetXtreme-E Partition"), PVID(BROADCOM_VENDOR_ID, BCM57404, "Broadcom BCM57404 NetXtreme-E 10Gb/25Gb Ethernet Controller"), PVID(BROADCOM_VENDOR_ID, BCM57404_NPAR, "Broadcom BCM57404 NetXtreme-E Partition"), PVID(BROADCOM_VENDOR_ID, BCM57406, "Broadcom BCM57406 NetXtreme-E 10GBase-T Ethernet Controller"), PVID(BROADCOM_VENDOR_ID, BCM57406_NPAR, "Broadcom BCM57406 NetXtreme-E Partition"), PVID(BROADCOM_VENDOR_ID, BCM57407, "Broadcom BCM57407 NetXtreme-E 10GBase-T Ethernet Controller"), PVID(BROADCOM_VENDOR_ID, BCM57407_NPAR, "Broadcom BCM57407 NetXtreme-E Ethernet Partition"), PVID(BROADCOM_VENDOR_ID, BCM57407_SFP, "Broadcom BCM57407 NetXtreme-E 25Gb Ethernet Controller"), PVID(BROADCOM_VENDOR_ID, BCM57412, "Broadcom BCM57412 NetXtreme-E 10Gb Ethernet"), PVID(BROADCOM_VENDOR_ID, BCM57412_NPAR1, "Broadcom BCM57412 NetXtreme-E Ethernet Partition"), PVID(BROADCOM_VENDOR_ID, BCM57412_NPAR2, "Broadcom BCM57412 NetXtreme-E Ethernet Partition"), PVID(BROADCOM_VENDOR_ID, BCM57414, "Broadcom BCM57414 NetXtreme-E 10Gb/25Gb Ethernet"), PVID(BROADCOM_VENDOR_ID, BCM57414_NPAR1, "Broadcom BCM57414 NetXtreme-E Ethernet Partition"), PVID(BROADCOM_VENDOR_ID, BCM57414_NPAR2, "Broadcom BCM57414 NetXtreme-E Ethernet Partition"), PVID(BROADCOM_VENDOR_ID, BCM57416, "Broadcom BCM57416 NetXtreme-E 10GBase-T Ethernet"), PVID(BROADCOM_VENDOR_ID, BCM57416_NPAR1, "Broadcom BCM57416 NetXtreme-E Ethernet Partition"), PVID(BROADCOM_VENDOR_ID, BCM57416_NPAR2, "Broadcom BCM57416 NetXtreme-E Ethernet Partition"), PVID(BROADCOM_VENDOR_ID, BCM57416_SFP, "Broadcom BCM57416 NetXtreme-E 10Gb Ethernet"), PVID(BROADCOM_VENDOR_ID, BCM57417, "Broadcom BCM57417 NetXtreme-E 10GBase-T Ethernet"), PVID(BROADCOM_VENDOR_ID, BCM57417_NPAR1, "Broadcom BCM57417 NetXtreme-E Ethernet Partition"), PVID(BROADCOM_VENDOR_ID, BCM57417_NPAR2, "Broadcom BCM57417 NetXtreme-E Ethernet Partition"), PVID(BROADCOM_VENDOR_ID, BCM57417_SFP, "Broadcom BCM57417 NetXtreme-E 10Gb/25Gb Ethernet"), PVID(BROADCOM_VENDOR_ID, BCM57454, "Broadcom BCM57454 NetXtreme-E 10Gb/25Gb/40Gb/50Gb/100Gb Ethernet"), PVID(BROADCOM_VENDOR_ID, BCM58700, "Broadcom BCM58700 Nitro 1Gb/2.5Gb/10Gb Ethernet"), PVID(BROADCOM_VENDOR_ID, NETXTREME_C_VF1, "Broadcom NetXtreme-C Ethernet Virtual Function"), PVID(BROADCOM_VENDOR_ID, NETXTREME_C_VF2, "Broadcom NetXtreme-C Ethernet Virtual Function"), 
PVID(BROADCOM_VENDOR_ID, NETXTREME_C_VF3, "Broadcom NetXtreme-C Ethernet Virtual Function"), PVID(BROADCOM_VENDOR_ID, NETXTREME_E_VF1, "Broadcom NetXtreme-E Ethernet Virtual Function"), PVID(BROADCOM_VENDOR_ID, NETXTREME_E_VF2, "Broadcom NetXtreme-E Ethernet Virtual Function"), PVID(BROADCOM_VENDOR_ID, NETXTREME_E_VF3, "Broadcom NetXtreme-E Ethernet Virtual Function"), /* required last entry */ PVID_END }; /* * Function prototypes */ static void *bnxt_register(device_t dev); /* Soft queue setup and teardown */ static int bnxt_tx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs, int ntxqs, int ntxqsets); static int bnxt_rx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs, int nrxqs, int nrxqsets); static void bnxt_queues_free(if_ctx_t ctx); /* Device setup and teardown */ static int bnxt_attach_pre(if_ctx_t ctx); static int bnxt_attach_post(if_ctx_t ctx); static int bnxt_detach(if_ctx_t ctx); /* Device configuration */ static void bnxt_init(if_ctx_t ctx); static void bnxt_stop(if_ctx_t ctx); static void bnxt_multi_set(if_ctx_t ctx); static int bnxt_mtu_set(if_ctx_t ctx, uint32_t mtu); static void bnxt_media_status(if_ctx_t ctx, struct ifmediareq * ifmr); static int bnxt_media_change(if_ctx_t ctx); static int bnxt_promisc_set(if_ctx_t ctx, int flags); static uint64_t bnxt_get_counter(if_ctx_t, ift_counter); static void bnxt_update_admin_status(if_ctx_t ctx); static void bnxt_if_timer(if_ctx_t ctx, uint16_t qid); /* Interrupt enable / disable */ static void bnxt_intr_enable(if_ctx_t ctx); static int bnxt_rx_queue_intr_enable(if_ctx_t ctx, uint16_t qid); static int bnxt_tx_queue_intr_enable(if_ctx_t ctx, uint16_t qid); static void bnxt_disable_intr(if_ctx_t ctx); static int bnxt_msix_intr_assign(if_ctx_t ctx, int msix); /* vlan support */ static void bnxt_vlan_register(if_ctx_t ctx, uint16_t vtag); static void bnxt_vlan_unregister(if_ctx_t ctx, uint16_t vtag); /* ioctl */ static int bnxt_priv_ioctl(if_ctx_t ctx, u_long command, caddr_t data); static int bnxt_shutdown(if_ctx_t ctx); static int bnxt_suspend(if_ctx_t ctx); static int bnxt_resume(if_ctx_t ctx); /* Internal support functions */ static int bnxt_probe_phy(struct bnxt_softc *softc); static void bnxt_add_media_types(struct bnxt_softc *softc); static int bnxt_pci_mapping(struct bnxt_softc *softc); static void bnxt_pci_mapping_free(struct bnxt_softc *softc); static int bnxt_update_link(struct bnxt_softc *softc, bool chng_link_state); static int bnxt_handle_def_cp(void *arg); static int bnxt_handle_rx_cp(void *arg); static void bnxt_clear_ids(struct bnxt_softc *softc); static void inline bnxt_do_enable_intr(struct bnxt_cp_ring *cpr); static void inline bnxt_do_disable_intr(struct bnxt_cp_ring *cpr); static void bnxt_mark_cpr_invalid(struct bnxt_cp_ring *cpr); static void bnxt_def_cp_task(void *context); static void bnxt_handle_async_event(struct bnxt_softc *softc, struct cmpl_base *cmpl); static uint8_t get_phy_type(struct bnxt_softc *softc); static uint64_t bnxt_get_baudrate(struct bnxt_link_info *link); static void bnxt_get_wol_settings(struct bnxt_softc *softc); static int bnxt_wol_config(if_ctx_t ctx); /* * Device Interface Declaration */ static device_method_t bnxt_methods[] = { /* Device interface */ DEVMETHOD(device_register, bnxt_register), DEVMETHOD(device_probe, iflib_device_probe), DEVMETHOD(device_attach, iflib_device_attach), DEVMETHOD(device_detach, iflib_device_detach), DEVMETHOD(device_shutdown, iflib_device_shutdown), DEVMETHOD(device_suspend, iflib_device_suspend), DEVMETHOD(device_resume, 
iflib_device_resume), DEVMETHOD_END }; static driver_t bnxt_driver = { "bnxt", bnxt_methods, sizeof(struct bnxt_softc), }; devclass_t bnxt_devclass; DRIVER_MODULE(bnxt, pci, bnxt_driver, bnxt_devclass, 0, 0); MODULE_DEPEND(bnxt, pci, 1, 1, 1); MODULE_DEPEND(bnxt, ether, 1, 1, 1); MODULE_DEPEND(bnxt, iflib, 1, 1, 1); IFLIB_PNP_INFO(pci, bnxt, bnxt_vendor_info_array); static device_method_t bnxt_iflib_methods[] = { DEVMETHOD(ifdi_tx_queues_alloc, bnxt_tx_queues_alloc), DEVMETHOD(ifdi_rx_queues_alloc, bnxt_rx_queues_alloc), DEVMETHOD(ifdi_queues_free, bnxt_queues_free), DEVMETHOD(ifdi_attach_pre, bnxt_attach_pre), DEVMETHOD(ifdi_attach_post, bnxt_attach_post), DEVMETHOD(ifdi_detach, bnxt_detach), DEVMETHOD(ifdi_init, bnxt_init), DEVMETHOD(ifdi_stop, bnxt_stop), DEVMETHOD(ifdi_multi_set, bnxt_multi_set), DEVMETHOD(ifdi_mtu_set, bnxt_mtu_set), DEVMETHOD(ifdi_media_status, bnxt_media_status), DEVMETHOD(ifdi_media_change, bnxt_media_change), DEVMETHOD(ifdi_promisc_set, bnxt_promisc_set), DEVMETHOD(ifdi_get_counter, bnxt_get_counter), DEVMETHOD(ifdi_update_admin_status, bnxt_update_admin_status), DEVMETHOD(ifdi_timer, bnxt_if_timer), DEVMETHOD(ifdi_intr_enable, bnxt_intr_enable), DEVMETHOD(ifdi_tx_queue_intr_enable, bnxt_tx_queue_intr_enable), DEVMETHOD(ifdi_rx_queue_intr_enable, bnxt_rx_queue_intr_enable), DEVMETHOD(ifdi_intr_disable, bnxt_disable_intr), DEVMETHOD(ifdi_msix_intr_assign, bnxt_msix_intr_assign), DEVMETHOD(ifdi_vlan_register, bnxt_vlan_register), DEVMETHOD(ifdi_vlan_unregister, bnxt_vlan_unregister), DEVMETHOD(ifdi_priv_ioctl, bnxt_priv_ioctl), DEVMETHOD(ifdi_suspend, bnxt_suspend), DEVMETHOD(ifdi_shutdown, bnxt_shutdown), DEVMETHOD(ifdi_resume, bnxt_resume), DEVMETHOD_END }; static driver_t bnxt_iflib_driver = { "bnxt", bnxt_iflib_methods, sizeof(struct bnxt_softc) }; /* * iflib shared context */ #define BNXT_DRIVER_VERSION "1.0.0.2" char bnxt_driver_version[] = BNXT_DRIVER_VERSION; extern struct if_txrx bnxt_txrx; static struct if_shared_ctx bnxt_sctx_init = { .isc_magic = IFLIB_MAGIC, .isc_driver = &bnxt_iflib_driver, .isc_nfl = 2, // Number of Free Lists .isc_flags = IFLIB_HAS_RXCQ | IFLIB_HAS_TXCQ | IFLIB_NEED_ETHER_PAD, .isc_q_align = PAGE_SIZE, .isc_tx_maxsize = BNXT_TSO_SIZE, .isc_tx_maxsegsize = BNXT_TSO_SIZE, .isc_rx_maxsize = BNXT_TSO_SIZE, .isc_rx_maxsegsize = BNXT_TSO_SIZE, // Only use a single segment to avoid page size constraints .isc_rx_nsegments = 1, .isc_ntxqs = 2, .isc_nrxqs = 3, .isc_nrxd_min = {16, 16, 16}, .isc_nrxd_default = {PAGE_SIZE / sizeof(struct cmpl_base) * 8, PAGE_SIZE / sizeof(struct rx_prod_pkt_bd), PAGE_SIZE / sizeof(struct rx_prod_pkt_bd)}, .isc_nrxd_max = {INT32_MAX, INT32_MAX, INT32_MAX}, .isc_ntxd_min = {16, 16, 16}, .isc_ntxd_default = {PAGE_SIZE / sizeof(struct cmpl_base) * 2, PAGE_SIZE / sizeof(struct tx_bd_short)}, .isc_ntxd_max = {INT32_MAX, INT32_MAX, INT32_MAX}, .isc_admin_intrcnt = 1, .isc_vendor_info = bnxt_vendor_info_array, .isc_driver_version = bnxt_driver_version, }; if_shared_ctx_t bnxt_sctx = &bnxt_sctx_init; /* * Device Methods */ static void * bnxt_register(device_t dev) { return bnxt_sctx; } /* * Device Dependent Configuration Functions */ /* Soft queue setup and teardown */ static int bnxt_tx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs, int ntxqs, int ntxqsets) { struct bnxt_softc *softc; int i; int rc; softc = iflib_get_softc(ctx); - softc->tx_cp_rings = malloc(sizeof(struct bnxt_cp_ring) * ntxqsets, + softc->tx_cp_rings = mallocarray(ntxqsets, sizeof(struct bnxt_cp_ring), M_DEVBUF, M_NOWAIT | M_ZERO); if 
(!softc->tx_cp_rings) { device_printf(iflib_get_dev(ctx), "unable to allocate TX completion rings\n"); rc = ENOMEM; goto cp_alloc_fail; } - softc->tx_rings = malloc(sizeof(struct bnxt_ring) * ntxqsets, + softc->tx_rings = mallocarray(ntxqsets, sizeof(struct bnxt_ring), M_DEVBUF, M_NOWAIT | M_ZERO); if (!softc->tx_rings) { device_printf(iflib_get_dev(ctx), "unable to allocate TX rings\n"); rc = ENOMEM; goto ring_alloc_fail; } rc = iflib_dma_alloc(ctx, sizeof(struct ctx_hw_stats) * ntxqsets, &softc->tx_stats, 0); if (rc) goto dma_alloc_fail; bus_dmamap_sync(softc->tx_stats.idi_tag, softc->tx_stats.idi_map, BUS_DMASYNC_PREREAD); for (i = 0; i < ntxqsets; i++) { /* Set up the completion ring */ softc->tx_cp_rings[i].stats_ctx_id = HWRM_NA_SIGNATURE; softc->tx_cp_rings[i].ring.phys_id = (uint16_t)HWRM_NA_SIGNATURE; softc->tx_cp_rings[i].ring.softc = softc; softc->tx_cp_rings[i].ring.id = (softc->scctx->isc_nrxqsets * 2) + 1 + i; softc->tx_cp_rings[i].ring.doorbell = softc->tx_cp_rings[i].ring.id * 0x80; softc->tx_cp_rings[i].ring.ring_size = softc->scctx->isc_ntxd[0]; softc->tx_cp_rings[i].ring.vaddr = vaddrs[i * ntxqs]; softc->tx_cp_rings[i].ring.paddr = paddrs[i * ntxqs]; /* Set up the TX ring */ softc->tx_rings[i].phys_id = (uint16_t)HWRM_NA_SIGNATURE; softc->tx_rings[i].softc = softc; softc->tx_rings[i].id = (softc->scctx->isc_nrxqsets * 2) + 1 + i; softc->tx_rings[i].doorbell = softc->tx_rings[i].id * 0x80; softc->tx_rings[i].ring_size = softc->scctx->isc_ntxd[1]; softc->tx_rings[i].vaddr = vaddrs[i * ntxqs + 1]; softc->tx_rings[i].paddr = paddrs[i * ntxqs + 1]; bnxt_create_tx_sysctls(softc, i); } softc->ntxqsets = ntxqsets; return rc; dma_alloc_fail: free(softc->tx_rings, M_DEVBUF); ring_alloc_fail: free(softc->tx_cp_rings, M_DEVBUF); cp_alloc_fail: return rc; } static void bnxt_queues_free(if_ctx_t ctx) { struct bnxt_softc *softc = iflib_get_softc(ctx); // Free TX queues iflib_dma_free(&softc->tx_stats); free(softc->tx_rings, M_DEVBUF); softc->tx_rings = NULL; free(softc->tx_cp_rings, M_DEVBUF); softc->tx_cp_rings = NULL; softc->ntxqsets = 0; // Free RX queues iflib_dma_free(&softc->rx_stats); iflib_dma_free(&softc->hw_tx_port_stats); iflib_dma_free(&softc->hw_rx_port_stats); free(softc->grp_info, M_DEVBUF); free(softc->ag_rings, M_DEVBUF); free(softc->rx_rings, M_DEVBUF); free(softc->rx_cp_rings, M_DEVBUF); } static int bnxt_rx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs, int nrxqs, int nrxqsets) { struct bnxt_softc *softc; int i; int rc; softc = iflib_get_softc(ctx); - softc->rx_cp_rings = malloc(sizeof(struct bnxt_cp_ring) * nrxqsets, + softc->rx_cp_rings = mallocarray(nrxqsets, sizeof(struct bnxt_cp_ring), M_DEVBUF, M_NOWAIT | M_ZERO); if (!softc->rx_cp_rings) { device_printf(iflib_get_dev(ctx), "unable to allocate RX completion rings\n"); rc = ENOMEM; goto cp_alloc_fail; } - softc->rx_rings = malloc(sizeof(struct bnxt_ring) * nrxqsets, + softc->rx_rings = mallocarray(nrxqsets, sizeof(struct bnxt_ring), M_DEVBUF, M_NOWAIT | M_ZERO); if (!softc->rx_rings) { device_printf(iflib_get_dev(ctx), "unable to allocate RX rings\n"); rc = ENOMEM; goto ring_alloc_fail; } - softc->ag_rings = malloc(sizeof(struct bnxt_ring) * nrxqsets, + softc->ag_rings = mallocarray(nrxqsets, sizeof(struct bnxt_ring), M_DEVBUF, M_NOWAIT | M_ZERO); if (!softc->ag_rings) { device_printf(iflib_get_dev(ctx), "unable to allocate aggregation rings\n"); rc = ENOMEM; goto ag_alloc_fail; } - softc->grp_info = malloc(sizeof(struct bnxt_grp_info) * nrxqsets, + softc->grp_info = mallocarray(nrxqsets, 
sizeof(struct bnxt_grp_info), M_DEVBUF, M_NOWAIT | M_ZERO); if (!softc->grp_info) { device_printf(iflib_get_dev(ctx), "unable to allocate ring groups\n"); rc = ENOMEM; goto grp_alloc_fail; } rc = iflib_dma_alloc(ctx, sizeof(struct ctx_hw_stats) * nrxqsets, &softc->rx_stats, 0); if (rc) goto hw_stats_alloc_fail; bus_dmamap_sync(softc->rx_stats.idi_tag, softc->rx_stats.idi_map, BUS_DMASYNC_PREREAD); /* * Additional 512 bytes for future expansion. * To prevent corruption when loaded with newer firmwares with added counters. * This can be deleted when there will be no further additions of counters. */ #define BNXT_PORT_STAT_PADDING 512 rc = iflib_dma_alloc(ctx, sizeof(struct rx_port_stats) + BNXT_PORT_STAT_PADDING, &softc->hw_rx_port_stats, 0); if (rc) goto hw_port_rx_stats_alloc_fail; bus_dmamap_sync(softc->hw_rx_port_stats.idi_tag, softc->hw_rx_port_stats.idi_map, BUS_DMASYNC_PREREAD); rc = iflib_dma_alloc(ctx, sizeof(struct tx_port_stats) + BNXT_PORT_STAT_PADDING, &softc->hw_tx_port_stats, 0); if (rc) goto hw_port_tx_stats_alloc_fail; bus_dmamap_sync(softc->hw_tx_port_stats.idi_tag, softc->hw_tx_port_stats.idi_map, BUS_DMASYNC_PREREAD); softc->rx_port_stats = (void *) softc->hw_rx_port_stats.idi_vaddr; softc->tx_port_stats = (void *) softc->hw_tx_port_stats.idi_vaddr; for (i = 0; i < nrxqsets; i++) { /* Allocation the completion ring */ softc->rx_cp_rings[i].stats_ctx_id = HWRM_NA_SIGNATURE; softc->rx_cp_rings[i].ring.phys_id = (uint16_t)HWRM_NA_SIGNATURE; softc->rx_cp_rings[i].ring.softc = softc; softc->rx_cp_rings[i].ring.id = i + 1; softc->rx_cp_rings[i].ring.doorbell = softc->rx_cp_rings[i].ring.id * 0x80; /* * If this ring overflows, RX stops working. */ softc->rx_cp_rings[i].ring.ring_size = softc->scctx->isc_nrxd[0]; softc->rx_cp_rings[i].ring.vaddr = vaddrs[i * nrxqs]; softc->rx_cp_rings[i].ring.paddr = paddrs[i * nrxqs]; /* Allocate the RX ring */ softc->rx_rings[i].phys_id = (uint16_t)HWRM_NA_SIGNATURE; softc->rx_rings[i].softc = softc; softc->rx_rings[i].id = i + 1; softc->rx_rings[i].doorbell = softc->rx_rings[i].id * 0x80; softc->rx_rings[i].ring_size = softc->scctx->isc_nrxd[1]; softc->rx_rings[i].vaddr = vaddrs[i * nrxqs + 1]; softc->rx_rings[i].paddr = paddrs[i * nrxqs + 1]; /* Allocate the TPA start buffer */ - softc->rx_rings[i].tpa_start = malloc(sizeof(struct bnxt_full_tpa_start) * - (RX_TPA_START_CMPL_AGG_ID_MASK >> RX_TPA_START_CMPL_AGG_ID_SFT), - M_DEVBUF, M_NOWAIT | M_ZERO); + softc->rx_rings[i].tpa_start = mallocarray( + RX_TPA_START_CMPL_AGG_ID_MASK >> RX_TPA_START_CMPL_AGG_ID_SFT, + sizeof(struct bnxt_full_tpa_start), M_DEVBUF, + M_NOWAIT | M_ZERO); if (softc->rx_rings[i].tpa_start == NULL) { rc = -ENOMEM; device_printf(softc->dev, "Unable to allocate space for TPA\n"); goto tpa_alloc_fail; } /* Allocate the AG ring */ softc->ag_rings[i].phys_id = (uint16_t)HWRM_NA_SIGNATURE; softc->ag_rings[i].softc = softc; softc->ag_rings[i].id = nrxqsets + i + 1; softc->ag_rings[i].doorbell = softc->ag_rings[i].id * 0x80; softc->ag_rings[i].ring_size = softc->scctx->isc_nrxd[2]; softc->ag_rings[i].vaddr = vaddrs[i * nrxqs + 2]; softc->ag_rings[i].paddr = paddrs[i * nrxqs + 2]; /* Allocate the ring group */ softc->grp_info[i].grp_id = (uint16_t)HWRM_NA_SIGNATURE; softc->grp_info[i].stats_ctx = softc->rx_cp_rings[i].stats_ctx_id; softc->grp_info[i].rx_ring_id = softc->rx_rings[i].phys_id; softc->grp_info[i].ag_ring_id = softc->ag_rings[i].phys_id; softc->grp_info[i].cp_ring_id = softc->rx_cp_rings[i].ring.phys_id; bnxt_create_rx_sysctls(softc, i); } /* * When SR-IOV is enabled, 
avoid each VF sending PORT_QSTATS * HWRM every sec with which firmware timeouts can happen */ if (BNXT_PF(softc)) bnxt_create_port_stats_sysctls(softc); /* And finally, the VNIC */ softc->vnic_info.id = (uint16_t)HWRM_NA_SIGNATURE; softc->vnic_info.flow_id = (uint16_t)HWRM_NA_SIGNATURE; softc->vnic_info.filter_id = -1; softc->vnic_info.def_ring_grp = (uint16_t)HWRM_NA_SIGNATURE; softc->vnic_info.cos_rule = (uint16_t)HWRM_NA_SIGNATURE; softc->vnic_info.lb_rule = (uint16_t)HWRM_NA_SIGNATURE; softc->vnic_info.rx_mask = HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_BCAST; softc->vnic_info.mc_list_count = 0; softc->vnic_info.flags = BNXT_VNIC_FLAG_DEFAULT; rc = iflib_dma_alloc(ctx, BNXT_MAX_MC_ADDRS * ETHER_ADDR_LEN, &softc->vnic_info.mc_list, 0); if (rc) goto mc_list_alloc_fail; /* The VNIC RSS Hash Key */ rc = iflib_dma_alloc(ctx, HW_HASH_KEY_SIZE, &softc->vnic_info.rss_hash_key_tbl, 0); if (rc) goto rss_hash_alloc_fail; bus_dmamap_sync(softc->vnic_info.rss_hash_key_tbl.idi_tag, softc->vnic_info.rss_hash_key_tbl.idi_map, BUS_DMASYNC_PREWRITE); memcpy(softc->vnic_info.rss_hash_key_tbl.idi_vaddr, softc->vnic_info.rss_hash_key, HW_HASH_KEY_SIZE); /* Allocate the RSS tables */ rc = iflib_dma_alloc(ctx, HW_HASH_INDEX_SIZE * sizeof(uint16_t), &softc->vnic_info.rss_grp_tbl, 0); if (rc) goto rss_grp_alloc_fail; bus_dmamap_sync(softc->vnic_info.rss_grp_tbl.idi_tag, softc->vnic_info.rss_grp_tbl.idi_map, BUS_DMASYNC_PREWRITE); memset(softc->vnic_info.rss_grp_tbl.idi_vaddr, 0xff, softc->vnic_info.rss_grp_tbl.idi_size); softc->nrxqsets = nrxqsets; return rc; rss_grp_alloc_fail: iflib_dma_free(&softc->vnic_info.rss_hash_key_tbl); rss_hash_alloc_fail: iflib_dma_free(&softc->vnic_info.mc_list); tpa_alloc_fail: mc_list_alloc_fail: for (i = i - 1; i >= 0; i--) free(softc->rx_rings[i].tpa_start, M_DEVBUF); iflib_dma_free(&softc->hw_tx_port_stats); hw_port_tx_stats_alloc_fail: iflib_dma_free(&softc->hw_rx_port_stats); hw_port_rx_stats_alloc_fail: iflib_dma_free(&softc->rx_stats); hw_stats_alloc_fail: free(softc->grp_info, M_DEVBUF); grp_alloc_fail: free(softc->ag_rings, M_DEVBUF); ag_alloc_fail: free(softc->rx_rings, M_DEVBUF); ring_alloc_fail: free(softc->rx_cp_rings, M_DEVBUF); cp_alloc_fail: return rc; } static void bnxt_free_hwrm_short_cmd_req(struct bnxt_softc *softc) { if (softc->hwrm_short_cmd_req_addr.idi_vaddr) iflib_dma_free(&softc->hwrm_short_cmd_req_addr); softc->hwrm_short_cmd_req_addr.idi_vaddr = NULL; } static int bnxt_alloc_hwrm_short_cmd_req(struct bnxt_softc *softc) { int rc; rc = iflib_dma_alloc(softc->ctx, softc->hwrm_max_req_len, &softc->hwrm_short_cmd_req_addr, BUS_DMA_NOWAIT); return rc; } /* Device setup and teardown */ static int bnxt_attach_pre(if_ctx_t ctx) { struct bnxt_softc *softc = iflib_get_softc(ctx); if_softc_ctx_t scctx; int rc = 0; softc->ctx = ctx; softc->dev = iflib_get_dev(ctx); softc->media = iflib_get_media(ctx); softc->scctx = iflib_get_softc_ctx(ctx); softc->sctx = iflib_get_sctx(ctx); scctx = softc->scctx; /* TODO: Better way of detecting NPAR/VF is needed */ switch (pci_get_device(softc->dev)) { case BCM57402_NPAR: case BCM57404_NPAR: case BCM57406_NPAR: case BCM57407_NPAR: case BCM57412_NPAR1: case BCM57412_NPAR2: case BCM57414_NPAR1: case BCM57414_NPAR2: case BCM57416_NPAR1: case BCM57416_NPAR2: softc->flags |= BNXT_FLAG_NPAR; break; case NETXTREME_C_VF1: case NETXTREME_C_VF2: case NETXTREME_C_VF3: case NETXTREME_E_VF1: case NETXTREME_E_VF2: case NETXTREME_E_VF3: softc->flags |= BNXT_FLAG_VF; break; } pci_enable_busmaster(softc->dev); if (bnxt_pci_mapping(softc)) return 
(ENXIO); /* HWRM setup/init */ BNXT_HWRM_LOCK_INIT(softc, device_get_nameunit(softc->dev)); rc = bnxt_alloc_hwrm_dma_mem(softc); if (rc) goto dma_fail; /* Get firmware version and compare with driver */ softc->ver_info = malloc(sizeof(struct bnxt_ver_info), M_DEVBUF, M_NOWAIT | M_ZERO); if (softc->ver_info == NULL) { rc = ENOMEM; device_printf(softc->dev, "Unable to allocate space for version info\n"); goto ver_alloc_fail; } /* Default minimum required HWRM version */ softc->ver_info->hwrm_min_major = 1; softc->ver_info->hwrm_min_minor = 2; softc->ver_info->hwrm_min_update = 2; rc = bnxt_hwrm_ver_get(softc); if (rc) { device_printf(softc->dev, "attach: hwrm ver get failed\n"); goto ver_fail; } if (softc->flags & BNXT_FLAG_SHORT_CMD) { rc = bnxt_alloc_hwrm_short_cmd_req(softc); if (rc) goto hwrm_short_cmd_alloc_fail; } /* Get NVRAM info */ if (BNXT_PF(softc)) { softc->nvm_info = malloc(sizeof(struct bnxt_nvram_info), M_DEVBUF, M_NOWAIT | M_ZERO); if (softc->nvm_info == NULL) { rc = ENOMEM; device_printf(softc->dev, "Unable to allocate space for NVRAM info\n"); goto nvm_alloc_fail; } rc = bnxt_hwrm_nvm_get_dev_info(softc, &softc->nvm_info->mfg_id, &softc->nvm_info->device_id, &softc->nvm_info->sector_size, &softc->nvm_info->size, &softc->nvm_info->reserved_size, &softc->nvm_info->available_size); } /* Register the driver with the FW */ rc = bnxt_hwrm_func_drv_rgtr(softc); if (rc) { device_printf(softc->dev, "attach: hwrm drv rgtr failed\n"); goto drv_rgtr_fail; } rc = bnxt_hwrm_func_rgtr_async_events(softc, NULL, 0); if (rc) { device_printf(softc->dev, "attach: hwrm rgtr async evts failed\n"); goto drv_rgtr_fail; } /* Get the HW capabilities */ rc = bnxt_hwrm_func_qcaps(softc); if (rc) goto failed; /* Get the current configuration of this function */ rc = bnxt_hwrm_func_qcfg(softc); if (rc) { device_printf(softc->dev, "attach: hwrm func qcfg failed\n"); goto failed; } iflib_set_mac(ctx, softc->func.mac_addr); scctx->isc_txrx = &bnxt_txrx; scctx->isc_tx_csum_flags = (CSUM_IP | CSUM_TCP | CSUM_UDP | CSUM_TCP_IPV6 | CSUM_UDP_IPV6 | CSUM_TSO); scctx->isc_capenable = /* These are translated to hwassit bits */ IFCAP_TXCSUM | IFCAP_TXCSUM_IPV6 | IFCAP_TSO4 | IFCAP_TSO6 | /* These are checked by iflib */ IFCAP_LRO | IFCAP_VLAN_HWFILTER | /* These are part of the iflib mask */ IFCAP_RXCSUM | IFCAP_RXCSUM_IPV6 | IFCAP_VLAN_MTU | IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_HWTSO | /* These likely get lost... */ IFCAP_VLAN_HWCSUM | IFCAP_JUMBO_MTU; if (bnxt_wol_supported(softc)) scctx->isc_capenable |= IFCAP_WOL_MAGIC; /* Get the queue config */ rc = bnxt_hwrm_queue_qportcfg(softc); if (rc) { device_printf(softc->dev, "attach: hwrm qportcfg failed\n"); goto failed; } bnxt_get_wol_settings(softc); /* Now perform a function reset */ rc = bnxt_hwrm_func_reset(softc); bnxt_clear_ids(softc); if (rc) goto failed; /* Now set up iflib sc */ scctx->isc_tx_nsegments = 31, scctx->isc_tx_tso_segments_max = 31; scctx->isc_tx_tso_size_max = BNXT_TSO_SIZE; scctx->isc_tx_tso_segsize_max = BNXT_TSO_SIZE; scctx->isc_vectors = softc->func.max_cp_rings; scctx->isc_min_frame_size = BNXT_MIN_FRAME_SIZE; scctx->isc_txrx = &bnxt_txrx; if (scctx->isc_nrxd[0] < ((scctx->isc_nrxd[1] * 4) + scctx->isc_nrxd[2])) device_printf(softc->dev, "WARNING: nrxd0 (%d) should be at least 4 * nrxd1 (%d) + nrxd2 (%d). Driver may be unstable\n", scctx->isc_nrxd[0], scctx->isc_nrxd[1], scctx->isc_nrxd[2]); if (scctx->isc_ntxd[0] < scctx->isc_ntxd[1] * 2) device_printf(softc->dev, "WARNING: ntxd0 (%d) should be at least 2 * ntxd1 (%d). 
Driver may be unstable\n", scctx->isc_ntxd[0], scctx->isc_ntxd[1]); scctx->isc_txqsizes[0] = sizeof(struct cmpl_base) * scctx->isc_ntxd[0]; scctx->isc_txqsizes[1] = sizeof(struct tx_bd_short) * scctx->isc_ntxd[1]; scctx->isc_rxqsizes[0] = sizeof(struct cmpl_base) * scctx->isc_nrxd[0]; scctx->isc_rxqsizes[1] = sizeof(struct rx_prod_pkt_bd) * scctx->isc_nrxd[1]; scctx->isc_rxqsizes[2] = sizeof(struct rx_prod_pkt_bd) * scctx->isc_nrxd[2]; scctx->isc_nrxqsets_max = min(pci_msix_count(softc->dev)-1, softc->fn_qcfg.alloc_completion_rings - 1); scctx->isc_nrxqsets_max = min(scctx->isc_nrxqsets_max, softc->fn_qcfg.alloc_rx_rings); scctx->isc_nrxqsets_max = min(scctx->isc_nrxqsets_max, softc->fn_qcfg.alloc_vnics); scctx->isc_ntxqsets_max = min(softc->fn_qcfg.alloc_tx_rings, softc->fn_qcfg.alloc_completion_rings - scctx->isc_nrxqsets_max - 1); scctx->isc_rss_table_size = HW_HASH_INDEX_SIZE; scctx->isc_rss_table_mask = scctx->isc_rss_table_size - 1; /* iflib will map and release this bar */ scctx->isc_msix_bar = pci_msix_table_bar(softc->dev); /* * Default settings for HW LRO (TPA): * Disable HW LRO by default * Can be enabled after taking care of 'packet forwarding' */ softc->hw_lro.enable = 0; softc->hw_lro.is_mode_gro = 0; softc->hw_lro.max_agg_segs = 5; /* 2^5 = 32 segs */ softc->hw_lro.max_aggs = HWRM_VNIC_TPA_CFG_INPUT_MAX_AGGS_MAX; softc->hw_lro.min_agg_len = 512; /* Allocate the default completion ring */ softc->def_cp_ring.stats_ctx_id = HWRM_NA_SIGNATURE; softc->def_cp_ring.ring.phys_id = (uint16_t)HWRM_NA_SIGNATURE; softc->def_cp_ring.ring.softc = softc; softc->def_cp_ring.ring.id = 0; softc->def_cp_ring.ring.doorbell = softc->def_cp_ring.ring.id * 0x80; softc->def_cp_ring.ring.ring_size = PAGE_SIZE / sizeof(struct cmpl_base); rc = iflib_dma_alloc(ctx, sizeof(struct cmpl_base) * softc->def_cp_ring.ring.ring_size, &softc->def_cp_ring_mem, 0); softc->def_cp_ring.ring.vaddr = softc->def_cp_ring_mem.idi_vaddr; softc->def_cp_ring.ring.paddr = softc->def_cp_ring_mem.idi_paddr; iflib_config_gtask_init(ctx, &softc->def_cp_task, bnxt_def_cp_task, "dflt_cp"); rc = bnxt_init_sysctl_ctx(softc); if (rc) goto init_sysctl_failed; if (BNXT_PF(softc)) { rc = bnxt_create_nvram_sysctls(softc->nvm_info); if (rc) goto failed; } arc4rand(softc->vnic_info.rss_hash_key, HW_HASH_KEY_SIZE, 0); softc->vnic_info.rss_hash_type = HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_IPV4 | HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_TCP_IPV4 | HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_UDP_IPV4 | HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_IPV6 | HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_TCP_IPV6 | HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_UDP_IPV6; rc = bnxt_create_config_sysctls_pre(softc); if (rc) goto failed; rc = bnxt_create_hw_lro_sysctls(softc); if (rc) goto failed; rc = bnxt_create_pause_fc_sysctls(softc); if (rc) goto failed; /* Initialize the vlan list */ SLIST_INIT(&softc->vnic_info.vlan_tags); softc->vnic_info.vlan_tag_list.idi_vaddr = NULL; return (rc); failed: bnxt_free_sysctl_ctx(softc); init_sysctl_failed: bnxt_hwrm_func_drv_unrgtr(softc, false); drv_rgtr_fail: if (BNXT_PF(softc)) free(softc->nvm_info, M_DEVBUF); nvm_alloc_fail: bnxt_free_hwrm_short_cmd_req(softc); hwrm_short_cmd_alloc_fail: ver_fail: free(softc->ver_info, M_DEVBUF); ver_alloc_fail: bnxt_free_hwrm_dma_mem(softc); dma_fail: BNXT_HWRM_LOCK_DESTROY(softc); bnxt_pci_mapping_free(softc); pci_disable_busmaster(softc->dev); return (rc); } static int bnxt_attach_post(if_ctx_t ctx) { struct bnxt_softc *softc = iflib_get_softc(ctx); if_t ifp = iflib_get_ifp(ctx); int rc; bnxt_create_config_sysctls_post(softc); 
/* Update link state etc... */ rc = bnxt_probe_phy(softc); if (rc) goto failed; /* Needs to be done after probing the phy */ bnxt_create_ver_sysctls(softc); bnxt_add_media_types(softc); ifmedia_set(softc->media, IFM_ETHER | IFM_AUTO); softc->scctx->isc_max_frame_size = ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN; failed: return rc; } static int bnxt_detach(if_ctx_t ctx) { struct bnxt_softc *softc = iflib_get_softc(ctx); struct bnxt_vlan_tag *tag; struct bnxt_vlan_tag *tmp; int i; bnxt_wol_config(ctx); bnxt_do_disable_intr(&softc->def_cp_ring); bnxt_free_sysctl_ctx(softc); bnxt_hwrm_func_reset(softc); bnxt_clear_ids(softc); iflib_irq_free(ctx, &softc->def_cp_ring.irq); iflib_config_gtask_deinit(&softc->def_cp_task); /* We need to free() these here... */ for (i = softc->nrxqsets-1; i>=0; i--) { iflib_irq_free(ctx, &softc->rx_cp_rings[i].irq); } iflib_dma_free(&softc->vnic_info.mc_list); iflib_dma_free(&softc->vnic_info.rss_hash_key_tbl); iflib_dma_free(&softc->vnic_info.rss_grp_tbl); if (softc->vnic_info.vlan_tag_list.idi_vaddr) iflib_dma_free(&softc->vnic_info.vlan_tag_list); SLIST_FOREACH_SAFE(tag, &softc->vnic_info.vlan_tags, next, tmp) free(tag, M_DEVBUF); iflib_dma_free(&softc->def_cp_ring_mem); for (i = 0; i < softc->nrxqsets; i++) free(softc->rx_rings[i].tpa_start, M_DEVBUF); free(softc->ver_info, M_DEVBUF); if (BNXT_PF(softc)) free(softc->nvm_info, M_DEVBUF); bnxt_hwrm_func_drv_unrgtr(softc, false); bnxt_free_hwrm_dma_mem(softc); bnxt_free_hwrm_short_cmd_req(softc); BNXT_HWRM_LOCK_DESTROY(softc); pci_disable_busmaster(softc->dev); bnxt_pci_mapping_free(softc); return 0; } /* Device configuration */ static void bnxt_init(if_ctx_t ctx) { struct bnxt_softc *softc = iflib_get_softc(ctx); struct ifmediareq ifmr; int i, j; int rc; rc = bnxt_hwrm_func_reset(softc); if (rc) return; bnxt_clear_ids(softc); /* Allocate the default completion ring */ softc->def_cp_ring.cons = UINT32_MAX; softc->def_cp_ring.v_bit = 1; bnxt_mark_cpr_invalid(&softc->def_cp_ring); rc = bnxt_hwrm_ring_alloc(softc, HWRM_RING_ALLOC_INPUT_RING_TYPE_L2_CMPL, &softc->def_cp_ring.ring, (uint16_t)HWRM_NA_SIGNATURE, HWRM_NA_SIGNATURE, true); if (rc) goto fail; /* And now set the default CP ring as the async CP ring */ rc = bnxt_cfg_async_cr(softc); if (rc) goto fail; for (i = 0; i < softc->nrxqsets; i++) { /* Allocate the statistics context */ rc = bnxt_hwrm_stat_ctx_alloc(softc, &softc->rx_cp_rings[i], softc->rx_stats.idi_paddr + (sizeof(struct ctx_hw_stats) * i)); if (rc) goto fail; /* Allocate the completion ring */ softc->rx_cp_rings[i].cons = UINT32_MAX; softc->rx_cp_rings[i].v_bit = 1; softc->rx_cp_rings[i].last_idx = UINT32_MAX; bnxt_mark_cpr_invalid(&softc->rx_cp_rings[i]); rc = bnxt_hwrm_ring_alloc(softc, HWRM_RING_ALLOC_INPUT_RING_TYPE_L2_CMPL, &softc->rx_cp_rings[i].ring, (uint16_t)HWRM_NA_SIGNATURE, HWRM_NA_SIGNATURE, true); if (rc) goto fail; /* Allocate the RX ring */ rc = bnxt_hwrm_ring_alloc(softc, HWRM_RING_ALLOC_INPUT_RING_TYPE_RX, &softc->rx_rings[i], (uint16_t)HWRM_NA_SIGNATURE, HWRM_NA_SIGNATURE, false); if (rc) goto fail; BNXT_RX_DB(&softc->rx_rings[i], 0); /* TODO: Cumulus+ doesn't need the double doorbell */ BNXT_RX_DB(&softc->rx_rings[i], 0); /* Allocate the AG ring */ rc = bnxt_hwrm_ring_alloc(softc, HWRM_RING_ALLOC_INPUT_RING_TYPE_RX, &softc->ag_rings[i], (uint16_t)HWRM_NA_SIGNATURE, HWRM_NA_SIGNATURE, false); if (rc) goto fail; BNXT_RX_DB(&softc->rx_rings[i], 0); /* TODO: Cumulus+ doesn't need the double doorbell */ BNXT_RX_DB(&softc->ag_rings[i], 0); /* Allocate the ring group */ 
softc->grp_info[i].stats_ctx = softc->rx_cp_rings[i].stats_ctx_id; softc->grp_info[i].rx_ring_id = softc->rx_rings[i].phys_id; softc->grp_info[i].ag_ring_id = softc->ag_rings[i].phys_id; softc->grp_info[i].cp_ring_id = softc->rx_cp_rings[i].ring.phys_id; rc = bnxt_hwrm_ring_grp_alloc(softc, &softc->grp_info[i]); if (rc) goto fail; } /* Allocate the VNIC RSS context */ rc = bnxt_hwrm_vnic_ctx_alloc(softc, &softc->vnic_info.rss_id); if (rc) goto fail; /* Allocate the vnic */ softc->vnic_info.def_ring_grp = softc->grp_info[0].grp_id; softc->vnic_info.mru = softc->scctx->isc_max_frame_size; rc = bnxt_hwrm_vnic_alloc(softc, &softc->vnic_info); if (rc) goto fail; rc = bnxt_hwrm_vnic_cfg(softc, &softc->vnic_info); if (rc) goto fail; rc = bnxt_hwrm_set_filter(softc, &softc->vnic_info); if (rc) goto fail; /* Enable RSS on the VNICs */ for (i = 0, j = 0; i < HW_HASH_INDEX_SIZE; i++) { ((uint16_t *) softc->vnic_info.rss_grp_tbl.idi_vaddr)[i] = htole16(softc->grp_info[j].grp_id); if (++j == softc->nrxqsets) j = 0; } rc = bnxt_hwrm_rss_cfg(softc, &softc->vnic_info, softc->vnic_info.rss_hash_type); if (rc) goto fail; rc = bnxt_hwrm_vnic_tpa_cfg(softc); if (rc) goto fail; for (i = 0; i < softc->ntxqsets; i++) { /* Allocate the statistics context */ rc = bnxt_hwrm_stat_ctx_alloc(softc, &softc->tx_cp_rings[i], softc->tx_stats.idi_paddr + (sizeof(struct ctx_hw_stats) * i)); if (rc) goto fail; /* Allocate the completion ring */ softc->tx_cp_rings[i].cons = UINT32_MAX; softc->tx_cp_rings[i].v_bit = 1; bnxt_mark_cpr_invalid(&softc->tx_cp_rings[i]); rc = bnxt_hwrm_ring_alloc(softc, HWRM_RING_ALLOC_INPUT_RING_TYPE_L2_CMPL, &softc->tx_cp_rings[i].ring, (uint16_t)HWRM_NA_SIGNATURE, HWRM_NA_SIGNATURE, false); if (rc) goto fail; /* Allocate the TX ring */ rc = bnxt_hwrm_ring_alloc(softc, HWRM_RING_ALLOC_INPUT_RING_TYPE_TX, &softc->tx_rings[i], softc->tx_cp_rings[i].ring.phys_id, softc->tx_cp_rings[i].stats_ctx_id, false); if (rc) goto fail; BNXT_TX_DB(&softc->tx_rings[i], 0); /* TODO: Cumulus+ doesn't need the double doorbell */ BNXT_TX_DB(&softc->tx_rings[i], 0); } bnxt_do_enable_intr(&softc->def_cp_ring); bnxt_media_status(softc->ctx, &ifmr); bnxt_hwrm_cfa_l2_set_rx_mask(softc, &softc->vnic_info); return; fail: bnxt_hwrm_func_reset(softc); bnxt_clear_ids(softc); return; } static void bnxt_stop(if_ctx_t ctx) { struct bnxt_softc *softc = iflib_get_softc(ctx); bnxt_do_disable_intr(&softc->def_cp_ring); bnxt_hwrm_func_reset(softc); bnxt_clear_ids(softc); return; } static void bnxt_multi_set(if_ctx_t ctx) { struct bnxt_softc *softc = iflib_get_softc(ctx); if_t ifp = iflib_get_ifp(ctx); uint8_t *mta; int cnt, mcnt; mcnt = if_multiaddr_count(ifp, -1); if (mcnt > BNXT_MAX_MC_ADDRS) { softc->vnic_info.rx_mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_ALL_MCAST; bnxt_hwrm_cfa_l2_set_rx_mask(softc, &softc->vnic_info); } else { softc->vnic_info.rx_mask &= ~HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_ALL_MCAST; mta = softc->vnic_info.mc_list.idi_vaddr; bzero(mta, softc->vnic_info.mc_list.idi_size); if_multiaddr_array(ifp, mta, &cnt, mcnt); bus_dmamap_sync(softc->vnic_info.mc_list.idi_tag, softc->vnic_info.mc_list.idi_map, BUS_DMASYNC_PREWRITE); softc->vnic_info.mc_list_count = cnt; softc->vnic_info.rx_mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_MCAST; if (bnxt_hwrm_cfa_l2_set_rx_mask(softc, &softc->vnic_info)) device_printf(softc->dev, "set_multi: rx_mask set failed\n"); } } static int bnxt_mtu_set(if_ctx_t ctx, uint32_t mtu) { struct bnxt_softc *softc = iflib_get_softc(ctx); if (mtu > BNXT_MAX_MTU) return EINVAL; 
softc->scctx->isc_max_frame_size = mtu + ETHER_HDR_LEN + ETHER_CRC_LEN; return 0; } static void bnxt_media_status(if_ctx_t ctx, struct ifmediareq * ifmr) { struct bnxt_softc *softc = iflib_get_softc(ctx); struct bnxt_link_info *link_info = &softc->link_info; struct ifmedia_entry *next; uint64_t target_baudrate = bnxt_get_baudrate(link_info); int active_media = IFM_UNKNOWN; bnxt_update_link(softc, true); ifmr->ifm_status = IFM_AVALID; ifmr->ifm_active = IFM_ETHER; if (link_info->link_up) ifmr->ifm_status |= IFM_ACTIVE; else ifmr->ifm_status &= ~IFM_ACTIVE; if (link_info->duplex == HWRM_PORT_PHY_QCFG_OUTPUT_DUPLEX_CFG_FULL) ifmr->ifm_active |= IFM_FDX; else ifmr->ifm_active |= IFM_HDX; /* * Go through the list of supported media which got prepared * as part of bnxt_add_media_types() using api ifmedia_add(). */ LIST_FOREACH(next, &(iflib_get_media(ctx)->ifm_list), ifm_list) { if (ifmedia_baudrate(next->ifm_media) == target_baudrate) { active_media = next->ifm_media; break; } } ifmr->ifm_active |= active_media; if (link_info->flow_ctrl.rx) ifmr->ifm_active |= IFM_ETH_RXPAUSE; if (link_info->flow_ctrl.tx) ifmr->ifm_active |= IFM_ETH_TXPAUSE; bnxt_report_link(softc); return; } static int bnxt_media_change(if_ctx_t ctx) { struct bnxt_softc *softc = iflib_get_softc(ctx); struct ifmedia *ifm = iflib_get_media(ctx); struct ifmediareq ifmr; int rc; if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER) return EINVAL; switch (IFM_SUBTYPE(ifm->ifm_media)) { case IFM_100_T: softc->link_info.autoneg &= ~BNXT_AUTONEG_SPEED; softc->link_info.req_link_speed = HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_100MB; break; case IFM_1000_KX: case IFM_1000_T: case IFM_1000_SGMII: softc->link_info.autoneg &= ~BNXT_AUTONEG_SPEED; softc->link_info.req_link_speed = HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_1GB; break; case IFM_2500_KX: case IFM_2500_T: softc->link_info.autoneg &= ~BNXT_AUTONEG_SPEED; softc->link_info.req_link_speed = HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_2_5GB; break; case IFM_10G_CR1: case IFM_10G_KR: case IFM_10G_LR: case IFM_10G_SR: case IFM_10G_T: softc->link_info.autoneg &= ~BNXT_AUTONEG_SPEED; softc->link_info.req_link_speed = HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_10GB; break; case IFM_20G_KR2: softc->link_info.autoneg &= ~BNXT_AUTONEG_SPEED; softc->link_info.req_link_speed = HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_20GB; break; case IFM_25G_CR: case IFM_25G_KR: case IFM_25G_SR: softc->link_info.autoneg &= ~BNXT_AUTONEG_SPEED; softc->link_info.req_link_speed = HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_25GB; break; case IFM_40G_CR4: case IFM_40G_KR4: case IFM_40G_LR4: case IFM_40G_SR4: softc->link_info.autoneg &= ~BNXT_AUTONEG_SPEED; softc->link_info.req_link_speed = HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_40GB; break; case IFM_50G_CR2: case IFM_50G_KR2: softc->link_info.autoneg &= ~BNXT_AUTONEG_SPEED; softc->link_info.req_link_speed = HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_50GB; break; case IFM_100G_CR4: case IFM_100G_KR4: case IFM_100G_LR4: case IFM_100G_SR4: softc->link_info.autoneg &= ~BNXT_AUTONEG_SPEED; softc->link_info.req_link_speed = HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_100GB; break; default: device_printf(softc->dev, "Unsupported media type! 
Using auto\n"); /* Fall-through */ case IFM_AUTO: // Auto softc->link_info.autoneg |= BNXT_AUTONEG_SPEED; break; } rc = bnxt_hwrm_set_link_setting(softc, true, true, true); bnxt_media_status(softc->ctx, &ifmr); return rc; } static int bnxt_promisc_set(if_ctx_t ctx, int flags) { struct bnxt_softc *softc = iflib_get_softc(ctx); if_t ifp = iflib_get_ifp(ctx); int rc; if (ifp->if_flags & IFF_ALLMULTI || if_multiaddr_count(ifp, -1) > BNXT_MAX_MC_ADDRS) softc->vnic_info.rx_mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_ALL_MCAST; else softc->vnic_info.rx_mask &= ~HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_ALL_MCAST; if (ifp->if_flags & IFF_PROMISC) softc->vnic_info.rx_mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_PROMISCUOUS | HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_ANYVLAN_NONVLAN; else softc->vnic_info.rx_mask &= ~(HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_PROMISCUOUS | HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_ANYVLAN_NONVLAN); rc = bnxt_hwrm_cfa_l2_set_rx_mask(softc, &softc->vnic_info); return rc; } static uint64_t bnxt_get_counter(if_ctx_t ctx, ift_counter cnt) { if_t ifp = iflib_get_ifp(ctx); if (cnt < IFCOUNTERS) return if_get_counter_default(ifp, cnt); return 0; } static void bnxt_update_admin_status(if_ctx_t ctx) { struct bnxt_softc *softc = iflib_get_softc(ctx); /* * When SR-IOV is enabled, avoid each VF sending this HWRM * request every sec with which firmware timeouts can happen */ if (BNXT_PF(softc)) { bnxt_hwrm_port_qstats(softc); } return; } static void bnxt_if_timer(if_ctx_t ctx, uint16_t qid) { struct bnxt_softc *softc = iflib_get_softc(ctx); uint64_t ticks_now = ticks; /* Schedule bnxt_update_admin_status() once per sec */ if (ticks_now - softc->admin_ticks >= hz) { softc->admin_ticks = ticks_now; iflib_admin_intr_deferred(ctx); } return; } static void inline bnxt_do_enable_intr(struct bnxt_cp_ring *cpr) { if (cpr->ring.phys_id != (uint16_t)HWRM_NA_SIGNATURE) { /* First time enabling, do not set index */ if (cpr->cons == UINT32_MAX) BNXT_CP_ENABLE_DB(&cpr->ring); else BNXT_CP_IDX_ENABLE_DB(&cpr->ring, cpr->cons); } } static void inline bnxt_do_disable_intr(struct bnxt_cp_ring *cpr) { if (cpr->ring.phys_id != (uint16_t)HWRM_NA_SIGNATURE) BNXT_CP_DISABLE_DB(&cpr->ring); } /* Enable all interrupts */ static void bnxt_intr_enable(if_ctx_t ctx) { struct bnxt_softc *softc = iflib_get_softc(ctx); int i; bnxt_do_enable_intr(&softc->def_cp_ring); for (i = 0; i < softc->nrxqsets; i++) bnxt_do_enable_intr(&softc->rx_cp_rings[i]); return; } /* Enable interrupt for a single queue */ static int bnxt_tx_queue_intr_enable(if_ctx_t ctx, uint16_t qid) { struct bnxt_softc *softc = iflib_get_softc(ctx); bnxt_do_enable_intr(&softc->tx_cp_rings[qid]); return 0; } static int bnxt_rx_queue_intr_enable(if_ctx_t ctx, uint16_t qid) { struct bnxt_softc *softc = iflib_get_softc(ctx); bnxt_do_enable_intr(&softc->rx_cp_rings[qid]); return 0; } /* Disable all interrupts */ static void bnxt_disable_intr(if_ctx_t ctx) { struct bnxt_softc *softc = iflib_get_softc(ctx); int i; /* * NOTE: These TX interrupts should never get enabled, so don't * update the index */ for (i = 0; i < softc->ntxqsets; i++) bnxt_do_disable_intr(&softc->tx_cp_rings[i]); for (i = 0; i < softc->nrxqsets; i++) bnxt_do_disable_intr(&softc->rx_cp_rings[i]); return; } static int bnxt_msix_intr_assign(if_ctx_t ctx, int msix) { struct bnxt_softc *softc = iflib_get_softc(ctx); int rc; int i; char irq_name[16]; rc = iflib_irq_alloc_generic(ctx, &softc->def_cp_ring.irq, softc->def_cp_ring.ring.id + 1, IFLIB_INTR_ADMIN, bnxt_handle_def_cp, softc, 0, "def_cp"); if (rc) { 
device_printf(iflib_get_dev(ctx), "Failed to register default completion ring handler\n"); return rc; } for (i=0; iscctx->isc_nrxqsets; i++) { snprintf(irq_name, sizeof(irq_name), "rxq%d", i); rc = iflib_irq_alloc_generic(ctx, &softc->rx_cp_rings[i].irq, softc->rx_cp_rings[i].ring.id + 1, IFLIB_INTR_RX, bnxt_handle_rx_cp, &softc->rx_cp_rings[i], i, irq_name); if (rc) { device_printf(iflib_get_dev(ctx), "Failed to register RX completion ring handler\n"); i--; goto fail; } } for (i=0; iscctx->isc_ntxqsets; i++) iflib_softirq_alloc_generic(ctx, NULL, IFLIB_INTR_TX, NULL, i, "tx_cp"); return rc; fail: for (; i>=0; i--) iflib_irq_free(ctx, &softc->rx_cp_rings[i].irq); iflib_irq_free(ctx, &softc->def_cp_ring.irq); return rc; } /* * We're explicitly allowing duplicates here. They will need to be * removed as many times as they are added. */ static void bnxt_vlan_register(if_ctx_t ctx, uint16_t vtag) { struct bnxt_softc *softc = iflib_get_softc(ctx); struct bnxt_vlan_tag *new_tag; new_tag = malloc(sizeof(struct bnxt_vlan_tag), M_DEVBUF, M_NOWAIT); if (new_tag == NULL) return; new_tag->tag = vtag; new_tag->tpid = 8100; SLIST_INSERT_HEAD(&softc->vnic_info.vlan_tags, new_tag, next); }; static void bnxt_vlan_unregister(if_ctx_t ctx, uint16_t vtag) { struct bnxt_softc *softc = iflib_get_softc(ctx); struct bnxt_vlan_tag *vlan_tag; SLIST_FOREACH(vlan_tag, &softc->vnic_info.vlan_tags, next) { if (vlan_tag->tag == vtag) { SLIST_REMOVE(&softc->vnic_info.vlan_tags, vlan_tag, bnxt_vlan_tag, next); free(vlan_tag, M_DEVBUF); break; } } } static int bnxt_wol_config(if_ctx_t ctx) { struct bnxt_softc *softc = iflib_get_softc(ctx); if_t ifp = iflib_get_ifp(ctx); if (!softc) return -EBUSY; if (!bnxt_wol_supported(softc)) return -ENOTSUP; if (if_getcapabilities(ifp) & IFCAP_WOL_MAGIC) { if (!softc->wol) { if (bnxt_hwrm_alloc_wol_fltr(softc)) return -EBUSY; softc->wol = 1; } } else { if (softc->wol) { if (bnxt_hwrm_free_wol_fltr(softc)) return -EBUSY; softc->wol = 0; } } return 0; } static int bnxt_shutdown(if_ctx_t ctx) { bnxt_wol_config(ctx); return 0; } static int bnxt_suspend(if_ctx_t ctx) { bnxt_wol_config(ctx); return 0; } static int bnxt_resume(if_ctx_t ctx) { struct bnxt_softc *softc = iflib_get_softc(ctx); bnxt_get_wol_settings(softc); return 0; } static int bnxt_priv_ioctl(if_ctx_t ctx, u_long command, caddr_t data) { struct bnxt_softc *softc = iflib_get_softc(ctx); struct ifreq *ifr = (struct ifreq *)data; struct ifreq_buffer *ifbuf = &ifr->ifr_ifru.ifru_buffer; struct bnxt_ioctl_header *ioh = (struct bnxt_ioctl_header *)(ifbuf->buffer); int rc = ENOTSUP; struct bnxt_ioctl_data *iod = NULL; switch (command) { case SIOCGPRIVATE_0: if ((rc = priv_check(curthread, PRIV_DRIVER)) != 0) goto exit; iod = malloc(ifbuf->length, M_DEVBUF, M_NOWAIT | M_ZERO); if (!iod) { rc = ENOMEM; goto exit; } copyin(ioh, iod, ifbuf->length); switch (ioh->type) { case BNXT_HWRM_NVM_FIND_DIR_ENTRY: { struct bnxt_ioctl_hwrm_nvm_find_dir_entry *find = &iod->find; rc = bnxt_hwrm_nvm_find_dir_entry(softc, find->type, &find->ordinal, find->ext, &find->index, find->use_index, find->search_opt, &find->data_length, &find->item_length, &find->fw_ver); if (rc) { iod->hdr.rc = rc; copyout(&iod->hdr.rc, &ioh->rc, sizeof(ioh->rc)); } else { iod->hdr.rc = 0; copyout(iod, ioh, ifbuf->length); } rc = 0; goto exit; } case BNXT_HWRM_NVM_READ: { struct bnxt_ioctl_hwrm_nvm_read *rd = &iod->read; struct iflib_dma_info dma_data; size_t offset; size_t remain; size_t csize; /* * Some HWRM versions can't read more than 0x8000 bytes */ rc = 
iflib_dma_alloc(softc->ctx, min(rd->length, 0x8000), &dma_data, BUS_DMA_NOWAIT); if (rc) break; for (remain = rd->length, offset = 0; remain && offset < rd->length; offset += 0x8000) { csize = min(remain, 0x8000); rc = bnxt_hwrm_nvm_read(softc, rd->index, rd->offset + offset, csize, &dma_data); if (rc) { iod->hdr.rc = rc; copyout(&iod->hdr.rc, &ioh->rc, sizeof(ioh->rc)); break; } else { copyout(dma_data.idi_vaddr, rd->data + offset, csize); iod->hdr.rc = 0; } remain -= csize; } if (iod->hdr.rc == 0) copyout(iod, ioh, ifbuf->length); iflib_dma_free(&dma_data); rc = 0; goto exit; } case BNXT_HWRM_FW_RESET: { struct bnxt_ioctl_hwrm_fw_reset *rst = &iod->reset; rc = bnxt_hwrm_fw_reset(softc, rst->processor, &rst->selfreset); if (rc) { iod->hdr.rc = rc; copyout(&iod->hdr.rc, &ioh->rc, sizeof(ioh->rc)); } else { iod->hdr.rc = 0; copyout(iod, ioh, ifbuf->length); } rc = 0; goto exit; } case BNXT_HWRM_FW_QSTATUS: { struct bnxt_ioctl_hwrm_fw_qstatus *qstat = &iod->status; rc = bnxt_hwrm_fw_qstatus(softc, qstat->processor, &qstat->selfreset); if (rc) { iod->hdr.rc = rc; copyout(&iod->hdr.rc, &ioh->rc, sizeof(ioh->rc)); } else { iod->hdr.rc = 0; copyout(iod, ioh, ifbuf->length); } rc = 0; goto exit; } case BNXT_HWRM_NVM_WRITE: { struct bnxt_ioctl_hwrm_nvm_write *wr = &iod->write; rc = bnxt_hwrm_nvm_write(softc, wr->data, true, wr->type, wr->ordinal, wr->ext, wr->attr, wr->option, wr->data_length, wr->keep, &wr->item_length, &wr->index); if (rc) { iod->hdr.rc = rc; copyout(&iod->hdr.rc, &ioh->rc, sizeof(ioh->rc)); } else { iod->hdr.rc = 0; copyout(iod, ioh, ifbuf->length); } rc = 0; goto exit; } case BNXT_HWRM_NVM_ERASE_DIR_ENTRY: { struct bnxt_ioctl_hwrm_nvm_erase_dir_entry *erase = &iod->erase; rc = bnxt_hwrm_nvm_erase_dir_entry(softc, erase->index); if (rc) { iod->hdr.rc = rc; copyout(&iod->hdr.rc, &ioh->rc, sizeof(ioh->rc)); } else { iod->hdr.rc = 0; copyout(iod, ioh, ifbuf->length); } rc = 0; goto exit; } case BNXT_HWRM_NVM_GET_DIR_INFO: { struct bnxt_ioctl_hwrm_nvm_get_dir_info *info = &iod->dir_info; rc = bnxt_hwrm_nvm_get_dir_info(softc, &info->entries, &info->entry_length); if (rc) { iod->hdr.rc = rc; copyout(&iod->hdr.rc, &ioh->rc, sizeof(ioh->rc)); } else { iod->hdr.rc = 0; copyout(iod, ioh, ifbuf->length); } rc = 0; goto exit; } case BNXT_HWRM_NVM_GET_DIR_ENTRIES: { struct bnxt_ioctl_hwrm_nvm_get_dir_entries *get = &iod->dir_entries; struct iflib_dma_info dma_data; rc = iflib_dma_alloc(softc->ctx, get->max_size, &dma_data, BUS_DMA_NOWAIT); if (rc) break; rc = bnxt_hwrm_nvm_get_dir_entries(softc, &get->entries, &get->entry_length, &dma_data); if (rc) { iod->hdr.rc = rc; copyout(&iod->hdr.rc, &ioh->rc, sizeof(ioh->rc)); } else { copyout(dma_data.idi_vaddr, get->data, get->entry_length * get->entries); iod->hdr.rc = 0; copyout(iod, ioh, ifbuf->length); } iflib_dma_free(&dma_data); rc = 0; goto exit; } case BNXT_HWRM_NVM_VERIFY_UPDATE: { struct bnxt_ioctl_hwrm_nvm_verify_update *vrfy = &iod->verify; rc = bnxt_hwrm_nvm_verify_update(softc, vrfy->type, vrfy->ordinal, vrfy->ext); if (rc) { iod->hdr.rc = rc; copyout(&iod->hdr.rc, &ioh->rc, sizeof(ioh->rc)); } else { iod->hdr.rc = 0; copyout(iod, ioh, ifbuf->length); } rc = 0; goto exit; } case BNXT_HWRM_NVM_INSTALL_UPDATE: { struct bnxt_ioctl_hwrm_nvm_install_update *inst = &iod->install; rc = bnxt_hwrm_nvm_install_update(softc, inst->install_type, &inst->installed_items, &inst->result, &inst->problem_item, &inst->reset_required); if (rc) { iod->hdr.rc = rc; copyout(&iod->hdr.rc, &ioh->rc, sizeof(ioh->rc)); } else { iod->hdr.rc = 0; copyout(iod, 
ioh, ifbuf->length); } rc = 0; goto exit; } case BNXT_HWRM_NVM_MODIFY: { struct bnxt_ioctl_hwrm_nvm_modify *mod = &iod->modify; rc = bnxt_hwrm_nvm_modify(softc, mod->index, mod->offset, mod->data, true, mod->length); if (rc) { iod->hdr.rc = rc; copyout(&iod->hdr.rc, &ioh->rc, sizeof(ioh->rc)); } else { iod->hdr.rc = 0; copyout(iod, ioh, ifbuf->length); } rc = 0; goto exit; } case BNXT_HWRM_FW_GET_TIME: { struct bnxt_ioctl_hwrm_fw_get_time *gtm = &iod->get_time; rc = bnxt_hwrm_fw_get_time(softc, &gtm->year, &gtm->month, &gtm->day, &gtm->hour, &gtm->minute, &gtm->second, &gtm->millisecond, &gtm->zone); if (rc) { iod->hdr.rc = rc; copyout(&iod->hdr.rc, &ioh->rc, sizeof(ioh->rc)); } else { iod->hdr.rc = 0; copyout(iod, ioh, ifbuf->length); } rc = 0; goto exit; } case BNXT_HWRM_FW_SET_TIME: { struct bnxt_ioctl_hwrm_fw_set_time *stm = &iod->set_time; rc = bnxt_hwrm_fw_set_time(softc, stm->year, stm->month, stm->day, stm->hour, stm->minute, stm->second, stm->millisecond, stm->zone); if (rc) { iod->hdr.rc = rc; copyout(&iod->hdr.rc, &ioh->rc, sizeof(ioh->rc)); } else { iod->hdr.rc = 0; copyout(iod, ioh, ifbuf->length); } rc = 0; goto exit; } } break; } exit: if (iod) free(iod, M_DEVBUF); return rc; } /* * Support functions */ static int bnxt_probe_phy(struct bnxt_softc *softc) { struct bnxt_link_info *link_info = &softc->link_info; int rc = 0; rc = bnxt_update_link(softc, false); if (rc) { device_printf(softc->dev, "Probe phy can't update link (rc: %x)\n", rc); return (rc); } /*initialize the ethool setting copy with NVM settings */ if (link_info->auto_mode != HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_MODE_NONE) link_info->autoneg |= BNXT_AUTONEG_SPEED; link_info->req_duplex = link_info->duplex_setting; if (link_info->autoneg & BNXT_AUTONEG_SPEED) link_info->req_link_speed = link_info->auto_link_speed; else link_info->req_link_speed = link_info->force_link_speed; return (rc); } static void bnxt_add_media_types(struct bnxt_softc *softc) { struct bnxt_link_info *link_info = &softc->link_info; uint16_t supported; uint8_t phy_type = get_phy_type(softc); supported = link_info->support_speeds; /* Auto is always supported */ ifmedia_add(softc->media, IFM_ETHER | IFM_AUTO, 0, NULL); if (softc->flags & BNXT_FLAG_NPAR) return; switch (phy_type) { case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_100G_BASECR4: case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_40G_BASECR4: case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_25G_BASECR_CA_L: case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_25G_BASECR_CA_S: case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_25G_BASECR_CA_N: case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASECR: BNXT_IFMEDIA_ADD(supported, SPEEDS_100GB, IFM_100G_CR4); BNXT_IFMEDIA_ADD(supported, SPEEDS_50GB, IFM_50G_CR2); BNXT_IFMEDIA_ADD(supported, SPEEDS_40GB, IFM_40G_CR4); BNXT_IFMEDIA_ADD(supported, SPEEDS_25GB, IFM_25G_CR); BNXT_IFMEDIA_ADD(supported, SPEEDS_10GB, IFM_10G_CR1); BNXT_IFMEDIA_ADD(supported, SPEEDS_1GB, IFM_1000_T); break; case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_100G_BASELR4: case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_40G_BASELR4: case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASELR: BNXT_IFMEDIA_ADD(supported, SPEEDS_100GB, IFM_100G_LR4); BNXT_IFMEDIA_ADD(supported, SPEEDS_40GB, IFM_40G_LR4); BNXT_IFMEDIA_ADD(supported, SPEEDS_25GB, IFM_25G_LR); BNXT_IFMEDIA_ADD(supported, SPEEDS_10GB, IFM_10G_LR); BNXT_IFMEDIA_ADD(supported, SPEEDS_1GB, IFM_1000_LX); break; case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_100G_BASESR10: case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_100G_BASESR4: case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_40G_BASESR4: case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASESR: case 
HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_40G_BASEER4: case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_100G_BASEER4: case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_25G_BASESR: case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_1G_BASESX: BNXT_IFMEDIA_ADD(supported, SPEEDS_100GB, IFM_100G_SR4); BNXT_IFMEDIA_ADD(supported, SPEEDS_40GB, IFM_40G_SR4); BNXT_IFMEDIA_ADD(supported, SPEEDS_25GB, IFM_25G_SR); BNXT_IFMEDIA_ADD(supported, SPEEDS_10GB, IFM_10G_SR); BNXT_IFMEDIA_ADD(supported, SPEEDS_1GB, IFM_1000_SX); break; case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASEKR4: case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASEKR2: case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASEKR: BNXT_IFMEDIA_ADD(supported, SPEEDS_100GB, IFM_100G_KR4); BNXT_IFMEDIA_ADD(supported, SPEEDS_50GB, IFM_50G_KR2); BNXT_IFMEDIA_ADD(supported, SPEEDS_40GB, IFM_40G_KR4); BNXT_IFMEDIA_ADD(supported, SPEEDS_25GB, IFM_25G_KR); BNXT_IFMEDIA_ADD(supported, SPEEDS_20GB, IFM_20G_KR2); BNXT_IFMEDIA_ADD(supported, SPEEDS_10GB, IFM_10G_KR); BNXT_IFMEDIA_ADD(supported, SPEEDS_1GB, IFM_1000_KX); break; case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_40G_ACTIVE_CABLE: BNXT_IFMEDIA_ADD(supported, SPEEDS_25GB, IFM_25G_ACC); BNXT_IFMEDIA_ADD(supported, SPEEDS_10GB, IFM_10G_AOC); break; case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_1G_BASECX: BNXT_IFMEDIA_ADD(supported, SPEEDS_1GBHD, IFM_1000_CX); break; case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_1G_BASET: case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASET: case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASETE: BNXT_IFMEDIA_ADD(supported, SPEEDS_10GB, IFM_10G_T); BNXT_IFMEDIA_ADD(supported, SPEEDS_2_5GB, IFM_2500_T); BNXT_IFMEDIA_ADD(supported, SPEEDS_1GB, IFM_1000_T); BNXT_IFMEDIA_ADD(supported, SPEEDS_100MB, IFM_100_T); BNXT_IFMEDIA_ADD(supported, SPEEDS_10MB, IFM_10_T); break; case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASEKX: BNXT_IFMEDIA_ADD(supported, SPEEDS_10GB, IFM_10G_KR); BNXT_IFMEDIA_ADD(supported, SPEEDS_2_5GB, IFM_2500_KX); BNXT_IFMEDIA_ADD(supported, SPEEDS_1GB, IFM_1000_KX); break; case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_SGMIIEXTPHY: BNXT_IFMEDIA_ADD(supported, SPEEDS_1GB, IFM_1000_SGMII); break; case HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_UNKNOWN: /* Only Autoneg is supported for TYPE_UNKNOWN */ device_printf(softc->dev, "Unknown phy type\n"); break; default: /* Only Autoneg is supported for new phy type values */ device_printf(softc->dev, "phy type %d not supported by driver\n", phy_type); break; } return; } static int bnxt_map_bar(struct bnxt_softc *softc, struct bnxt_bar_info *bar, int bar_num, bool shareable) { uint32_t flag; if (bar->res != NULL) { device_printf(softc->dev, "Bar %d already mapped\n", bar_num); return EDOOFUS; } bar->rid = PCIR_BAR(bar_num); flag = RF_ACTIVE; if (shareable) flag |= RF_SHAREABLE; if ((bar->res = bus_alloc_resource_any(softc->dev, SYS_RES_MEMORY, &bar->rid, flag)) == NULL) { device_printf(softc->dev, "PCI BAR%d mapping failure\n", bar_num); return (ENXIO); } bar->tag = rman_get_bustag(bar->res); bar->handle = rman_get_bushandle(bar->res); bar->size = rman_get_size(bar->res); return 0; } static int bnxt_pci_mapping(struct bnxt_softc *softc) { int rc; rc = bnxt_map_bar(softc, &softc->hwrm_bar, 0, true); if (rc) return rc; rc = bnxt_map_bar(softc, &softc->doorbell_bar, 2, false); return rc; } static void bnxt_pci_mapping_free(struct bnxt_softc *softc) { if (softc->hwrm_bar.res != NULL) bus_release_resource(softc->dev, SYS_RES_MEMORY, softc->hwrm_bar.rid, softc->hwrm_bar.res); softc->hwrm_bar.res = NULL; if (softc->doorbell_bar.res != NULL) bus_release_resource(softc->dev, SYS_RES_MEMORY, softc->doorbell_bar.rid, 
softc->doorbell_bar.res); softc->doorbell_bar.res = NULL; } static int bnxt_update_link(struct bnxt_softc *softc, bool chng_link_state) { struct bnxt_link_info *link_info = &softc->link_info; uint8_t link_up = link_info->link_up; int rc = 0; rc = bnxt_hwrm_port_phy_qcfg(softc); if (rc) goto exit; /* TODO: need to add more logic to report VF link */ if (chng_link_state) { if (link_info->phy_link_status == HWRM_PORT_PHY_QCFG_OUTPUT_LINK_LINK) link_info->link_up = 1; else link_info->link_up = 0; if (link_up != link_info->link_up) bnxt_report_link(softc); } else { /* always link down if not require to update link state */ link_info->link_up = 0; } exit: return rc; } void bnxt_report_link(struct bnxt_softc *softc) { struct bnxt_link_info *link_info = &softc->link_info; const char *duplex = NULL, *flow_ctrl = NULL; if (link_info->link_up == link_info->last_link_up) { if (!link_info->link_up) return; if ((link_info->duplex == link_info->last_duplex) && (!(BNXT_IS_FLOW_CTRL_CHANGED(link_info)))) return; } if (link_info->link_up) { if (link_info->duplex == HWRM_PORT_PHY_QCFG_OUTPUT_DUPLEX_CFG_FULL) duplex = "full duplex"; else duplex = "half duplex"; if (link_info->flow_ctrl.tx & link_info->flow_ctrl.rx) flow_ctrl = "FC - receive & transmit"; else if (link_info->flow_ctrl.tx) flow_ctrl = "FC - transmit"; else if (link_info->flow_ctrl.rx) flow_ctrl = "FC - receive"; else flow_ctrl = "FC - none"; iflib_link_state_change(softc->ctx, LINK_STATE_UP, IF_Gbps(100)); device_printf(softc->dev, "Link is UP %s, %s - %d Mbps \n", duplex, flow_ctrl, (link_info->link_speed * 100)); } else { iflib_link_state_change(softc->ctx, LINK_STATE_DOWN, bnxt_get_baudrate(&softc->link_info)); device_printf(softc->dev, "Link is Down\n"); } link_info->last_link_up = link_info->link_up; link_info->last_duplex = link_info->duplex; link_info->last_flow_ctrl.tx = link_info->flow_ctrl.tx; link_info->last_flow_ctrl.rx = link_info->flow_ctrl.rx; link_info->last_flow_ctrl.autoneg = link_info->flow_ctrl.autoneg; /* update media types */ ifmedia_removeall(softc->media); bnxt_add_media_types(softc); ifmedia_set(softc->media, IFM_ETHER | IFM_AUTO); } static int bnxt_handle_rx_cp(void *arg) { struct bnxt_cp_ring *cpr = arg; /* Disable further interrupts for this queue */ BNXT_CP_DISABLE_DB(&cpr->ring); return FILTER_SCHEDULE_THREAD; } static int bnxt_handle_def_cp(void *arg) { struct bnxt_softc *softc = arg; BNXT_CP_DISABLE_DB(&softc->def_cp_ring.ring); GROUPTASK_ENQUEUE(&softc->def_cp_task); return FILTER_HANDLED; } static void bnxt_clear_ids(struct bnxt_softc *softc) { int i; softc->def_cp_ring.stats_ctx_id = HWRM_NA_SIGNATURE; softc->def_cp_ring.ring.phys_id = (uint16_t)HWRM_NA_SIGNATURE; for (i = 0; i < softc->ntxqsets; i++) { softc->tx_cp_rings[i].stats_ctx_id = HWRM_NA_SIGNATURE; softc->tx_cp_rings[i].ring.phys_id = (uint16_t)HWRM_NA_SIGNATURE; softc->tx_rings[i].phys_id = (uint16_t)HWRM_NA_SIGNATURE; } for (i = 0; i < softc->nrxqsets; i++) { softc->rx_cp_rings[i].stats_ctx_id = HWRM_NA_SIGNATURE; softc->rx_cp_rings[i].ring.phys_id = (uint16_t)HWRM_NA_SIGNATURE; softc->rx_rings[i].phys_id = (uint16_t)HWRM_NA_SIGNATURE; softc->ag_rings[i].phys_id = (uint16_t)HWRM_NA_SIGNATURE; softc->grp_info[i].grp_id = (uint16_t)HWRM_NA_SIGNATURE; } softc->vnic_info.filter_id = -1; softc->vnic_info.id = (uint16_t)HWRM_NA_SIGNATURE; softc->vnic_info.rss_id = (uint16_t)HWRM_NA_SIGNATURE; memset(softc->vnic_info.rss_grp_tbl.idi_vaddr, 0xff, softc->vnic_info.rss_grp_tbl.idi_size); } static void bnxt_mark_cpr_invalid(struct bnxt_cp_ring *cpr) { struct 
cmpl_base *cmp = (void *)cpr->ring.vaddr; int i; for (i = 0; i < cpr->ring.ring_size; i++) cmp[i].info3_v = !cpr->v_bit; } static void bnxt_handle_async_event(struct bnxt_softc *softc, struct cmpl_base *cmpl) { struct hwrm_async_event_cmpl *ae = (void *)cmpl; uint16_t async_id = le16toh(ae->event_id); struct ifmediareq ifmr; switch (async_id) { case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE: case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CHANGE: case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE: bnxt_media_status(softc->ctx, &ifmr); break; case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_MTU_CHANGE: case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_DCB_CONFIG_CHANGE: case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_PORT_CONN_NOT_ALLOWED: case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_NOT_ALLOWED: case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_FUNC_DRVR_UNLOAD: case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_FUNC_DRVR_LOAD: case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_UNLOAD: case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_LOAD: case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_VF_FLR: case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_VF_MAC_ADDR_CHANGE: case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_PF_VF_COMM_STATUS_CHANGE: case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_VF_CFG_CHANGE: case HWRM_ASYNC_EVENT_CMPL_EVENT_ID_HWRM_ERROR: device_printf(softc->dev, "Unhandled async completion type %u\n", async_id); break; default: device_printf(softc->dev, "Unknown async completion type %u\n", async_id); break; } } static void bnxt_def_cp_task(void *context) { if_ctx_t ctx = context; struct bnxt_softc *softc = iflib_get_softc(ctx); struct bnxt_cp_ring *cpr = &softc->def_cp_ring; /* Handle completions on the default completion ring */ struct cmpl_base *cmpl; uint32_t cons = cpr->cons; bool v_bit = cpr->v_bit; bool last_v_bit; uint32_t last_cons; uint16_t type; for (;;) { last_cons = cons; last_v_bit = v_bit; NEXT_CP_CONS_V(&cpr->ring, cons, v_bit); cmpl = &((struct cmpl_base *)cpr->ring.vaddr)[cons]; if (!CMP_VALID(cmpl, v_bit)) break; type = le16toh(cmpl->type) & CMPL_BASE_TYPE_MASK; switch (type) { case CMPL_BASE_TYPE_HWRM_ASYNC_EVENT: bnxt_handle_async_event(softc, cmpl); break; case CMPL_BASE_TYPE_TX_L2: case CMPL_BASE_TYPE_RX_L2: case CMPL_BASE_TYPE_RX_AGG: case CMPL_BASE_TYPE_RX_TPA_START: case CMPL_BASE_TYPE_RX_TPA_END: case CMPL_BASE_TYPE_STAT_EJECT: case CMPL_BASE_TYPE_HWRM_DONE: case CMPL_BASE_TYPE_HWRM_FWD_REQ: case CMPL_BASE_TYPE_HWRM_FWD_RESP: case CMPL_BASE_TYPE_CQ_NOTIFICATION: case CMPL_BASE_TYPE_SRQ_EVENT: case CMPL_BASE_TYPE_DBQ_EVENT: case CMPL_BASE_TYPE_QP_EVENT: case CMPL_BASE_TYPE_FUNC_EVENT: device_printf(softc->dev, "Unhandled completion type %u\n", type); break; default: device_printf(softc->dev, "Unknown completion type %u\n", type); break; } } cpr->cons = last_cons; cpr->v_bit = last_v_bit; BNXT_CP_IDX_ENABLE_DB(&cpr->ring, cpr->cons); } static uint8_t get_phy_type(struct bnxt_softc *softc) { struct bnxt_link_info *link_info = &softc->link_info; uint8_t phy_type = link_info->phy_type; uint16_t supported; if (phy_type != HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_UNKNOWN) return phy_type; /* Deduce the phy type from the media type and supported speeds */ supported = link_info->support_speeds; if (link_info->media_type == HWRM_PORT_PHY_QCFG_OUTPUT_MEDIA_TYPE_TP) return HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASET; if (link_info->media_type == HWRM_PORT_PHY_QCFG_OUTPUT_MEDIA_TYPE_DAC) { if (supported & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_2_5GB) return HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASEKX; if (supported & HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_20GB) return 
HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASEKR; return HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASECR; } if (link_info->media_type == HWRM_PORT_PHY_QCFG_OUTPUT_MEDIA_TYPE_FIBRE) return HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASESR; return phy_type; } bool bnxt_check_hwrm_version(struct bnxt_softc *softc) { char buf[16]; sprintf(buf, "%hhu.%hhu.%hhu", softc->ver_info->hwrm_min_major, softc->ver_info->hwrm_min_minor, softc->ver_info->hwrm_min_update); if (softc->ver_info->hwrm_min_major > softc->ver_info->hwrm_if_major) { device_printf(softc->dev, "WARNING: HWRM version %s is too old (older than %s)\n", softc->ver_info->hwrm_if_ver, buf); return false; } else if(softc->ver_info->hwrm_min_major == softc->ver_info->hwrm_if_major) { if (softc->ver_info->hwrm_min_minor > softc->ver_info->hwrm_if_minor) { device_printf(softc->dev, "WARNING: HWRM version %s is too old (older than %s)\n", softc->ver_info->hwrm_if_ver, buf); return false; } else if (softc->ver_info->hwrm_min_minor == softc->ver_info->hwrm_if_minor) { if (softc->ver_info->hwrm_min_update > softc->ver_info->hwrm_if_update) { device_printf(softc->dev, "WARNING: HWRM version %s is too old (older than %s)\n", softc->ver_info->hwrm_if_ver, buf); return false; } } } return true; } static uint64_t bnxt_get_baudrate(struct bnxt_link_info *link) { switch (link->link_speed) { case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_100MB: return IF_Mbps(100); case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_1GB: return IF_Gbps(1); case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_2GB: return IF_Gbps(2); case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_2_5GB: return IF_Mbps(2500); case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_10GB: return IF_Gbps(10); case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_20GB: return IF_Gbps(20); case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_25GB: return IF_Gbps(25); case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_40GB: return IF_Gbps(40); case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_50GB: return IF_Gbps(50); case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_100GB: return IF_Gbps(100); case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_10MB: return IF_Mbps(10); } return IF_Gbps(100); } static void bnxt_get_wol_settings(struct bnxt_softc *softc) { uint16_t wol_handle = 0; if (!bnxt_wol_supported(softc)) return; do { wol_handle = bnxt_hwrm_get_wol_fltrs(softc, wol_handle); } while (wol_handle && wol_handle != BNXT_NO_MORE_WOL_FILTERS); } Index: head/sys/dev/bwn/if_bwn.c =================================================================== --- head/sys/dev/bwn/if_bwn.c (revision 327948) +++ head/sys/dev/bwn/if_bwn.c (revision 327949) @@ -1,7501 +1,7501 @@ /*- * SPDX-License-Identifier: BSD-2-Clause-FreeBSD * * Copyright (c) 2009-2010 Weongyo Jeong * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer, * without modification. * 2. Redistributions in binary form must reproduce at minimum a disclaimer * similar to the "NO WARRANTY" disclaimer below ("Disclaimer") and any * redistribution must be conditioned upon including a substantially * similar Disclaimer requirement for further binary redistribution. 
* * NO WARRANTY * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF NONINFRINGEMENT, MERCHANTIBILITY * AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL * THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, * OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF * THE POSSIBILITY OF SUCH DAMAGES. */ #include __FBSDID("$FreeBSD$"); /* * The Broadcom Wireless LAN controller driver. */ #include "opt_bwn.h" #include "opt_wlan.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include static SYSCTL_NODE(_hw, OID_AUTO, bwn, CTLFLAG_RD, 0, "Broadcom driver parameters"); /* * Tunable & sysctl variables. */ #ifdef BWN_DEBUG static int bwn_debug = 0; SYSCTL_INT(_hw_bwn, OID_AUTO, debug, CTLFLAG_RWTUN, &bwn_debug, 0, "Broadcom debugging printfs"); #endif static int bwn_bfp = 0; /* use "Bad Frames Preemption" */ SYSCTL_INT(_hw_bwn, OID_AUTO, bfp, CTLFLAG_RW, &bwn_bfp, 0, "uses Bad Frames Preemption"); static int bwn_bluetooth = 1; SYSCTL_INT(_hw_bwn, OID_AUTO, bluetooth, CTLFLAG_RW, &bwn_bluetooth, 0, "turns on Bluetooth Coexistence"); static int bwn_hwpctl = 0; SYSCTL_INT(_hw_bwn, OID_AUTO, hwpctl, CTLFLAG_RW, &bwn_hwpctl, 0, "uses H/W power control"); static int bwn_msi_disable = 0; /* MSI disabled */ TUNABLE_INT("hw.bwn.msi_disable", &bwn_msi_disable); static int bwn_usedma = 1; SYSCTL_INT(_hw_bwn, OID_AUTO, usedma, CTLFLAG_RD, &bwn_usedma, 0, "uses DMA"); TUNABLE_INT("hw.bwn.usedma", &bwn_usedma); static int bwn_wme = 1; SYSCTL_INT(_hw_bwn, OID_AUTO, wme, CTLFLAG_RW, &bwn_wme, 0, "uses WME support"); static void bwn_attach_pre(struct bwn_softc *); static int bwn_attach_post(struct bwn_softc *); static void bwn_sprom_bugfixes(device_t); static int bwn_init(struct bwn_softc *); static void bwn_parent(struct ieee80211com *); static void bwn_start(struct bwn_softc *); static int bwn_transmit(struct ieee80211com *, struct mbuf *); static int bwn_attach_core(struct bwn_mac *); static int bwn_phy_getinfo(struct bwn_mac *, int); static int bwn_chiptest(struct bwn_mac *); static int bwn_setup_channels(struct bwn_mac *, int, int); static void bwn_shm_ctlword(struct bwn_mac *, uint16_t, uint16_t); static void bwn_addchannels(struct ieee80211_channel [], int, int *, const struct bwn_channelinfo *, const uint8_t []); static int bwn_raw_xmit(struct ieee80211_node *, struct mbuf *, const struct ieee80211_bpf_params *); static void bwn_updateslot(struct ieee80211com *); static void bwn_update_promisc(struct ieee80211com *); static void bwn_wme_init(struct bwn_mac *); static int bwn_wme_update(struct ieee80211com *); static void bwn_wme_clear(struct bwn_softc *); static void bwn_wme_load(struct bwn_mac *); static void bwn_wme_loadparams(struct bwn_mac *, const struct wmeParams *, uint16_t); static void bwn_scan_start(struct 
ieee80211com *); static void bwn_scan_end(struct ieee80211com *); static void bwn_set_channel(struct ieee80211com *); static struct ieee80211vap *bwn_vap_create(struct ieee80211com *, const char [IFNAMSIZ], int, enum ieee80211_opmode, int, const uint8_t [IEEE80211_ADDR_LEN], const uint8_t [IEEE80211_ADDR_LEN]); static void bwn_vap_delete(struct ieee80211vap *); static void bwn_stop(struct bwn_softc *); static int bwn_core_init(struct bwn_mac *); static void bwn_core_start(struct bwn_mac *); static void bwn_core_exit(struct bwn_mac *); static void bwn_bt_disable(struct bwn_mac *); static int bwn_chip_init(struct bwn_mac *); static void bwn_set_txretry(struct bwn_mac *, int, int); static void bwn_rate_init(struct bwn_mac *); static void bwn_set_phytxctl(struct bwn_mac *); static void bwn_spu_setdelay(struct bwn_mac *, int); static void bwn_bt_enable(struct bwn_mac *); static void bwn_set_macaddr(struct bwn_mac *); static void bwn_crypt_init(struct bwn_mac *); static void bwn_chip_exit(struct bwn_mac *); static int bwn_fw_fillinfo(struct bwn_mac *); static int bwn_fw_loaducode(struct bwn_mac *); static int bwn_gpio_init(struct bwn_mac *); static int bwn_fw_loadinitvals(struct bwn_mac *); static int bwn_phy_init(struct bwn_mac *); static void bwn_set_txantenna(struct bwn_mac *, int); static void bwn_set_opmode(struct bwn_mac *); static void bwn_rate_write(struct bwn_mac *, uint16_t, int); static uint8_t bwn_plcp_getcck(const uint8_t); static uint8_t bwn_plcp_getofdm(const uint8_t); static void bwn_pio_init(struct bwn_mac *); static uint16_t bwn_pio_idx2base(struct bwn_mac *, int); static void bwn_pio_set_txqueue(struct bwn_mac *, struct bwn_pio_txqueue *, int); static void bwn_pio_setupqueue_rx(struct bwn_mac *, struct bwn_pio_rxqueue *, int); static void bwn_destroy_queue_tx(struct bwn_pio_txqueue *); static uint16_t bwn_pio_read_2(struct bwn_mac *, struct bwn_pio_txqueue *, uint16_t); static void bwn_pio_cancel_tx_packets(struct bwn_pio_txqueue *); static int bwn_pio_rx(struct bwn_pio_rxqueue *); static uint8_t bwn_pio_rxeof(struct bwn_pio_rxqueue *); static void bwn_pio_handle_txeof(struct bwn_mac *, const struct bwn_txstatus *); static uint16_t bwn_pio_rx_read_2(struct bwn_pio_rxqueue *, uint16_t); static uint32_t bwn_pio_rx_read_4(struct bwn_pio_rxqueue *, uint16_t); static void bwn_pio_rx_write_2(struct bwn_pio_rxqueue *, uint16_t, uint16_t); static void bwn_pio_rx_write_4(struct bwn_pio_rxqueue *, uint16_t, uint32_t); static int bwn_pio_tx_start(struct bwn_mac *, struct ieee80211_node *, struct mbuf *); static struct bwn_pio_txqueue *bwn_pio_select(struct bwn_mac *, uint8_t); static uint32_t bwn_pio_write_multi_4(struct bwn_mac *, struct bwn_pio_txqueue *, uint32_t, const void *, int); static void bwn_pio_write_4(struct bwn_mac *, struct bwn_pio_txqueue *, uint16_t, uint32_t); static uint16_t bwn_pio_write_multi_2(struct bwn_mac *, struct bwn_pio_txqueue *, uint16_t, const void *, int); static uint16_t bwn_pio_write_mbuf_2(struct bwn_mac *, struct bwn_pio_txqueue *, uint16_t, struct mbuf *); static struct bwn_pio_txqueue *bwn_pio_parse_cookie(struct bwn_mac *, uint16_t, struct bwn_pio_txpkt **); static void bwn_dma_init(struct bwn_mac *); static void bwn_dma_rxdirectfifo(struct bwn_mac *, int, uint8_t); static int bwn_dma_mask2type(uint64_t); static uint64_t bwn_dma_mask(struct bwn_mac *); static uint16_t bwn_dma_base(int, int); static void bwn_dma_ringfree(struct bwn_dma_ring **); static void bwn_dma_32_getdesc(struct bwn_dma_ring *, int, struct bwn_dmadesc_generic **, struct 
bwn_dmadesc_meta **); static void bwn_dma_32_setdesc(struct bwn_dma_ring *, struct bwn_dmadesc_generic *, bus_addr_t, uint16_t, int, int, int); static void bwn_dma_32_start_transfer(struct bwn_dma_ring *, int); static void bwn_dma_32_suspend(struct bwn_dma_ring *); static void bwn_dma_32_resume(struct bwn_dma_ring *); static int bwn_dma_32_get_curslot(struct bwn_dma_ring *); static void bwn_dma_32_set_curslot(struct bwn_dma_ring *, int); static void bwn_dma_64_getdesc(struct bwn_dma_ring *, int, struct bwn_dmadesc_generic **, struct bwn_dmadesc_meta **); static void bwn_dma_64_setdesc(struct bwn_dma_ring *, struct bwn_dmadesc_generic *, bus_addr_t, uint16_t, int, int, int); static void bwn_dma_64_start_transfer(struct bwn_dma_ring *, int); static void bwn_dma_64_suspend(struct bwn_dma_ring *); static void bwn_dma_64_resume(struct bwn_dma_ring *); static int bwn_dma_64_get_curslot(struct bwn_dma_ring *); static void bwn_dma_64_set_curslot(struct bwn_dma_ring *, int); static int bwn_dma_allocringmemory(struct bwn_dma_ring *); static void bwn_dma_setup(struct bwn_dma_ring *); static void bwn_dma_free_ringmemory(struct bwn_dma_ring *); static void bwn_dma_cleanup(struct bwn_dma_ring *); static void bwn_dma_free_descbufs(struct bwn_dma_ring *); static int bwn_dma_tx_reset(struct bwn_mac *, uint16_t, int); static void bwn_dma_rx(struct bwn_dma_ring *); static int bwn_dma_rx_reset(struct bwn_mac *, uint16_t, int); static void bwn_dma_free_descbuf(struct bwn_dma_ring *, struct bwn_dmadesc_meta *); static void bwn_dma_set_redzone(struct bwn_dma_ring *, struct mbuf *); static int bwn_dma_gettype(struct bwn_mac *); static void bwn_dma_ring_addr(void *, bus_dma_segment_t *, int, int); static int bwn_dma_freeslot(struct bwn_dma_ring *); static int bwn_dma_nextslot(struct bwn_dma_ring *, int); static void bwn_dma_rxeof(struct bwn_dma_ring *, int *); static int bwn_dma_newbuf(struct bwn_dma_ring *, struct bwn_dmadesc_generic *, struct bwn_dmadesc_meta *, int); static void bwn_dma_buf_addr(void *, bus_dma_segment_t *, int, bus_size_t, int); static uint8_t bwn_dma_check_redzone(struct bwn_dma_ring *, struct mbuf *); static void bwn_ratectl_tx_complete(const struct ieee80211_node *, const struct bwn_txstatus *); static void bwn_dma_handle_txeof(struct bwn_mac *, const struct bwn_txstatus *); static int bwn_dma_tx_start(struct bwn_mac *, struct ieee80211_node *, struct mbuf *); static int bwn_dma_getslot(struct bwn_dma_ring *); static struct bwn_dma_ring *bwn_dma_select(struct bwn_mac *, uint8_t); static int bwn_dma_attach(struct bwn_mac *); static struct bwn_dma_ring *bwn_dma_ringsetup(struct bwn_mac *, int, int, int); static struct bwn_dma_ring *bwn_dma_parse_cookie(struct bwn_mac *, const struct bwn_txstatus *, uint16_t, int *); static void bwn_dma_free(struct bwn_mac *); static int bwn_fw_gets(struct bwn_mac *, enum bwn_fwtype); static int bwn_fw_get(struct bwn_mac *, enum bwn_fwtype, const char *, struct bwn_fwfile *); static void bwn_release_firmware(struct bwn_mac *); static void bwn_do_release_fw(struct bwn_fwfile *); static uint16_t bwn_fwcaps_read(struct bwn_mac *); static int bwn_fwinitvals_write(struct bwn_mac *, const struct bwn_fwinitvals *, size_t, size_t); static uint16_t bwn_ant2phy(int); static void bwn_mac_write_bssid(struct bwn_mac *); static void bwn_mac_setfilter(struct bwn_mac *, uint16_t, const uint8_t *); static void bwn_key_dowrite(struct bwn_mac *, uint8_t, uint8_t, const uint8_t *, size_t, const uint8_t *); static void bwn_key_macwrite(struct bwn_mac *, uint8_t, const uint8_t *); 
static void bwn_key_write(struct bwn_mac *, uint8_t, uint8_t, const uint8_t *); static void bwn_phy_exit(struct bwn_mac *); static void bwn_core_stop(struct bwn_mac *); static int bwn_switch_band(struct bwn_softc *, struct ieee80211_channel *); static void bwn_phy_reset(struct bwn_mac *); static int bwn_newstate(struct ieee80211vap *, enum ieee80211_state, int); static void bwn_set_pretbtt(struct bwn_mac *); static int bwn_intr(void *); static void bwn_intrtask(void *, int); static void bwn_restart(struct bwn_mac *, const char *); static void bwn_intr_ucode_debug(struct bwn_mac *); static void bwn_intr_tbtt_indication(struct bwn_mac *); static void bwn_intr_atim_end(struct bwn_mac *); static void bwn_intr_beacon(struct bwn_mac *); static void bwn_intr_pmq(struct bwn_mac *); static void bwn_intr_noise(struct bwn_mac *); static void bwn_intr_txeof(struct bwn_mac *); static void bwn_hwreset(void *, int); static void bwn_handle_fwpanic(struct bwn_mac *); static void bwn_load_beacon0(struct bwn_mac *); static void bwn_load_beacon1(struct bwn_mac *); static uint32_t bwn_jssi_read(struct bwn_mac *); static void bwn_noise_gensample(struct bwn_mac *); static void bwn_handle_txeof(struct bwn_mac *, const struct bwn_txstatus *); static void bwn_rxeof(struct bwn_mac *, struct mbuf *, const void *); static void bwn_phy_txpower_check(struct bwn_mac *, uint32_t); static int bwn_tx_start(struct bwn_softc *, struct ieee80211_node *, struct mbuf *); static int bwn_tx_isfull(struct bwn_softc *, struct mbuf *); static int bwn_set_txhdr(struct bwn_mac *, struct ieee80211_node *, struct mbuf *, struct bwn_txhdr *, uint16_t); static void bwn_plcp_genhdr(struct bwn_plcp4 *, const uint16_t, const uint8_t); static uint8_t bwn_antenna_sanitize(struct bwn_mac *, uint8_t); static uint8_t bwn_get_fbrate(uint8_t); static void bwn_txpwr(void *, int); static void bwn_tasks(void *); static void bwn_task_15s(struct bwn_mac *); static void bwn_task_30s(struct bwn_mac *); static void bwn_task_60s(struct bwn_mac *); static int bwn_plcp_get_ofdmrate(struct bwn_mac *, struct bwn_plcp6 *, uint8_t); static int bwn_plcp_get_cckrate(struct bwn_mac *, struct bwn_plcp6 *); static void bwn_rx_radiotap(struct bwn_mac *, struct mbuf *, const struct bwn_rxhdr4 *, struct bwn_plcp6 *, int, int, int); static void bwn_tsf_read(struct bwn_mac *, uint64_t *); static void bwn_set_slot_time(struct bwn_mac *, uint16_t); static void bwn_watchdog(void *); static void bwn_dma_stop(struct bwn_mac *); static void bwn_pio_stop(struct bwn_mac *); static void bwn_dma_ringstop(struct bwn_dma_ring **); static void bwn_led_attach(struct bwn_mac *); static void bwn_led_newstate(struct bwn_mac *, enum ieee80211_state); static void bwn_led_event(struct bwn_mac *, int); static void bwn_led_blink_start(struct bwn_mac *, int, int); static void bwn_led_blink_next(void *); static void bwn_led_blink_end(void *); static void bwn_rfswitch(void *); static void bwn_rf_turnon(struct bwn_mac *); static void bwn_rf_turnoff(struct bwn_mac *); static void bwn_sysctl_node(struct bwn_softc *); static struct resource_spec bwn_res_spec_legacy[] = { { SYS_RES_IRQ, 0, RF_ACTIVE | RF_SHAREABLE }, { -1, 0, 0 } }; static struct resource_spec bwn_res_spec_msi[] = { { SYS_RES_IRQ, 1, RF_ACTIVE }, { -1, 0, 0 } }; static const struct bwn_channelinfo bwn_chantable_bg = { .channels = { { 2412, 1, 30 }, { 2417, 2, 30 }, { 2422, 3, 30 }, { 2427, 4, 30 }, { 2432, 5, 30 }, { 2437, 6, 30 }, { 2442, 7, 30 }, { 2447, 8, 30 }, { 2452, 9, 30 }, { 2457, 10, 30 }, { 2462, 11, 30 }, { 2467, 12, 30 }, { 
2472, 13, 30 }, { 2484, 14, 30 } }, .nchannels = 14 }; static const struct bwn_channelinfo bwn_chantable_a = { .channels = { { 5170, 34, 30 }, { 5180, 36, 30 }, { 5190, 38, 30 }, { 5200, 40, 30 }, { 5210, 42, 30 }, { 5220, 44, 30 }, { 5230, 46, 30 }, { 5240, 48, 30 }, { 5260, 52, 30 }, { 5280, 56, 30 }, { 5300, 60, 30 }, { 5320, 64, 30 }, { 5500, 100, 30 }, { 5520, 104, 30 }, { 5540, 108, 30 }, { 5560, 112, 30 }, { 5580, 116, 30 }, { 5600, 120, 30 }, { 5620, 124, 30 }, { 5640, 128, 30 }, { 5660, 132, 30 }, { 5680, 136, 30 }, { 5700, 140, 30 }, { 5745, 149, 30 }, { 5765, 153, 30 }, { 5785, 157, 30 }, { 5805, 161, 30 }, { 5825, 165, 30 }, { 5920, 184, 30 }, { 5940, 188, 30 }, { 5960, 192, 30 }, { 5980, 196, 30 }, { 6000, 200, 30 }, { 6020, 204, 30 }, { 6040, 208, 30 }, { 6060, 212, 30 }, { 6080, 216, 30 } }, .nchannels = 37 }; #if 0 static const struct bwn_channelinfo bwn_chantable_n = { .channels = { { 5160, 32, 30 }, { 5170, 34, 30 }, { 5180, 36, 30 }, { 5190, 38, 30 }, { 5200, 40, 30 }, { 5210, 42, 30 }, { 5220, 44, 30 }, { 5230, 46, 30 }, { 5240, 48, 30 }, { 5250, 50, 30 }, { 5260, 52, 30 }, { 5270, 54, 30 }, { 5280, 56, 30 }, { 5290, 58, 30 }, { 5300, 60, 30 }, { 5310, 62, 30 }, { 5320, 64, 30 }, { 5330, 66, 30 }, { 5340, 68, 30 }, { 5350, 70, 30 }, { 5360, 72, 30 }, { 5370, 74, 30 }, { 5380, 76, 30 }, { 5390, 78, 30 }, { 5400, 80, 30 }, { 5410, 82, 30 }, { 5420, 84, 30 }, { 5430, 86, 30 }, { 5440, 88, 30 }, { 5450, 90, 30 }, { 5460, 92, 30 }, { 5470, 94, 30 }, { 5480, 96, 30 }, { 5490, 98, 30 }, { 5500, 100, 30 }, { 5510, 102, 30 }, { 5520, 104, 30 }, { 5530, 106, 30 }, { 5540, 108, 30 }, { 5550, 110, 30 }, { 5560, 112, 30 }, { 5570, 114, 30 }, { 5580, 116, 30 }, { 5590, 118, 30 }, { 5600, 120, 30 }, { 5610, 122, 30 }, { 5620, 124, 30 }, { 5630, 126, 30 }, { 5640, 128, 30 }, { 5650, 130, 30 }, { 5660, 132, 30 }, { 5670, 134, 30 }, { 5680, 136, 30 }, { 5690, 138, 30 }, { 5700, 140, 30 }, { 5710, 142, 30 }, { 5720, 144, 30 }, { 5725, 145, 30 }, { 5730, 146, 30 }, { 5735, 147, 30 }, { 5740, 148, 30 }, { 5745, 149, 30 }, { 5750, 150, 30 }, { 5755, 151, 30 }, { 5760, 152, 30 }, { 5765, 153, 30 }, { 5770, 154, 30 }, { 5775, 155, 30 }, { 5780, 156, 30 }, { 5785, 157, 30 }, { 5790, 158, 30 }, { 5795, 159, 30 }, { 5800, 160, 30 }, { 5805, 161, 30 }, { 5810, 162, 30 }, { 5815, 163, 30 }, { 5820, 164, 30 }, { 5825, 165, 30 }, { 5830, 166, 30 }, { 5840, 168, 30 }, { 5850, 170, 30 }, { 5860, 172, 30 }, { 5870, 174, 30 }, { 5880, 176, 30 }, { 5890, 178, 30 }, { 5900, 180, 30 }, { 5910, 182, 30 }, { 5920, 184, 30 }, { 5930, 186, 30 }, { 5940, 188, 30 }, { 5950, 190, 30 }, { 5960, 192, 30 }, { 5970, 194, 30 }, { 5980, 196, 30 }, { 5990, 198, 30 }, { 6000, 200, 30 }, { 6010, 202, 30 }, { 6020, 204, 30 }, { 6030, 206, 30 }, { 6040, 208, 30 }, { 6050, 210, 30 }, { 6060, 212, 30 }, { 6070, 214, 30 }, { 6080, 216, 30 }, { 6090, 218, 30 }, { 6100, 220, 30 }, { 6110, 222, 30 }, { 6120, 224, 30 }, { 6130, 226, 30 }, { 6140, 228, 30 } }, .nchannels = 110 }; #endif #define VENDOR_LED_ACT(vendor) \ { \ .vid = PCI_VENDOR_##vendor, \ .led_act = { BWN_VENDOR_LED_ACT_##vendor } \ } static const struct { uint16_t vid; uint8_t led_act[BWN_LED_MAX]; } bwn_vendor_led_act[] = { VENDOR_LED_ACT(COMPAQ), VENDOR_LED_ACT(ASUSTEK) }; static const uint8_t bwn_default_led_act[BWN_LED_MAX] = { BWN_VENDOR_LED_ACT_DEFAULT }; #undef VENDOR_LED_ACT static const struct { int on_dur; int off_dur; } bwn_led_duration[109] = { [0] = { 400, 100 }, [2] = { 150, 75 }, [4] = { 90, 45 }, [11] = { 66, 34 }, [12] = { 53, 26 }, [18] = { 42, 21 }, 
[22] = { 35, 17 }, [24] = { 32, 16 }, [36] = { 21, 10 }, [48] = { 16, 8 }, [72] = { 11, 5 }, [96] = { 9, 4 }, [108] = { 7, 3 } }; static const uint16_t bwn_wme_shm_offsets[] = { [0] = BWN_WME_BESTEFFORT, [1] = BWN_WME_BACKGROUND, [2] = BWN_WME_VOICE, [3] = BWN_WME_VIDEO, }; static const struct siba_devid bwn_devs[] = { SIBA_DEV(BROADCOM, 80211, 5, "Revision 5"), SIBA_DEV(BROADCOM, 80211, 6, "Revision 6"), SIBA_DEV(BROADCOM, 80211, 7, "Revision 7"), SIBA_DEV(BROADCOM, 80211, 9, "Revision 9"), SIBA_DEV(BROADCOM, 80211, 10, "Revision 10"), SIBA_DEV(BROADCOM, 80211, 11, "Revision 11"), SIBA_DEV(BROADCOM, 80211, 12, "Revision 12"), SIBA_DEV(BROADCOM, 80211, 13, "Revision 13"), SIBA_DEV(BROADCOM, 80211, 15, "Revision 15"), SIBA_DEV(BROADCOM, 80211, 16, "Revision 16") }; static const struct bwn_bus_ops * bwn_get_bus_ops(device_t dev) { #if BWN_USE_SIBA return (NULL); #else devclass_t bus_cls; bus_cls = device_get_devclass(device_get_parent(dev)); if (bus_cls == devclass_find("bhnd")) return (&bwn_bhnd_bus_ops); else return (&bwn_siba_bus_ops); #endif } static int bwn_probe(device_t dev) { struct bwn_softc *sc; int i; sc = device_get_softc(dev); sc->sc_bus_ops = bwn_get_bus_ops(dev); for (i = 0; i < nitems(bwn_devs); i++) { if (siba_get_vendor(dev) == bwn_devs[i].sd_vendor && siba_get_device(dev) == bwn_devs[i].sd_device && siba_get_revid(dev) == bwn_devs[i].sd_rev) return (BUS_PROBE_DEFAULT); } return (ENXIO); } int bwn_attach(device_t dev) { struct bwn_mac *mac; struct bwn_softc *sc = device_get_softc(dev); int error, i, msic, reg; sc->sc_dev = dev; #ifdef BWN_DEBUG sc->sc_debug = bwn_debug; #endif sc->sc_bus_ops = bwn_get_bus_ops(dev); if ((error = BWN_BUS_OPS_ATTACH(dev))) { device_printf(sc->sc_dev, "bus-specific initialization failed (%d)\n", error); return (error); } if ((sc->sc_flags & BWN_FLAG_ATTACHED) == 0) { bwn_attach_pre(sc); bwn_sprom_bugfixes(dev); sc->sc_flags |= BWN_FLAG_ATTACHED; } if (!TAILQ_EMPTY(&sc->sc_maclist)) { if (siba_get_pci_device(dev) != 0x4313 && siba_get_pci_device(dev) != 0x431a && siba_get_pci_device(dev) != 0x4321) { device_printf(sc->sc_dev, "skip 802.11 cores\n"); return (ENODEV); } } mac = malloc(sizeof(*mac), M_DEVBUF, M_WAITOK | M_ZERO); mac->mac_sc = sc; mac->mac_status = BWN_MAC_STATUS_UNINIT; if (bwn_bfp != 0) mac->mac_flags |= BWN_MAC_FLAG_BADFRAME_PREEMP; TASK_INIT(&mac->mac_hwreset, 0, bwn_hwreset, mac); TASK_INIT(&mac->mac_intrtask, 0, bwn_intrtask, mac); TASK_INIT(&mac->mac_txpower, 0, bwn_txpwr, mac); error = bwn_attach_core(mac); if (error) goto fail0; bwn_led_attach(mac); device_printf(sc->sc_dev, "WLAN (chipid %#x rev %u) " "PHY (analog %d type %d rev %d) RADIO (manuf %#x ver %#x rev %d)\n", siba_get_chipid(sc->sc_dev), siba_get_revid(sc->sc_dev), mac->mac_phy.analog, mac->mac_phy.type, mac->mac_phy.rev, mac->mac_phy.rf_manuf, mac->mac_phy.rf_ver, mac->mac_phy.rf_rev); if (mac->mac_flags & BWN_MAC_FLAG_DMA) device_printf(sc->sc_dev, "DMA (%d bits)\n", mac->mac_method.dma.dmatype); else device_printf(sc->sc_dev, "PIO\n"); #ifdef BWN_GPL_PHY device_printf(sc->sc_dev, "Note: compiled with BWN_GPL_PHY; includes GPLv2 code\n"); #endif /* * setup PCI resources and interrupt. 
*/ if (pci_find_cap(dev, PCIY_EXPRESS, &reg) == 0) { msic = pci_msi_count(dev); if (bootverbose) device_printf(sc->sc_dev, "MSI count : %d\n", msic); } else msic = 0; mac->mac_intr_spec = bwn_res_spec_legacy; if (msic == BWN_MSI_MESSAGES && bwn_msi_disable == 0) { if (pci_alloc_msi(dev, &msic) == 0) { device_printf(sc->sc_dev, "Using %d MSI messages\n", msic); mac->mac_intr_spec = bwn_res_spec_msi; mac->mac_msi = 1; } } error = bus_alloc_resources(dev, mac->mac_intr_spec, mac->mac_res_irq); if (error) { device_printf(sc->sc_dev, "couldn't allocate IRQ resources (%d)\n", error); goto fail1; } if (mac->mac_msi == 0) error = bus_setup_intr(dev, mac->mac_res_irq[0], INTR_TYPE_NET | INTR_MPSAFE, bwn_intr, NULL, mac, &mac->mac_intrhand[0]); else { for (i = 0; i < BWN_MSI_MESSAGES; i++) { error = bus_setup_intr(dev, mac->mac_res_irq[i], INTR_TYPE_NET | INTR_MPSAFE, bwn_intr, NULL, mac, &mac->mac_intrhand[i]); if (error != 0) { device_printf(sc->sc_dev, "couldn't setup interrupt (%d)\n", error); break; } } } TAILQ_INSERT_TAIL(&sc->sc_maclist, mac, mac_list); /* * calls attach-post routine */ if ((sc->sc_flags & BWN_FLAG_ATTACHED) != 0) bwn_attach_post(sc); return (0); fail1: if (msic == BWN_MSI_MESSAGES && bwn_msi_disable == 0) pci_release_msi(dev); fail0: BWN_BUS_OPS_DETACH(dev); free(mac, M_DEVBUF); return (error); } static int bwn_is_valid_ether_addr(uint8_t *addr) { char zero_addr[6] = { 0, 0, 0, 0, 0, 0 }; if ((addr[0] & 1) || (!bcmp(addr, zero_addr, ETHER_ADDR_LEN))) return (FALSE); return (TRUE); } static int bwn_attach_post(struct bwn_softc *sc) { struct ieee80211com *ic = &sc->sc_ic; ic->ic_softc = sc; ic->ic_name = device_get_nameunit(sc->sc_dev); /* XXX not right but it's not used anywhere important */ ic->ic_phytype = IEEE80211_T_OFDM; ic->ic_opmode = IEEE80211_M_STA; ic->ic_caps = IEEE80211_C_STA /* station mode supported */ | IEEE80211_C_MONITOR /* monitor mode */ | IEEE80211_C_AHDEMO /* adhoc demo mode */ | IEEE80211_C_SHPREAMBLE /* short preamble supported */ | IEEE80211_C_SHSLOT /* short slot time supported */ | IEEE80211_C_WME /* WME/WMM supported */ | IEEE80211_C_WPA /* capable of WPA1+WPA2 */ #if 0 | IEEE80211_C_BGSCAN /* capable of bg scanning */ #endif | IEEE80211_C_TXPMGT /* capable of txpow mgt */ ; ic->ic_flags_ext |= IEEE80211_FEXT_SWBMISS; /* s/w bmiss */ IEEE80211_ADDR_COPY(ic->ic_macaddr, bwn_is_valid_ether_addr(siba_sprom_get_mac_80211a(sc->sc_dev)) ? siba_sprom_get_mac_80211a(sc->sc_dev) : siba_sprom_get_mac_80211bg(sc->sc_dev)); /* call MI attach routine.
*/ ieee80211_ifattach(ic); ic->ic_headroom = sizeof(struct bwn_txhdr); /* override default methods */ ic->ic_raw_xmit = bwn_raw_xmit; ic->ic_updateslot = bwn_updateslot; ic->ic_update_promisc = bwn_update_promisc; ic->ic_wme.wme_update = bwn_wme_update; ic->ic_scan_start = bwn_scan_start; ic->ic_scan_end = bwn_scan_end; ic->ic_set_channel = bwn_set_channel; ic->ic_vap_create = bwn_vap_create; ic->ic_vap_delete = bwn_vap_delete; ic->ic_transmit = bwn_transmit; ic->ic_parent = bwn_parent; ieee80211_radiotap_attach(ic, &sc->sc_tx_th.wt_ihdr, sizeof(sc->sc_tx_th), BWN_TX_RADIOTAP_PRESENT, &sc->sc_rx_th.wr_ihdr, sizeof(sc->sc_rx_th), BWN_RX_RADIOTAP_PRESENT); bwn_sysctl_node(sc); if (bootverbose) ieee80211_announce(ic); return (0); } static void bwn_phy_detach(struct bwn_mac *mac) { if (mac->mac_phy.detach != NULL) mac->mac_phy.detach(mac); } int bwn_detach(device_t dev) { struct bwn_softc *sc = device_get_softc(dev); struct bwn_mac *mac = sc->sc_curmac; struct ieee80211com *ic = &sc->sc_ic; int i; sc->sc_flags |= BWN_FLAG_INVALID; if (device_is_attached(sc->sc_dev)) { BWN_LOCK(sc); bwn_stop(sc); BWN_UNLOCK(sc); bwn_dma_free(mac); callout_drain(&sc->sc_led_blink_ch); callout_drain(&sc->sc_rfswitch_ch); callout_drain(&sc->sc_task_ch); callout_drain(&sc->sc_watchdog_ch); bwn_phy_detach(mac); ieee80211_draintask(ic, &mac->mac_hwreset); ieee80211_draintask(ic, &mac->mac_txpower); ieee80211_ifdetach(ic); } taskqueue_drain(sc->sc_tq, &mac->mac_intrtask); taskqueue_free(sc->sc_tq); for (i = 0; i < BWN_MSI_MESSAGES; i++) { if (mac->mac_intrhand[i] != NULL) { bus_teardown_intr(dev, mac->mac_res_irq[i], mac->mac_intrhand[i]); mac->mac_intrhand[i] = NULL; } } bus_release_resources(dev, mac->mac_intr_spec, mac->mac_res_irq); if (mac->mac_msi != 0) pci_release_msi(dev); mbufq_drain(&sc->sc_snd); bwn_release_firmware(mac); BWN_LOCK_DESTROY(sc); BWN_BUS_OPS_DETACH(dev); return (0); } static void bwn_attach_pre(struct bwn_softc *sc) { BWN_LOCK_INIT(sc); TAILQ_INIT(&sc->sc_maclist); callout_init_mtx(&sc->sc_rfswitch_ch, &sc->sc_mtx, 0); callout_init_mtx(&sc->sc_task_ch, &sc->sc_mtx, 0); callout_init_mtx(&sc->sc_watchdog_ch, &sc->sc_mtx, 0); mbufq_init(&sc->sc_snd, ifqmaxlen); sc->sc_tq = taskqueue_create_fast("bwn_taskq", M_NOWAIT, taskqueue_thread_enqueue, &sc->sc_tq); taskqueue_start_threads(&sc->sc_tq, 1, PI_NET, "%s taskq", device_get_nameunit(sc->sc_dev)); } static void bwn_sprom_bugfixes(device_t dev) { #define BWN_ISDEV(_vendor, _device, _subvendor, _subdevice) \ ((siba_get_pci_vendor(dev) == PCI_VENDOR_##_vendor) && \ (siba_get_pci_device(dev) == _device) && \ (siba_get_pci_subvendor(dev) == PCI_VENDOR_##_subvendor) && \ (siba_get_pci_subdevice(dev) == _subdevice)) if (siba_get_pci_subvendor(dev) == PCI_VENDOR_APPLE && siba_get_pci_subdevice(dev) == 0x4e && siba_get_pci_revid(dev) > 0x40) siba_sprom_set_bf_lo(dev, siba_sprom_get_bf_lo(dev) | BWN_BFL_PACTRL); if (siba_get_pci_subvendor(dev) == SIBA_BOARDVENDOR_DELL && siba_get_chipid(dev) == 0x4301 && siba_get_pci_revid(dev) == 0x74) siba_sprom_set_bf_lo(dev, siba_sprom_get_bf_lo(dev) | BWN_BFL_BTCOEXIST); if (siba_get_type(dev) == SIBA_TYPE_PCI) { if (BWN_ISDEV(BROADCOM, 0x4318, ASUSTEK, 0x100f) || BWN_ISDEV(BROADCOM, 0x4320, DELL, 0x0003) || BWN_ISDEV(BROADCOM, 0x4320, HP, 0x12f8) || BWN_ISDEV(BROADCOM, 0x4320, LINKSYS, 0x0013) || BWN_ISDEV(BROADCOM, 0x4320, LINKSYS, 0x0014) || BWN_ISDEV(BROADCOM, 0x4320, LINKSYS, 0x0015) || BWN_ISDEV(BROADCOM, 0x4320, MOTOROLA, 0x7010)) siba_sprom_set_bf_lo(dev, siba_sprom_get_bf_lo(dev) & ~BWN_BFL_BTCOEXIST); } 
#undef BWN_ISDEV } static void bwn_parent(struct ieee80211com *ic) { struct bwn_softc *sc = ic->ic_softc; int startall = 0; BWN_LOCK(sc); if (ic->ic_nrunning > 0) { if ((sc->sc_flags & BWN_FLAG_RUNNING) == 0) { bwn_init(sc); startall = 1; } else bwn_update_promisc(ic); } else if (sc->sc_flags & BWN_FLAG_RUNNING) bwn_stop(sc); BWN_UNLOCK(sc); if (startall) ieee80211_start_all(ic); } static int bwn_transmit(struct ieee80211com *ic, struct mbuf *m) { struct bwn_softc *sc = ic->ic_softc; int error; BWN_LOCK(sc); if ((sc->sc_flags & BWN_FLAG_RUNNING) == 0) { BWN_UNLOCK(sc); return (ENXIO); } error = mbufq_enqueue(&sc->sc_snd, m); if (error) { BWN_UNLOCK(sc); return (error); } bwn_start(sc); BWN_UNLOCK(sc); return (0); } static void bwn_start(struct bwn_softc *sc) { struct bwn_mac *mac = sc->sc_curmac; struct ieee80211_frame *wh; struct ieee80211_node *ni; struct ieee80211_key *k; struct mbuf *m; BWN_ASSERT_LOCKED(sc); if ((sc->sc_flags & BWN_FLAG_RUNNING) == 0 || mac == NULL || mac->mac_status < BWN_MAC_STATUS_STARTED) return; while ((m = mbufq_dequeue(&sc->sc_snd)) != NULL) { if (bwn_tx_isfull(sc, m)) break; ni = (struct ieee80211_node *) m->m_pkthdr.rcvif; if (ni == NULL) { device_printf(sc->sc_dev, "unexpected NULL ni\n"); m_freem(m); counter_u64_add(sc->sc_ic.ic_oerrors, 1); continue; } wh = mtod(m, struct ieee80211_frame *); if (wh->i_fc[1] & IEEE80211_FC1_PROTECTED) { k = ieee80211_crypto_encap(ni, m); if (k == NULL) { if_inc_counter(ni->ni_vap->iv_ifp, IFCOUNTER_OERRORS, 1); ieee80211_free_node(ni); m_freem(m); continue; } } wh = NULL; /* Catch any invalid use */ if (bwn_tx_start(sc, ni, m) != 0) { if (ni != NULL) { if_inc_counter(ni->ni_vap->iv_ifp, IFCOUNTER_OERRORS, 1); ieee80211_free_node(ni); } continue; } sc->sc_watchdog_timer = 5; } } static int bwn_tx_isfull(struct bwn_softc *sc, struct mbuf *m) { struct bwn_dma_ring *dr; struct bwn_mac *mac = sc->sc_curmac; struct bwn_pio_txqueue *tq; int pktlen = roundup(m->m_pkthdr.len + BWN_HDRSIZE(mac), 4); BWN_ASSERT_LOCKED(sc); if (mac->mac_flags & BWN_MAC_FLAG_DMA) { dr = bwn_dma_select(mac, M_WME_GETAC(m)); if (dr->dr_stop == 1 || bwn_dma_freeslot(dr) < BWN_TX_SLOTS_PER_FRAME) { dr->dr_stop = 1; goto full; } } else { tq = bwn_pio_select(mac, M_WME_GETAC(m)); if (tq->tq_free == 0 || pktlen > tq->tq_size || pktlen > (tq->tq_size - tq->tq_used)) goto full; } return (0); full: mbufq_prepend(&sc->sc_snd, m); return (1); } static int bwn_tx_start(struct bwn_softc *sc, struct ieee80211_node *ni, struct mbuf *m) { struct bwn_mac *mac = sc->sc_curmac; int error; BWN_ASSERT_LOCKED(sc); if (m->m_pkthdr.len < IEEE80211_MIN_LEN || mac == NULL) { m_freem(m); return (ENXIO); } error = (mac->mac_flags & BWN_MAC_FLAG_DMA) ? 
bwn_dma_tx_start(mac, ni, m) : bwn_pio_tx_start(mac, ni, m); if (error) { m_freem(m); return (error); } return (0); } static int bwn_pio_tx_start(struct bwn_mac *mac, struct ieee80211_node *ni, struct mbuf *m) { struct bwn_pio_txpkt *tp; struct bwn_pio_txqueue *tq = bwn_pio_select(mac, M_WME_GETAC(m)); struct bwn_softc *sc = mac->mac_sc; struct bwn_txhdr txhdr; struct mbuf *m_new; uint32_t ctl32; int error; uint16_t ctl16; BWN_ASSERT_LOCKED(sc); /* XXX TODO send packets after DTIM */ KASSERT(!TAILQ_EMPTY(&tq->tq_pktlist), ("%s: fail", __func__)); tp = TAILQ_FIRST(&tq->tq_pktlist); tp->tp_ni = ni; tp->tp_m = m; error = bwn_set_txhdr(mac, ni, m, &txhdr, BWN_PIO_COOKIE(tq, tp)); if (error) { device_printf(sc->sc_dev, "tx fail\n"); return (error); } TAILQ_REMOVE(&tq->tq_pktlist, tp, tp_list); tq->tq_used += roundup(m->m_pkthdr.len + BWN_HDRSIZE(mac), 4); tq->tq_free--; if (siba_get_revid(sc->sc_dev) >= 8) { /* * XXX please removes m_defrag(9) */ m_new = m_defrag(m, M_NOWAIT); if (m_new == NULL) { device_printf(sc->sc_dev, "%s: can't defrag TX buffer\n", __func__); return (ENOBUFS); } if (m_new->m_next != NULL) device_printf(sc->sc_dev, "TODO: fragmented packets for PIO\n"); tp->tp_m = m_new; /* send HEADER */ ctl32 = bwn_pio_write_multi_4(mac, tq, (BWN_PIO_READ_4(mac, tq, BWN_PIO8_TXCTL) | BWN_PIO8_TXCTL_FRAMEREADY) & ~BWN_PIO8_TXCTL_EOF, (const uint8_t *)&txhdr, BWN_HDRSIZE(mac)); /* send BODY */ ctl32 = bwn_pio_write_multi_4(mac, tq, ctl32, mtod(m_new, const void *), m_new->m_pkthdr.len); bwn_pio_write_4(mac, tq, BWN_PIO_TXCTL, ctl32 | BWN_PIO8_TXCTL_EOF); } else { ctl16 = bwn_pio_write_multi_2(mac, tq, (bwn_pio_read_2(mac, tq, BWN_PIO_TXCTL) | BWN_PIO_TXCTL_FRAMEREADY) & ~BWN_PIO_TXCTL_EOF, (const uint8_t *)&txhdr, BWN_HDRSIZE(mac)); ctl16 = bwn_pio_write_mbuf_2(mac, tq, ctl16, m); BWN_PIO_WRITE_2(mac, tq, BWN_PIO_TXCTL, ctl16 | BWN_PIO_TXCTL_EOF); } return (0); } static struct bwn_pio_txqueue * bwn_pio_select(struct bwn_mac *mac, uint8_t prio) { if ((mac->mac_flags & BWN_MAC_FLAG_WME) == 0) return (&mac->mac_method.pio.wme[WME_AC_BE]); switch (prio) { case 0: return (&mac->mac_method.pio.wme[WME_AC_BE]); case 1: return (&mac->mac_method.pio.wme[WME_AC_BK]); case 2: return (&mac->mac_method.pio.wme[WME_AC_VI]); case 3: return (&mac->mac_method.pio.wme[WME_AC_VO]); } KASSERT(0 == 1, ("%s:%d: fail", __func__, __LINE__)); return (NULL); } static int bwn_dma_tx_start(struct bwn_mac *mac, struct ieee80211_node *ni, struct mbuf *m) { #define BWN_GET_TXHDRCACHE(slot) \ &(txhdr_cache[(slot / BWN_TX_SLOTS_PER_FRAME) * BWN_HDRSIZE(mac)]) struct bwn_dma *dma = &mac->mac_method.dma; struct bwn_dma_ring *dr = bwn_dma_select(mac, M_WME_GETAC(m)); struct bwn_dmadesc_generic *desc; struct bwn_dmadesc_meta *mt; struct bwn_softc *sc = mac->mac_sc; uint8_t *txhdr_cache = (uint8_t *)dr->dr_txhdr_cache; int error, slot, backup[2] = { dr->dr_curslot, dr->dr_usedslot }; BWN_ASSERT_LOCKED(sc); KASSERT(!dr->dr_stop, ("%s:%d: fail", __func__, __LINE__)); /* XXX send after DTIM */ slot = bwn_dma_getslot(dr); dr->getdesc(dr, slot, &desc, &mt); KASSERT(mt->mt_txtype == BWN_DMADESC_METATYPE_HEADER, ("%s:%d: fail", __func__, __LINE__)); error = bwn_set_txhdr(dr->dr_mac, ni, m, (struct bwn_txhdr *)BWN_GET_TXHDRCACHE(slot), BWN_DMA_COOKIE(dr, slot)); if (error) goto fail; error = bus_dmamap_load(dr->dr_txring_dtag, mt->mt_dmap, BWN_GET_TXHDRCACHE(slot), BWN_HDRSIZE(mac), bwn_dma_ring_addr, &mt->mt_paddr, BUS_DMA_NOWAIT); if (error) { device_printf(sc->sc_dev, "%s: can't load TX buffer (1) %d\n", __func__, error); goto 
fail; } bus_dmamap_sync(dr->dr_txring_dtag, mt->mt_dmap, BUS_DMASYNC_PREWRITE); dr->setdesc(dr, desc, mt->mt_paddr, BWN_HDRSIZE(mac), 1, 0, 0); bus_dmamap_sync(dr->dr_ring_dtag, dr->dr_ring_dmap, BUS_DMASYNC_PREWRITE); slot = bwn_dma_getslot(dr); dr->getdesc(dr, slot, &desc, &mt); KASSERT(mt->mt_txtype == BWN_DMADESC_METATYPE_BODY && mt->mt_islast == 1, ("%s:%d: fail", __func__, __LINE__)); mt->mt_m = m; mt->mt_ni = ni; error = bus_dmamap_load_mbuf(dma->txbuf_dtag, mt->mt_dmap, m, bwn_dma_buf_addr, &mt->mt_paddr, BUS_DMA_NOWAIT); if (error && error != EFBIG) { device_printf(sc->sc_dev, "%s: can't load TX buffer (1) %d\n", __func__, error); goto fail; } if (error) { /* error == EFBIG */ struct mbuf *m_new; m_new = m_defrag(m, M_NOWAIT); if (m_new == NULL) { device_printf(sc->sc_dev, "%s: can't defrag TX buffer\n", __func__); error = ENOBUFS; goto fail; } else { m = m_new; } mt->mt_m = m; error = bus_dmamap_load_mbuf(dma->txbuf_dtag, mt->mt_dmap, m, bwn_dma_buf_addr, &mt->mt_paddr, BUS_DMA_NOWAIT); if (error) { device_printf(sc->sc_dev, "%s: can't load TX buffer (2) %d\n", __func__, error); goto fail; } } bus_dmamap_sync(dma->txbuf_dtag, mt->mt_dmap, BUS_DMASYNC_PREWRITE); dr->setdesc(dr, desc, mt->mt_paddr, m->m_pkthdr.len, 0, 1, 1); bus_dmamap_sync(dr->dr_ring_dtag, dr->dr_ring_dmap, BUS_DMASYNC_PREWRITE); /* XXX send after DTIM */ dr->start_transfer(dr, bwn_dma_nextslot(dr, slot)); return (0); fail: dr->dr_curslot = backup[0]; dr->dr_usedslot = backup[1]; return (error); #undef BWN_GET_TXHDRCACHE } static void bwn_watchdog(void *arg) { struct bwn_softc *sc = arg; if (sc->sc_watchdog_timer != 0 && --sc->sc_watchdog_timer == 0) { device_printf(sc->sc_dev, "device timeout\n"); counter_u64_add(sc->sc_ic.ic_oerrors, 1); } callout_schedule(&sc->sc_watchdog_ch, hz); } static int bwn_attach_core(struct bwn_mac *mac) { struct bwn_softc *sc = mac->mac_sc; int error, have_bg = 0, have_a = 0; uint32_t high; KASSERT(siba_get_revid(sc->sc_dev) >= 5, ("unsupported revision %d", siba_get_revid(sc->sc_dev))); siba_powerup(sc->sc_dev, 0); high = siba_read_4(sc->sc_dev, SIBA_TGSHIGH); have_a = (high & BWN_TGSHIGH_HAVE_5GHZ) ? 1 : 0; have_bg = (high & BWN_TGSHIGH_HAVE_2GHZ) ? 1 : 0; if (high & BWN_TGSHIGH_DUALPHY) { have_bg = 1; have_a = 1; } #if 0 device_printf(sc->sc_dev, "%s: high=0x%08x, have_a=%d, have_bg=%d," " deviceid=0x%04x, siba_deviceid=0x%04x\n", __func__, high, have_a, have_bg, siba_get_pci_device(sc->sc_dev), siba_get_chipid(sc->sc_dev)); #endif /* * Guess at whether it has A-PHY or G-PHY. * This is just used for resetting the core to probe things; * we will re-guess once it's all up and working. */ bwn_reset_core(mac, have_bg); /* * Get the PHY version. */ error = bwn_phy_getinfo(mac, have_bg); if (error) goto fail; /* * This is the whitelist of devices which we "believe" * the SPROM PHY config from. The rest are "guessed". */ if (siba_get_pci_device(sc->sc_dev) != 0x4312 && siba_get_pci_device(sc->sc_dev) != 0x4315 && siba_get_pci_device(sc->sc_dev) != 0x4319 && siba_get_pci_device(sc->sc_dev) != 0x4324 && siba_get_pci_device(sc->sc_dev) != 0x4328 && siba_get_pci_device(sc->sc_dev) != 0x432b) { have_a = have_bg = 0; if (mac->mac_phy.type == BWN_PHYTYPE_A) have_a = 1; else if (mac->mac_phy.type == BWN_PHYTYPE_G || mac->mac_phy.type == BWN_PHYTYPE_N || mac->mac_phy.type == BWN_PHYTYPE_LP) have_bg = 1; else KASSERT(0 == 1, ("%s: unknown phy type (%d)", __func__, mac->mac_phy.type)); } /* * XXX The PHY-G support doesn't do 5GHz operation. 
*/ if (mac->mac_phy.type != BWN_PHYTYPE_LP && mac->mac_phy.type != BWN_PHYTYPE_N) { device_printf(sc->sc_dev, "%s: forcing 2GHz only; no dual-band support for PHY\n", __func__); have_a = 0; have_bg = 1; } mac->mac_phy.phy_n = NULL; if (mac->mac_phy.type == BWN_PHYTYPE_G) { mac->mac_phy.attach = bwn_phy_g_attach; mac->mac_phy.detach = bwn_phy_g_detach; mac->mac_phy.prepare_hw = bwn_phy_g_prepare_hw; mac->mac_phy.init_pre = bwn_phy_g_init_pre; mac->mac_phy.init = bwn_phy_g_init; mac->mac_phy.exit = bwn_phy_g_exit; mac->mac_phy.phy_read = bwn_phy_g_read; mac->mac_phy.phy_write = bwn_phy_g_write; mac->mac_phy.rf_read = bwn_phy_g_rf_read; mac->mac_phy.rf_write = bwn_phy_g_rf_write; mac->mac_phy.use_hwpctl = bwn_phy_g_hwpctl; mac->mac_phy.rf_onoff = bwn_phy_g_rf_onoff; mac->mac_phy.switch_analog = bwn_phy_switch_analog; mac->mac_phy.switch_channel = bwn_phy_g_switch_channel; mac->mac_phy.get_default_chan = bwn_phy_g_get_default_chan; mac->mac_phy.set_antenna = bwn_phy_g_set_antenna; mac->mac_phy.set_im = bwn_phy_g_im; mac->mac_phy.recalc_txpwr = bwn_phy_g_recalc_txpwr; mac->mac_phy.set_txpwr = bwn_phy_g_set_txpwr; mac->mac_phy.task_15s = bwn_phy_g_task_15s; mac->mac_phy.task_60s = bwn_phy_g_task_60s; } else if (mac->mac_phy.type == BWN_PHYTYPE_LP) { mac->mac_phy.init_pre = bwn_phy_lp_init_pre; mac->mac_phy.init = bwn_phy_lp_init; mac->mac_phy.phy_read = bwn_phy_lp_read; mac->mac_phy.phy_write = bwn_phy_lp_write; mac->mac_phy.phy_maskset = bwn_phy_lp_maskset; mac->mac_phy.rf_read = bwn_phy_lp_rf_read; mac->mac_phy.rf_write = bwn_phy_lp_rf_write; mac->mac_phy.rf_onoff = bwn_phy_lp_rf_onoff; mac->mac_phy.switch_analog = bwn_phy_lp_switch_analog; mac->mac_phy.switch_channel = bwn_phy_lp_switch_channel; mac->mac_phy.get_default_chan = bwn_phy_lp_get_default_chan; mac->mac_phy.set_antenna = bwn_phy_lp_set_antenna; mac->mac_phy.task_60s = bwn_phy_lp_task_60s; } else if (mac->mac_phy.type == BWN_PHYTYPE_N) { mac->mac_phy.attach = bwn_phy_n_attach; mac->mac_phy.detach = bwn_phy_n_detach; mac->mac_phy.prepare_hw = bwn_phy_n_prepare_hw; mac->mac_phy.init_pre = bwn_phy_n_init_pre; mac->mac_phy.init = bwn_phy_n_init; mac->mac_phy.exit = bwn_phy_n_exit; mac->mac_phy.phy_read = bwn_phy_n_read; mac->mac_phy.phy_write = bwn_phy_n_write; mac->mac_phy.rf_read = bwn_phy_n_rf_read; mac->mac_phy.rf_write = bwn_phy_n_rf_write; mac->mac_phy.use_hwpctl = bwn_phy_n_hwpctl; mac->mac_phy.rf_onoff = bwn_phy_n_rf_onoff; mac->mac_phy.switch_analog = bwn_phy_n_switch_analog; mac->mac_phy.switch_channel = bwn_phy_n_switch_channel; mac->mac_phy.get_default_chan = bwn_phy_n_get_default_chan; mac->mac_phy.set_antenna = bwn_phy_n_set_antenna; mac->mac_phy.set_im = bwn_phy_n_im; mac->mac_phy.recalc_txpwr = bwn_phy_n_recalc_txpwr; mac->mac_phy.set_txpwr = bwn_phy_n_set_txpwr; mac->mac_phy.task_15s = bwn_phy_n_task_15s; mac->mac_phy.task_60s = bwn_phy_n_task_60s; } else { device_printf(sc->sc_dev, "unsupported PHY type (%d)\n", mac->mac_phy.type); error = ENXIO; goto fail; } mac->mac_phy.gmode = have_bg; if (mac->mac_phy.attach != NULL) { error = mac->mac_phy.attach(mac); if (error) { device_printf(sc->sc_dev, "failed\n"); goto fail; } } bwn_reset_core(mac, have_bg); error = bwn_chiptest(mac); if (error) goto fail; error = bwn_setup_channels(mac, have_bg, have_a); if (error) { device_printf(sc->sc_dev, "failed to setup channels\n"); goto fail; } if (sc->sc_curmac == NULL) sc->sc_curmac = mac; error = bwn_dma_attach(mac); if (error != 0) { device_printf(sc->sc_dev, "failed to initialize DMA\n"); goto fail; } 
mac->mac_phy.switch_analog(mac, 0); siba_dev_down(sc->sc_dev, 0); fail: siba_powerdown(sc->sc_dev); bwn_release_firmware(mac); return (error); } /* * Reset - SIBA. */ void bwn_reset_core(struct bwn_mac *mac, int g_mode) { struct bwn_softc *sc = mac->mac_sc; uint32_t low, ctl; uint32_t flags = 0; DPRINTF(sc, BWN_DEBUG_RESET, "%s: g_mode=%d\n", __func__, g_mode); flags |= (BWN_TGSLOW_PHYCLOCK_ENABLE | BWN_TGSLOW_PHYRESET); if (g_mode) flags |= BWN_TGSLOW_SUPPORT_G; /* XXX N-PHY only; and hard-code to 20MHz for now */ if (mac->mac_phy.type == BWN_PHYTYPE_N) flags |= BWN_TGSLOW_PHY_BANDWIDTH_20MHZ; siba_dev_up(sc->sc_dev, flags); DELAY(2000); /* Take PHY out of reset */ low = (siba_read_4(sc->sc_dev, SIBA_TGSLOW) | SIBA_TGSLOW_FGC) & ~(BWN_TGSLOW_PHYRESET | BWN_TGSLOW_PHYCLOCK_ENABLE); siba_write_4(sc->sc_dev, SIBA_TGSLOW, low); siba_read_4(sc->sc_dev, SIBA_TGSLOW); DELAY(2000); low &= ~SIBA_TGSLOW_FGC; low |= BWN_TGSLOW_PHYCLOCK_ENABLE; siba_write_4(sc->sc_dev, SIBA_TGSLOW, low); siba_read_4(sc->sc_dev, SIBA_TGSLOW); DELAY(2000); if (mac->mac_phy.switch_analog != NULL) mac->mac_phy.switch_analog(mac, 1); ctl = BWN_READ_4(mac, BWN_MACCTL) & ~BWN_MACCTL_GMODE; if (g_mode) ctl |= BWN_MACCTL_GMODE; BWN_WRITE_4(mac, BWN_MACCTL, ctl | BWN_MACCTL_IHR_ON); } static int bwn_phy_getinfo(struct bwn_mac *mac, int gmode) { struct bwn_phy *phy = &mac->mac_phy; struct bwn_softc *sc = mac->mac_sc; uint32_t tmp; /* PHY */ tmp = BWN_READ_2(mac, BWN_PHYVER); phy->gmode = gmode; phy->rf_on = 1; phy->analog = (tmp & BWN_PHYVER_ANALOG) >> 12; phy->type = (tmp & BWN_PHYVER_TYPE) >> 8; phy->rev = (tmp & BWN_PHYVER_VERSION); if ((phy->type == BWN_PHYTYPE_A && phy->rev >= 4) || (phy->type == BWN_PHYTYPE_B && phy->rev != 2 && phy->rev != 4 && phy->rev != 6 && phy->rev != 7) || (phy->type == BWN_PHYTYPE_G && phy->rev > 9) || (phy->type == BWN_PHYTYPE_N && phy->rev > 6) || (phy->type == BWN_PHYTYPE_LP && phy->rev > 2)) goto unsupphy; /* RADIO */ if (siba_get_chipid(sc->sc_dev) == 0x4317) { if (siba_get_chiprev(sc->sc_dev) == 0) tmp = 0x3205017f; else if (siba_get_chiprev(sc->sc_dev) == 1) tmp = 0x4205017f; else tmp = 0x5205017f; } else { BWN_WRITE_2(mac, BWN_RFCTL, BWN_RFCTL_ID); tmp = BWN_READ_2(mac, BWN_RFDATALO); BWN_WRITE_2(mac, BWN_RFCTL, BWN_RFCTL_ID); tmp |= (uint32_t)BWN_READ_2(mac, BWN_RFDATAHI) << 16; } phy->rf_rev = (tmp & 0xf0000000) >> 28; phy->rf_ver = (tmp & 0x0ffff000) >> 12; phy->rf_manuf = (tmp & 0x00000fff); /* * For now, just always do full init (ie, what bwn has traditionally * done) */ phy->phy_do_full_init = 1; if (phy->rf_manuf != 0x17f) /* 0x17f is broadcom */ goto unsupradio; if ((phy->type == BWN_PHYTYPE_A && (phy->rf_ver != 0x2060 || phy->rf_rev != 1 || phy->rf_manuf != 0x17f)) || (phy->type == BWN_PHYTYPE_B && (phy->rf_ver & 0xfff0) != 0x2050) || (phy->type == BWN_PHYTYPE_G && phy->rf_ver != 0x2050) || (phy->type == BWN_PHYTYPE_N && phy->rf_ver != 0x2055 && phy->rf_ver != 0x2056) || (phy->type == BWN_PHYTYPE_LP && phy->rf_ver != 0x2062 && phy->rf_ver != 0x2063)) goto unsupradio; return (0); unsupphy: device_printf(sc->sc_dev, "unsupported PHY (type %#x, rev %#x, " "analog %#x)\n", phy->type, phy->rev, phy->analog); return (ENXIO); unsupradio: device_printf(sc->sc_dev, "unsupported radio (manuf %#x, ver %#x, " "rev %#x)\n", phy->rf_manuf, phy->rf_ver, phy->rf_rev); return (ENXIO); } static int bwn_chiptest(struct bwn_mac *mac) { #define TESTVAL0 0x55aaaa55 #define TESTVAL1 0xaa5555aa struct bwn_softc *sc = mac->mac_sc; uint32_t v, backup; BWN_LOCK(sc); backup = bwn_shm_read_4(mac, BWN_SHARED, 
0); bwn_shm_write_4(mac, BWN_SHARED, 0, TESTVAL0); if (bwn_shm_read_4(mac, BWN_SHARED, 0) != TESTVAL0) goto error; bwn_shm_write_4(mac, BWN_SHARED, 0, TESTVAL1); if (bwn_shm_read_4(mac, BWN_SHARED, 0) != TESTVAL1) goto error; bwn_shm_write_4(mac, BWN_SHARED, 0, backup); if ((siba_get_revid(sc->sc_dev) >= 3) && (siba_get_revid(sc->sc_dev) <= 10)) { BWN_WRITE_2(mac, BWN_TSF_CFP_START, 0xaaaa); BWN_WRITE_4(mac, BWN_TSF_CFP_START, 0xccccbbbb); if (BWN_READ_2(mac, BWN_TSF_CFP_START_LOW) != 0xbbbb) goto error; if (BWN_READ_2(mac, BWN_TSF_CFP_START_HIGH) != 0xcccc) goto error; } BWN_WRITE_4(mac, BWN_TSF_CFP_START, 0); v = BWN_READ_4(mac, BWN_MACCTL) | BWN_MACCTL_GMODE; if (v != (BWN_MACCTL_GMODE | BWN_MACCTL_IHR_ON)) goto error; BWN_UNLOCK(sc); return (0); error: BWN_UNLOCK(sc); device_printf(sc->sc_dev, "failed to validate the chipaccess\n"); return (ENODEV); } static int bwn_setup_channels(struct bwn_mac *mac, int have_bg, int have_a) { struct bwn_softc *sc = mac->mac_sc; struct ieee80211com *ic = &sc->sc_ic; uint8_t bands[IEEE80211_MODE_BYTES]; memset(ic->ic_channels, 0, sizeof(ic->ic_channels)); ic->ic_nchans = 0; DPRINTF(sc, BWN_DEBUG_EEPROM, "%s: called; bg=%d, a=%d\n", __func__, have_bg, have_a); if (have_bg) { memset(bands, 0, sizeof(bands)); setbit(bands, IEEE80211_MODE_11B); setbit(bands, IEEE80211_MODE_11G); bwn_addchannels(ic->ic_channels, IEEE80211_CHAN_MAX, &ic->ic_nchans, &bwn_chantable_bg, bands); } if (have_a) { memset(bands, 0, sizeof(bands)); setbit(bands, IEEE80211_MODE_11A); bwn_addchannels(ic->ic_channels, IEEE80211_CHAN_MAX, &ic->ic_nchans, &bwn_chantable_a, bands); } mac->mac_phy.supports_2ghz = have_bg; mac->mac_phy.supports_5ghz = have_a; return (ic->ic_nchans == 0 ? ENXIO : 0); } uint32_t bwn_shm_read_4(struct bwn_mac *mac, uint16_t way, uint16_t offset) { uint32_t ret; BWN_ASSERT_LOCKED(mac->mac_sc); if (way == BWN_SHARED) { KASSERT((offset & 0x0001) == 0, ("%s:%d warn", __func__, __LINE__)); if (offset & 0x0003) { bwn_shm_ctlword(mac, way, offset >> 2); ret = BWN_READ_2(mac, BWN_SHM_DATA_UNALIGNED); ret <<= 16; bwn_shm_ctlword(mac, way, (offset >> 2) + 1); ret |= BWN_READ_2(mac, BWN_SHM_DATA); goto out; } offset >>= 2; } bwn_shm_ctlword(mac, way, offset); ret = BWN_READ_4(mac, BWN_SHM_DATA); out: return (ret); } uint16_t bwn_shm_read_2(struct bwn_mac *mac, uint16_t way, uint16_t offset) { uint16_t ret; BWN_ASSERT_LOCKED(mac->mac_sc); if (way == BWN_SHARED) { KASSERT((offset & 0x0001) == 0, ("%s:%d warn", __func__, __LINE__)); if (offset & 0x0003) { bwn_shm_ctlword(mac, way, offset >> 2); ret = BWN_READ_2(mac, BWN_SHM_DATA_UNALIGNED); goto out; } offset >>= 2; } bwn_shm_ctlword(mac, way, offset); ret = BWN_READ_2(mac, BWN_SHM_DATA); out: return (ret); } static void bwn_shm_ctlword(struct bwn_mac *mac, uint16_t way, uint16_t offset) { uint32_t control; control = way; control <<= 16; control |= offset; BWN_WRITE_4(mac, BWN_SHM_CONTROL, control); } void bwn_shm_write_4(struct bwn_mac *mac, uint16_t way, uint16_t offset, uint32_t value) { BWN_ASSERT_LOCKED(mac->mac_sc); if (way == BWN_SHARED) { KASSERT((offset & 0x0001) == 0, ("%s:%d warn", __func__, __LINE__)); if (offset & 0x0003) { bwn_shm_ctlword(mac, way, offset >> 2); BWN_WRITE_2(mac, BWN_SHM_DATA_UNALIGNED, (value >> 16) & 0xffff); bwn_shm_ctlword(mac, way, (offset >> 2) + 1); BWN_WRITE_2(mac, BWN_SHM_DATA, value & 0xffff); return; } offset >>= 2; } bwn_shm_ctlword(mac, way, offset); BWN_WRITE_4(mac, BWN_SHM_DATA, value); } void bwn_shm_write_2(struct bwn_mac *mac, uint16_t way, uint16_t offset, uint16_t value) { 
BWN_ASSERT_LOCKED(mac->mac_sc); if (way == BWN_SHARED) { KASSERT((offset & 0x0001) == 0, ("%s:%d warn", __func__, __LINE__)); if (offset & 0x0003) { bwn_shm_ctlword(mac, way, offset >> 2); BWN_WRITE_2(mac, BWN_SHM_DATA_UNALIGNED, value); return; } offset >>= 2; } bwn_shm_ctlword(mac, way, offset); BWN_WRITE_2(mac, BWN_SHM_DATA, value); } static void bwn_addchannels(struct ieee80211_channel chans[], int maxchans, int *nchans, const struct bwn_channelinfo *ci, const uint8_t bands[]) { int i, error; for (i = 0, error = 0; i < ci->nchannels && error == 0; i++) { const struct bwn_channel *hc = &ci->channels[i]; error = ieee80211_add_channel(chans, maxchans, nchans, hc->ieee, hc->freq, hc->maxTxPow, 0, bands); } } static int bwn_raw_xmit(struct ieee80211_node *ni, struct mbuf *m, const struct ieee80211_bpf_params *params) { struct ieee80211com *ic = ni->ni_ic; struct bwn_softc *sc = ic->ic_softc; struct bwn_mac *mac = sc->sc_curmac; int error; if ((sc->sc_flags & BWN_FLAG_RUNNING) == 0 || mac->mac_status < BWN_MAC_STATUS_STARTED) { m_freem(m); return (ENETDOWN); } BWN_LOCK(sc); if (bwn_tx_isfull(sc, m)) { m_freem(m); BWN_UNLOCK(sc); return (ENOBUFS); } error = bwn_tx_start(sc, ni, m); if (error == 0) sc->sc_watchdog_timer = 5; BWN_UNLOCK(sc); return (error); } /* * Callback from the 802.11 layer to update the slot time * based on the current setting. We use it to notify the * firmware of ERP changes and the f/w takes care of things * like slot time and preamble. */ static void bwn_updateslot(struct ieee80211com *ic) { struct bwn_softc *sc = ic->ic_softc; struct bwn_mac *mac; BWN_LOCK(sc); if (sc->sc_flags & BWN_FLAG_RUNNING) { mac = (struct bwn_mac *)sc->sc_curmac; bwn_set_slot_time(mac, IEEE80211_GET_SLOTTIME(ic)); } BWN_UNLOCK(sc); } /* * Callback from the 802.11 layer after a promiscuous mode change. * Note this interface does not check the operating mode as this * is an internal callback and we are expected to honor the current * state (e.g. this is used for setting the interface in promiscuous * mode when operating in hostap mode to do ACS). */ static void bwn_update_promisc(struct ieee80211com *ic) { struct bwn_softc *sc = ic->ic_softc; struct bwn_mac *mac = sc->sc_curmac; BWN_LOCK(sc); mac = sc->sc_curmac; if (mac != NULL && mac->mac_status >= BWN_MAC_STATUS_INITED) { if (ic->ic_promisc > 0) sc->sc_filters |= BWN_MACCTL_PROMISC; else sc->sc_filters &= ~BWN_MACCTL_PROMISC; bwn_set_opmode(mac); } BWN_UNLOCK(sc); } /* * Callback from the 802.11 layer to update WME parameters. 
*/ static int bwn_wme_update(struct ieee80211com *ic) { struct bwn_softc *sc = ic->ic_softc; struct bwn_mac *mac = sc->sc_curmac; struct chanAccParams chp; struct wmeParams *wmep; int i; ieee80211_wme_ic_getparams(ic, &chp); BWN_LOCK(sc); mac = sc->sc_curmac; if (mac != NULL && mac->mac_status >= BWN_MAC_STATUS_INITED) { bwn_mac_suspend(mac); for (i = 0; i < N(sc->sc_wmeParams); i++) { wmep = &chp.cap_wmeParams[i]; bwn_wme_loadparams(mac, wmep, bwn_wme_shm_offsets[i]); } bwn_mac_enable(mac); } BWN_UNLOCK(sc); return (0); } static void bwn_scan_start(struct ieee80211com *ic) { struct bwn_softc *sc = ic->ic_softc; struct bwn_mac *mac; BWN_LOCK(sc); mac = sc->sc_curmac; if (mac != NULL && mac->mac_status >= BWN_MAC_STATUS_INITED) { sc->sc_filters |= BWN_MACCTL_BEACON_PROMISC; bwn_set_opmode(mac); /* disable CFP update during scan */ bwn_hf_write(mac, bwn_hf_read(mac) | BWN_HF_SKIP_CFP_UPDATE); } BWN_UNLOCK(sc); } static void bwn_scan_end(struct ieee80211com *ic) { struct bwn_softc *sc = ic->ic_softc; struct bwn_mac *mac; BWN_LOCK(sc); mac = sc->sc_curmac; if (mac != NULL && mac->mac_status >= BWN_MAC_STATUS_INITED) { sc->sc_filters &= ~BWN_MACCTL_BEACON_PROMISC; bwn_set_opmode(mac); bwn_hf_write(mac, bwn_hf_read(mac) & ~BWN_HF_SKIP_CFP_UPDATE); } BWN_UNLOCK(sc); } static void bwn_set_channel(struct ieee80211com *ic) { struct bwn_softc *sc = ic->ic_softc; struct bwn_mac *mac = sc->sc_curmac; struct bwn_phy *phy = &mac->mac_phy; int chan, error; BWN_LOCK(sc); error = bwn_switch_band(sc, ic->ic_curchan); if (error) goto fail; bwn_mac_suspend(mac); bwn_set_txretry(mac, BWN_RETRY_SHORT, BWN_RETRY_LONG); chan = ieee80211_chan2ieee(ic, ic->ic_curchan); if (chan != phy->chan) bwn_switch_channel(mac, chan); /* TX power level */ if (ic->ic_curchan->ic_maxpower != 0 && ic->ic_curchan->ic_maxpower != phy->txpower) { phy->txpower = ic->ic_curchan->ic_maxpower / 2; bwn_phy_txpower_check(mac, BWN_TXPWR_IGNORE_TIME | BWN_TXPWR_IGNORE_TSSI); } bwn_set_txantenna(mac, BWN_ANT_DEFAULT); if (phy->set_antenna) phy->set_antenna(mac, BWN_ANT_DEFAULT); if (sc->sc_rf_enabled != phy->rf_on) { if (sc->sc_rf_enabled) { bwn_rf_turnon(mac); if (!(mac->mac_flags & BWN_MAC_FLAG_RADIO_ON)) device_printf(sc->sc_dev, "please turn on the RF switch\n"); } else bwn_rf_turnoff(mac); } bwn_mac_enable(mac); fail: /* * Setup radio tap channel freq and flags */ sc->sc_tx_th.wt_chan_freq = sc->sc_rx_th.wr_chan_freq = htole16(ic->ic_curchan->ic_freq); sc->sc_tx_th.wt_chan_flags = sc->sc_rx_th.wr_chan_flags = htole16(ic->ic_curchan->ic_flags & 0xffff); BWN_UNLOCK(sc); } static struct ieee80211vap * bwn_vap_create(struct ieee80211com *ic, const char name[IFNAMSIZ], int unit, enum ieee80211_opmode opmode, int flags, const uint8_t bssid[IEEE80211_ADDR_LEN], const uint8_t mac[IEEE80211_ADDR_LEN]) { struct ieee80211vap *vap; struct bwn_vap *bvp; switch (opmode) { case IEEE80211_M_HOSTAP: case IEEE80211_M_MBSS: case IEEE80211_M_STA: case IEEE80211_M_WDS: case IEEE80211_M_MONITOR: case IEEE80211_M_IBSS: case IEEE80211_M_AHDEMO: break; default: return (NULL); } bvp = malloc(sizeof(struct bwn_vap), M_80211_VAP, M_WAITOK | M_ZERO); vap = &bvp->bv_vap; ieee80211_vap_setup(ic, vap, name, unit, opmode, flags, bssid); /* override with driver methods */ bvp->bv_newstate = vap->iv_newstate; vap->iv_newstate = bwn_newstate; /* override max aid so sta's cannot assoc when we're out of sta id's */ vap->iv_max_aid = BWN_STAID_MAX; ieee80211_ratectl_init(vap); /* complete setup */ ieee80211_vap_attach(vap, ieee80211_media_change, ieee80211_media_status, mac); 
return (vap); } static void bwn_vap_delete(struct ieee80211vap *vap) { struct bwn_vap *bvp = BWN_VAP(vap); ieee80211_ratectl_deinit(vap); ieee80211_vap_detach(vap); free(bvp, M_80211_VAP); } static int bwn_init(struct bwn_softc *sc) { struct bwn_mac *mac; int error; BWN_ASSERT_LOCKED(sc); DPRINTF(sc, BWN_DEBUG_RESET, "%s: called\n", __func__); bzero(sc->sc_bssid, IEEE80211_ADDR_LEN); sc->sc_flags |= BWN_FLAG_NEED_BEACON_TP; sc->sc_filters = 0; bwn_wme_clear(sc); sc->sc_beacons[0] = sc->sc_beacons[1] = 0; sc->sc_rf_enabled = 1; mac = sc->sc_curmac; if (mac->mac_status == BWN_MAC_STATUS_UNINIT) { error = bwn_core_init(mac); if (error != 0) return (error); } if (mac->mac_status == BWN_MAC_STATUS_INITED) bwn_core_start(mac); bwn_set_opmode(mac); bwn_set_pretbtt(mac); bwn_spu_setdelay(mac, 0); bwn_set_macaddr(mac); sc->sc_flags |= BWN_FLAG_RUNNING; callout_reset(&sc->sc_rfswitch_ch, hz, bwn_rfswitch, sc); callout_reset(&sc->sc_watchdog_ch, hz, bwn_watchdog, sc); return (0); } static void bwn_stop(struct bwn_softc *sc) { struct bwn_mac *mac = sc->sc_curmac; BWN_ASSERT_LOCKED(sc); DPRINTF(sc, BWN_DEBUG_RESET, "%s: called\n", __func__); if (mac->mac_status >= BWN_MAC_STATUS_INITED) { /* XXX FIXME opmode not based on VAP */ bwn_set_opmode(mac); bwn_set_macaddr(mac); } if (mac->mac_status >= BWN_MAC_STATUS_STARTED) bwn_core_stop(mac); callout_stop(&sc->sc_led_blink_ch); sc->sc_led_blinking = 0; bwn_core_exit(mac); sc->sc_rf_enabled = 0; sc->sc_flags &= ~BWN_FLAG_RUNNING; } static void bwn_wme_clear(struct bwn_softc *sc) { #define MS(_v, _f) (((_v) & _f) >> _f##_S) struct wmeParams *p; unsigned int i; KASSERT(N(bwn_wme_shm_offsets) == N(sc->sc_wmeParams), ("%s:%d: fail", __func__, __LINE__)); for (i = 0; i < N(sc->sc_wmeParams); i++) { p = &(sc->sc_wmeParams[i]); switch (bwn_wme_shm_offsets[i]) { case BWN_WME_VOICE: p->wmep_txopLimit = 0; p->wmep_aifsn = 2; /* XXX FIXME: log2(cwmin) */ p->wmep_logcwmin = MS(0x0001, WME_PARAM_LOGCWMIN); p->wmep_logcwmax = MS(0x0001, WME_PARAM_LOGCWMAX); break; case BWN_WME_VIDEO: p->wmep_txopLimit = 0; p->wmep_aifsn = 2; /* XXX FIXME: log2(cwmin) */ p->wmep_logcwmin = MS(0x0001, WME_PARAM_LOGCWMIN); p->wmep_logcwmax = MS(0x0001, WME_PARAM_LOGCWMAX); break; case BWN_WME_BESTEFFORT: p->wmep_txopLimit = 0; p->wmep_aifsn = 3; /* XXX FIXME: log2(cwmin) */ p->wmep_logcwmin = MS(0x0001, WME_PARAM_LOGCWMIN); p->wmep_logcwmax = MS(0x03ff, WME_PARAM_LOGCWMAX); break; case BWN_WME_BACKGROUND: p->wmep_txopLimit = 0; p->wmep_aifsn = 7; /* XXX FIXME: log2(cwmin) */ p->wmep_logcwmin = MS(0x0001, WME_PARAM_LOGCWMIN); p->wmep_logcwmax = MS(0x03ff, WME_PARAM_LOGCWMAX); break; default: KASSERT(0 == 1, ("%s:%d: fail", __func__, __LINE__)); } } } static int bwn_core_init(struct bwn_mac *mac) { struct bwn_softc *sc = mac->mac_sc; uint64_t hf; int error; KASSERT(mac->mac_status == BWN_MAC_STATUS_UNINIT, ("%s:%d: fail", __func__, __LINE__)); DPRINTF(mac->mac_sc, BWN_DEBUG_RESET, "%s: called\n", __func__); siba_powerup(sc->sc_dev, 0); if (!siba_dev_isup(sc->sc_dev)) bwn_reset_core(mac, mac->mac_phy.gmode); mac->mac_flags &= ~BWN_MAC_FLAG_DFQVALID; mac->mac_flags |= BWN_MAC_FLAG_RADIO_ON; mac->mac_phy.hwpctl = (bwn_hwpctl) ? 
1 : 0; BWN_GETTIME(mac->mac_phy.nexttime); mac->mac_phy.txerrors = BWN_TXERROR_MAX; bzero(&mac->mac_stats, sizeof(mac->mac_stats)); mac->mac_stats.link_noise = -95; mac->mac_reason_intr = 0; bzero(mac->mac_reason, sizeof(mac->mac_reason)); mac->mac_intr_mask = BWN_INTR_MASKTEMPLATE; #ifdef BWN_DEBUG if (sc->sc_debug & BWN_DEBUG_XMIT) mac->mac_intr_mask &= ~BWN_INTR_PHY_TXERR; #endif mac->mac_suspended = 1; mac->mac_task_state = 0; memset(&mac->mac_noise, 0, sizeof(mac->mac_noise)); mac->mac_phy.init_pre(mac); siba_pcicore_intr(sc->sc_dev); siba_fix_imcfglobug(sc->sc_dev); bwn_bt_disable(mac); if (mac->mac_phy.prepare_hw) { error = mac->mac_phy.prepare_hw(mac); if (error) goto fail0; } DPRINTF(mac->mac_sc, BWN_DEBUG_RESET, "%s: chip_init\n", __func__); error = bwn_chip_init(mac); if (error) goto fail0; bwn_shm_write_2(mac, BWN_SHARED, BWN_SHARED_COREREV, siba_get_revid(sc->sc_dev)); hf = bwn_hf_read(mac); if (mac->mac_phy.type == BWN_PHYTYPE_G) { hf |= BWN_HF_GPHY_SYM_WORKAROUND; if (siba_sprom_get_bf_lo(sc->sc_dev) & BWN_BFL_PACTRL) hf |= BWN_HF_PAGAINBOOST_OFDM_ON; if (mac->mac_phy.rev == 1) hf |= BWN_HF_GPHY_DC_CANCELFILTER; } if (mac->mac_phy.rf_ver == 0x2050) { if (mac->mac_phy.rf_rev < 6) hf |= BWN_HF_FORCE_VCO_RECALC; if (mac->mac_phy.rf_rev == 6) hf |= BWN_HF_4318_TSSI; } if (siba_sprom_get_bf_lo(sc->sc_dev) & BWN_BFL_CRYSTAL_NOSLOW) hf |= BWN_HF_SLOWCLOCK_REQ_OFF; if ((siba_get_type(sc->sc_dev) == SIBA_TYPE_PCI) && (siba_get_pcicore_revid(sc->sc_dev) <= 10)) hf |= BWN_HF_PCI_SLOWCLOCK_WORKAROUND; hf &= ~BWN_HF_SKIP_CFP_UPDATE; bwn_hf_write(mac, hf); /* Tell the firmware about the MAC capabilities */ if (siba_get_revid(sc->sc_dev) >= 13) { uint32_t cap; cap = BWN_READ_4(mac, BWN_MAC_HW_CAP); DPRINTF(sc, BWN_DEBUG_RESET, "%s: hw capabilities: 0x%08x\n", __func__, cap); bwn_shm_write_2(mac, BWN_SHARED, BWN_SHARED_MACHW_L, cap & 0xffff); bwn_shm_write_2(mac, BWN_SHARED, BWN_SHARED_MACHW_H, (cap >> 16) & 0xffff); } bwn_set_txretry(mac, BWN_RETRY_SHORT, BWN_RETRY_LONG); bwn_shm_write_2(mac, BWN_SHARED, BWN_SHARED_SHORT_RETRY_FALLBACK, 3); bwn_shm_write_2(mac, BWN_SHARED, BWN_SHARED_LONG_RETRY_FALLBACK, 2); bwn_shm_write_2(mac, BWN_SHARED, BWN_SHARED_PROBE_RESP_MAXTIME, 1); bwn_rate_init(mac); bwn_set_phytxctl(mac); bwn_shm_write_2(mac, BWN_SCRATCH, BWN_SCRATCH_CONT_MIN, (mac->mac_phy.type == BWN_PHYTYPE_B) ? 
0x1f : 0xf); bwn_shm_write_2(mac, BWN_SCRATCH, BWN_SCRATCH_CONT_MAX, 0x3ff); if (siba_get_type(sc->sc_dev) == SIBA_TYPE_PCMCIA || bwn_usedma == 0) bwn_pio_init(mac); else bwn_dma_init(mac); bwn_wme_init(mac); bwn_spu_setdelay(mac, 1); bwn_bt_enable(mac); DPRINTF(mac->mac_sc, BWN_DEBUG_RESET, "%s: powerup\n", __func__); siba_powerup(sc->sc_dev, !(siba_sprom_get_bf_lo(sc->sc_dev) & BWN_BFL_CRYSTAL_NOSLOW)); bwn_set_macaddr(mac); bwn_crypt_init(mac); /* XXX LED initializatin */ mac->mac_status = BWN_MAC_STATUS_INITED; DPRINTF(mac->mac_sc, BWN_DEBUG_RESET, "%s: done\n", __func__); return (error); fail0: siba_powerdown(sc->sc_dev); KASSERT(mac->mac_status == BWN_MAC_STATUS_UNINIT, ("%s:%d: fail", __func__, __LINE__)); DPRINTF(mac->mac_sc, BWN_DEBUG_RESET, "%s: fail\n", __func__); return (error); } static void bwn_core_start(struct bwn_mac *mac) { struct bwn_softc *sc = mac->mac_sc; uint32_t tmp; KASSERT(mac->mac_status == BWN_MAC_STATUS_INITED, ("%s:%d: fail", __func__, __LINE__)); if (siba_get_revid(sc->sc_dev) < 5) return; while (1) { tmp = BWN_READ_4(mac, BWN_XMITSTAT_0); if (!(tmp & 0x00000001)) break; tmp = BWN_READ_4(mac, BWN_XMITSTAT_1); } bwn_mac_enable(mac); BWN_WRITE_4(mac, BWN_INTR_MASK, mac->mac_intr_mask); callout_reset(&sc->sc_task_ch, hz * 15, bwn_tasks, mac); mac->mac_status = BWN_MAC_STATUS_STARTED; } static void bwn_core_exit(struct bwn_mac *mac) { struct bwn_softc *sc = mac->mac_sc; uint32_t macctl; BWN_ASSERT_LOCKED(mac->mac_sc); KASSERT(mac->mac_status <= BWN_MAC_STATUS_INITED, ("%s:%d: fail", __func__, __LINE__)); if (mac->mac_status != BWN_MAC_STATUS_INITED) return; mac->mac_status = BWN_MAC_STATUS_UNINIT; macctl = BWN_READ_4(mac, BWN_MACCTL); macctl &= ~BWN_MACCTL_MCODE_RUN; macctl |= BWN_MACCTL_MCODE_JMP0; BWN_WRITE_4(mac, BWN_MACCTL, macctl); bwn_dma_stop(mac); bwn_pio_stop(mac); bwn_chip_exit(mac); mac->mac_phy.switch_analog(mac, 0); siba_dev_down(sc->sc_dev, 0); siba_powerdown(sc->sc_dev); } static void bwn_bt_disable(struct bwn_mac *mac) { struct bwn_softc *sc = mac->mac_sc; (void)sc; /* XXX do nothing yet */ } static int bwn_chip_init(struct bwn_mac *mac) { struct bwn_softc *sc = mac->mac_sc; struct bwn_phy *phy = &mac->mac_phy; uint32_t macctl; int error; macctl = BWN_MACCTL_IHR_ON | BWN_MACCTL_SHM_ON | BWN_MACCTL_STA; if (phy->gmode) macctl |= BWN_MACCTL_GMODE; BWN_WRITE_4(mac, BWN_MACCTL, macctl); error = bwn_fw_fillinfo(mac); if (error) return (error); error = bwn_fw_loaducode(mac); if (error) return (error); error = bwn_gpio_init(mac); if (error) return (error); error = bwn_fw_loadinitvals(mac); if (error) { siba_gpio_set(sc->sc_dev, 0); return (error); } phy->switch_analog(mac, 1); error = bwn_phy_init(mac); if (error) { siba_gpio_set(sc->sc_dev, 0); return (error); } if (phy->set_im) phy->set_im(mac, BWN_IMMODE_NONE); if (phy->set_antenna) phy->set_antenna(mac, BWN_ANT_DEFAULT); bwn_set_txantenna(mac, BWN_ANT_DEFAULT); if (phy->type == BWN_PHYTYPE_B) BWN_WRITE_2(mac, 0x005e, BWN_READ_2(mac, 0x005e) | 0x0004); BWN_WRITE_4(mac, 0x0100, 0x01000000); if (siba_get_revid(sc->sc_dev) < 5) BWN_WRITE_4(mac, 0x010c, 0x01000000); BWN_WRITE_4(mac, BWN_MACCTL, BWN_READ_4(mac, BWN_MACCTL) & ~BWN_MACCTL_STA); BWN_WRITE_4(mac, BWN_MACCTL, BWN_READ_4(mac, BWN_MACCTL) | BWN_MACCTL_STA); bwn_shm_write_2(mac, BWN_SHARED, 0x0074, 0x0000); bwn_set_opmode(mac); if (siba_get_revid(sc->sc_dev) < 3) { BWN_WRITE_2(mac, 0x060e, 0x0000); BWN_WRITE_2(mac, 0x0610, 0x8000); BWN_WRITE_2(mac, 0x0604, 0x0000); BWN_WRITE_2(mac, 0x0606, 0x0200); } else { BWN_WRITE_4(mac, 0x0188, 0x80000000); 
BWN_WRITE_4(mac, 0x018c, 0x02000000); } BWN_WRITE_4(mac, BWN_INTR_REASON, 0x00004000); BWN_WRITE_4(mac, BWN_DMA0_INTR_MASK, 0x0001dc00); BWN_WRITE_4(mac, BWN_DMA1_INTR_MASK, 0x0000dc00); BWN_WRITE_4(mac, BWN_DMA2_INTR_MASK, 0x0000dc00); BWN_WRITE_4(mac, BWN_DMA3_INTR_MASK, 0x0001dc00); BWN_WRITE_4(mac, BWN_DMA4_INTR_MASK, 0x0000dc00); BWN_WRITE_4(mac, BWN_DMA5_INTR_MASK, 0x0000dc00); bwn_mac_phy_clock_set(mac, true); /* SIBA powerup */ BWN_WRITE_2(mac, BWN_POWERUP_DELAY, siba_get_cc_powerdelay(sc->sc_dev)); return (error); } /* read hostflags */ uint64_t bwn_hf_read(struct bwn_mac *mac) { uint64_t ret; ret = bwn_shm_read_2(mac, BWN_SHARED, BWN_SHARED_HFHI); ret <<= 16; ret |= bwn_shm_read_2(mac, BWN_SHARED, BWN_SHARED_HFMI); ret <<= 16; ret |= bwn_shm_read_2(mac, BWN_SHARED, BWN_SHARED_HFLO); return (ret); } void bwn_hf_write(struct bwn_mac *mac, uint64_t value) { bwn_shm_write_2(mac, BWN_SHARED, BWN_SHARED_HFLO, (value & 0x00000000ffffull)); bwn_shm_write_2(mac, BWN_SHARED, BWN_SHARED_HFMI, (value & 0x0000ffff0000ull) >> 16); bwn_shm_write_2(mac, BWN_SHARED, BWN_SHARED_HFHI, (value & 0xffff00000000ULL) >> 32); } static void bwn_set_txretry(struct bwn_mac *mac, int s, int l) { bwn_shm_write_2(mac, BWN_SCRATCH, BWN_SCRATCH_SHORT_RETRY, MIN(s, 0xf)); bwn_shm_write_2(mac, BWN_SCRATCH, BWN_SCRATCH_LONG_RETRY, MIN(l, 0xf)); } static void bwn_rate_init(struct bwn_mac *mac) { switch (mac->mac_phy.type) { case BWN_PHYTYPE_A: case BWN_PHYTYPE_G: case BWN_PHYTYPE_LP: case BWN_PHYTYPE_N: bwn_rate_write(mac, BWN_OFDM_RATE_6MB, 1); bwn_rate_write(mac, BWN_OFDM_RATE_12MB, 1); bwn_rate_write(mac, BWN_OFDM_RATE_18MB, 1); bwn_rate_write(mac, BWN_OFDM_RATE_24MB, 1); bwn_rate_write(mac, BWN_OFDM_RATE_36MB, 1); bwn_rate_write(mac, BWN_OFDM_RATE_48MB, 1); bwn_rate_write(mac, BWN_OFDM_RATE_54MB, 1); if (mac->mac_phy.type == BWN_PHYTYPE_A) break; /* FALLTHROUGH */ case BWN_PHYTYPE_B: bwn_rate_write(mac, BWN_CCK_RATE_1MB, 0); bwn_rate_write(mac, BWN_CCK_RATE_2MB, 0); bwn_rate_write(mac, BWN_CCK_RATE_5MB, 0); bwn_rate_write(mac, BWN_CCK_RATE_11MB, 0); break; default: KASSERT(0 == 1, ("%s:%d: fail", __func__, __LINE__)); } } static void bwn_rate_write(struct bwn_mac *mac, uint16_t rate, int ofdm) { uint16_t offset; if (ofdm) { offset = 0x480; offset += (bwn_plcp_getofdm(rate) & 0x000f) * 2; } else { offset = 0x4c0; offset += (bwn_plcp_getcck(rate) & 0x000f) * 2; } bwn_shm_write_2(mac, BWN_SHARED, offset + 0x20, bwn_shm_read_2(mac, BWN_SHARED, offset)); } static uint8_t bwn_plcp_getcck(const uint8_t bitrate) { switch (bitrate) { case BWN_CCK_RATE_1MB: return (0x0a); case BWN_CCK_RATE_2MB: return (0x14); case BWN_CCK_RATE_5MB: return (0x37); case BWN_CCK_RATE_11MB: return (0x6e); } KASSERT(0 == 1, ("%s:%d: fail", __func__, __LINE__)); return (0); } static uint8_t bwn_plcp_getofdm(const uint8_t bitrate) { switch (bitrate) { case BWN_OFDM_RATE_6MB: return (0xb); case BWN_OFDM_RATE_9MB: return (0xf); case BWN_OFDM_RATE_12MB: return (0xa); case BWN_OFDM_RATE_18MB: return (0xe); case BWN_OFDM_RATE_24MB: return (0x9); case BWN_OFDM_RATE_36MB: return (0xd); case BWN_OFDM_RATE_48MB: return (0x8); case BWN_OFDM_RATE_54MB: return (0xc); } KASSERT(0 == 1, ("%s:%d: fail", __func__, __LINE__)); return (0); } static void bwn_set_phytxctl(struct bwn_mac *mac) { uint16_t ctl; ctl = (BWN_TX_PHY_ENC_CCK | BWN_TX_PHY_ANT01AUTO | BWN_TX_PHY_TXPWR); bwn_shm_write_2(mac, BWN_SHARED, BWN_SHARED_BEACON_PHYCTL, ctl); bwn_shm_write_2(mac, BWN_SHARED, BWN_SHARED_ACKCTS_PHYCTL, ctl); bwn_shm_write_2(mac, BWN_SHARED, 
BWN_SHARED_PROBE_RESP_PHYCTL, ctl); } static void bwn_pio_init(struct bwn_mac *mac) { struct bwn_pio *pio = &mac->mac_method.pio; BWN_WRITE_4(mac, BWN_MACCTL, BWN_READ_4(mac, BWN_MACCTL) & ~BWN_MACCTL_BIGENDIAN); bwn_shm_write_2(mac, BWN_SHARED, BWN_SHARED_RX_PADOFFSET, 0); bwn_pio_set_txqueue(mac, &pio->wme[WME_AC_BK], 0); bwn_pio_set_txqueue(mac, &pio->wme[WME_AC_BE], 1); bwn_pio_set_txqueue(mac, &pio->wme[WME_AC_VI], 2); bwn_pio_set_txqueue(mac, &pio->wme[WME_AC_VO], 3); bwn_pio_set_txqueue(mac, &pio->mcast, 4); bwn_pio_setupqueue_rx(mac, &pio->rx, 0); } static void bwn_pio_set_txqueue(struct bwn_mac *mac, struct bwn_pio_txqueue *tq, int index) { struct bwn_pio_txpkt *tp; struct bwn_softc *sc = mac->mac_sc; unsigned int i; tq->tq_base = bwn_pio_idx2base(mac, index) + BWN_PIO_TXQOFFSET(mac); tq->tq_index = index; tq->tq_free = BWN_PIO_MAX_TXPACKETS; if (siba_get_revid(sc->sc_dev) >= 8) tq->tq_size = 1920; else { tq->tq_size = bwn_pio_read_2(mac, tq, BWN_PIO_TXQBUFSIZE); tq->tq_size -= 80; } TAILQ_INIT(&tq->tq_pktlist); for (i = 0; i < N(tq->tq_pkts); i++) { tp = &(tq->tq_pkts[i]); tp->tp_index = i; tp->tp_queue = tq; TAILQ_INSERT_TAIL(&tq->tq_pktlist, tp, tp_list); } } static uint16_t bwn_pio_idx2base(struct bwn_mac *mac, int index) { struct bwn_softc *sc = mac->mac_sc; static const uint16_t bases[] = { BWN_PIO_BASE0, BWN_PIO_BASE1, BWN_PIO_BASE2, BWN_PIO_BASE3, BWN_PIO_BASE4, BWN_PIO_BASE5, BWN_PIO_BASE6, BWN_PIO_BASE7, }; static const uint16_t bases_rev11[] = { BWN_PIO11_BASE0, BWN_PIO11_BASE1, BWN_PIO11_BASE2, BWN_PIO11_BASE3, BWN_PIO11_BASE4, BWN_PIO11_BASE5, }; if (siba_get_revid(sc->sc_dev) >= 11) { if (index >= N(bases_rev11)) device_printf(sc->sc_dev, "%s: warning\n", __func__); return (bases_rev11[index]); } if (index >= N(bases)) device_printf(sc->sc_dev, "%s: warning\n", __func__); return (bases[index]); } static void bwn_pio_setupqueue_rx(struct bwn_mac *mac, struct bwn_pio_rxqueue *prq, int index) { struct bwn_softc *sc = mac->mac_sc; prq->prq_mac = mac; prq->prq_rev = siba_get_revid(sc->sc_dev); prq->prq_base = bwn_pio_idx2base(mac, index) + BWN_PIO_RXQOFFSET(mac); bwn_dma_rxdirectfifo(mac, index, 1); } static void bwn_destroy_pioqueue_tx(struct bwn_pio_txqueue *tq) { if (tq == NULL) return; bwn_pio_cancel_tx_packets(tq); } static void bwn_destroy_queue_tx(struct bwn_pio_txqueue *pio) { bwn_destroy_pioqueue_tx(pio); } static uint16_t bwn_pio_read_2(struct bwn_mac *mac, struct bwn_pio_txqueue *tq, uint16_t offset) { return (BWN_READ_2(mac, tq->tq_base + offset)); } static void bwn_dma_rxdirectfifo(struct bwn_mac *mac, int idx, uint8_t enable) { uint32_t ctl; int type; uint16_t base; type = bwn_dma_mask2type(bwn_dma_mask(mac)); base = bwn_dma_base(type, idx); if (type == BWN_DMA_64BIT) { ctl = BWN_READ_4(mac, base + BWN_DMA64_RXCTL); ctl &= ~BWN_DMA64_RXDIRECTFIFO; if (enable) ctl |= BWN_DMA64_RXDIRECTFIFO; BWN_WRITE_4(mac, base + BWN_DMA64_RXCTL, ctl); } else { ctl = BWN_READ_4(mac, base + BWN_DMA32_RXCTL); ctl &= ~BWN_DMA32_RXDIRECTFIFO; if (enable) ctl |= BWN_DMA32_RXDIRECTFIFO; BWN_WRITE_4(mac, base + BWN_DMA32_RXCTL, ctl); } } static uint64_t bwn_dma_mask(struct bwn_mac *mac) { uint32_t tmp; uint16_t base; tmp = BWN_READ_4(mac, SIBA_TGSHIGH); if (tmp & SIBA_TGSHIGH_DMA64) return (BWN_DMA_BIT_MASK(64)); base = bwn_dma_base(0, 0); BWN_WRITE_4(mac, base + BWN_DMA32_TXCTL, BWN_DMA32_TXADDREXT_MASK); tmp = BWN_READ_4(mac, base + BWN_DMA32_TXCTL); if (tmp & BWN_DMA32_TXADDREXT_MASK) return (BWN_DMA_BIT_MASK(32)); return (BWN_DMA_BIT_MASK(30)); } static int 
bwn_dma_mask2type(uint64_t dmamask) { if (dmamask == BWN_DMA_BIT_MASK(30)) return (BWN_DMA_30BIT); if (dmamask == BWN_DMA_BIT_MASK(32)) return (BWN_DMA_32BIT); if (dmamask == BWN_DMA_BIT_MASK(64)) return (BWN_DMA_64BIT); KASSERT(0 == 1, ("%s:%d: fail", __func__, __LINE__)); return (BWN_DMA_30BIT); } static void bwn_pio_cancel_tx_packets(struct bwn_pio_txqueue *tq) { struct bwn_pio_txpkt *tp; unsigned int i; for (i = 0; i < N(tq->tq_pkts); i++) { tp = &(tq->tq_pkts[i]); if (tp->tp_m) { m_freem(tp->tp_m); tp->tp_m = NULL; } } } static uint16_t bwn_dma_base(int type, int controller_idx) { static const uint16_t map64[] = { BWN_DMA64_BASE0, BWN_DMA64_BASE1, BWN_DMA64_BASE2, BWN_DMA64_BASE3, BWN_DMA64_BASE4, BWN_DMA64_BASE5, }; static const uint16_t map32[] = { BWN_DMA32_BASE0, BWN_DMA32_BASE1, BWN_DMA32_BASE2, BWN_DMA32_BASE3, BWN_DMA32_BASE4, BWN_DMA32_BASE5, }; if (type == BWN_DMA_64BIT) { KASSERT(controller_idx >= 0 && controller_idx < N(map64), ("%s:%d: fail", __func__, __LINE__)); return (map64[controller_idx]); } KASSERT(controller_idx >= 0 && controller_idx < N(map32), ("%s:%d: fail", __func__, __LINE__)); return (map32[controller_idx]); } static void bwn_dma_init(struct bwn_mac *mac) { struct bwn_dma *dma = &mac->mac_method.dma; /* setup TX DMA channels. */ bwn_dma_setup(dma->wme[WME_AC_BK]); bwn_dma_setup(dma->wme[WME_AC_BE]); bwn_dma_setup(dma->wme[WME_AC_VI]); bwn_dma_setup(dma->wme[WME_AC_VO]); bwn_dma_setup(dma->mcast); /* setup RX DMA channel. */ bwn_dma_setup(dma->rx); } static struct bwn_dma_ring * bwn_dma_ringsetup(struct bwn_mac *mac, int controller_index, int for_tx, int type) { struct bwn_dma *dma = &mac->mac_method.dma; struct bwn_dma_ring *dr; struct bwn_dmadesc_generic *desc; struct bwn_dmadesc_meta *mt; struct bwn_softc *sc = mac->mac_sc; int error, i; dr = malloc(sizeof(*dr), M_DEVBUF, M_NOWAIT | M_ZERO); if (dr == NULL) goto out; dr->dr_numslots = BWN_RXRING_SLOTS; if (for_tx) dr->dr_numslots = BWN_TXRING_SLOTS; - dr->dr_meta = malloc(dr->dr_numslots * sizeof(struct bwn_dmadesc_meta), - M_DEVBUF, M_NOWAIT | M_ZERO); + dr->dr_meta = mallocarray(dr->dr_numslots, + sizeof(struct bwn_dmadesc_meta), M_DEVBUF, M_NOWAIT | M_ZERO); if (dr->dr_meta == NULL) goto fail0; dr->dr_type = type; dr->dr_mac = mac; dr->dr_base = bwn_dma_base(type, controller_index); dr->dr_index = controller_index; if (type == BWN_DMA_64BIT) { dr->getdesc = bwn_dma_64_getdesc; dr->setdesc = bwn_dma_64_setdesc; dr->start_transfer = bwn_dma_64_start_transfer; dr->suspend = bwn_dma_64_suspend; dr->resume = bwn_dma_64_resume; dr->get_curslot = bwn_dma_64_get_curslot; dr->set_curslot = bwn_dma_64_set_curslot; } else { dr->getdesc = bwn_dma_32_getdesc; dr->setdesc = bwn_dma_32_setdesc; dr->start_transfer = bwn_dma_32_start_transfer; dr->suspend = bwn_dma_32_suspend; dr->resume = bwn_dma_32_resume; dr->get_curslot = bwn_dma_32_get_curslot; dr->set_curslot = bwn_dma_32_set_curslot; } if (for_tx) { dr->dr_tx = 1; dr->dr_curslot = -1; } else { if (dr->dr_index == 0) { switch (mac->mac_fw.fw_hdr_format) { case BWN_FW_HDR_351: case BWN_FW_HDR_410: dr->dr_rx_bufsize = BWN_DMA0_RX_BUFFERSIZE_FW351; dr->dr_frameoffset = BWN_DMA0_RX_FRAMEOFFSET_FW351; break; case BWN_FW_HDR_598: dr->dr_rx_bufsize = BWN_DMA0_RX_BUFFERSIZE_FW598; dr->dr_frameoffset = BWN_DMA0_RX_FRAMEOFFSET_FW598; break; } } else KASSERT(0 == 1, ("%s:%d: fail", __func__, __LINE__)); } error = bwn_dma_allocringmemory(dr); if (error) goto fail2; if (for_tx) { /* * Assumption: BWN_TXRING_SLOTS can be divided by * BWN_TX_SLOTS_PER_FRAME */ 
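/*
 * Editor's note (illustrative, not driver code): the mallocarray() hunk
 * earlier in this function replaces malloc(nmemb * size) so that an
 * overflowing element count is detected instead of silently allocating a
 * short buffer.  A minimal userland sketch of the same guard, using a
 * hypothetical xmallocarray() helper, might look like:
 *
 *	#include <stdint.h>
 *	#include <stdlib.h>
 *
 *	static void *
 *	xmallocarray(size_t nmemb, size_t size)
 *	{
 *		// Refuse requests whose byte count would overflow size_t.
 *		if (size != 0 && nmemb > SIZE_MAX / size)
 *			return (NULL);
 *		return (malloc(nmemb * size));
 *	}
 *
 * mallocarray(9) applies the same kind of overflow check before handing
 * the request to malloc(9) with the given type and flags (M_DEVBUF,
 * M_NOWAIT | M_ZERO in the hunk above).
 */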
KASSERT(BWN_TXRING_SLOTS % BWN_TX_SLOTS_PER_FRAME == 0, ("%s:%d: fail", __func__, __LINE__)); dr->dr_txhdr_cache = contigmalloc( (dr->dr_numslots / BWN_TX_SLOTS_PER_FRAME) * BWN_MAXTXHDRSIZE, M_DEVBUF, M_ZERO, 0, BUS_SPACE_MAXADDR, 8, 0); if (dr->dr_txhdr_cache == NULL) { device_printf(sc->sc_dev, "can't allocate TX header DMA memory\n"); goto fail1; } /* * Create TX ring DMA stuffs */ error = bus_dma_tag_create(dma->parent_dtag, BWN_ALIGN, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL, BWN_HDRSIZE(mac), 1, BUS_SPACE_MAXSIZE_32BIT, 0, NULL, NULL, &dr->dr_txring_dtag); if (error) { device_printf(sc->sc_dev, "can't create TX ring DMA tag: TODO frees\n"); goto fail2; } for (i = 0; i < dr->dr_numslots; i += 2) { dr->getdesc(dr, i, &desc, &mt); mt->mt_txtype = BWN_DMADESC_METATYPE_HEADER; mt->mt_m = NULL; mt->mt_ni = NULL; mt->mt_islast = 0; error = bus_dmamap_create(dr->dr_txring_dtag, 0, &mt->mt_dmap); if (error) { device_printf(sc->sc_dev, "can't create RX buf DMA map\n"); goto fail2; } dr->getdesc(dr, i + 1, &desc, &mt); mt->mt_txtype = BWN_DMADESC_METATYPE_BODY; mt->mt_m = NULL; mt->mt_ni = NULL; mt->mt_islast = 1; error = bus_dmamap_create(dma->txbuf_dtag, 0, &mt->mt_dmap); if (error) { device_printf(sc->sc_dev, "can't create RX buf DMA map\n"); goto fail2; } } } else { error = bus_dmamap_create(dma->rxbuf_dtag, 0, &dr->dr_spare_dmap); if (error) { device_printf(sc->sc_dev, "can't create RX buf DMA map\n"); goto out; /* XXX wrong! */ } for (i = 0; i < dr->dr_numslots; i++) { dr->getdesc(dr, i, &desc, &mt); error = bus_dmamap_create(dma->rxbuf_dtag, 0, &mt->mt_dmap); if (error) { device_printf(sc->sc_dev, "can't create RX buf DMA map\n"); goto out; /* XXX wrong! */ } error = bwn_dma_newbuf(dr, desc, mt, 1); if (error) { device_printf(sc->sc_dev, "failed to allocate RX buf\n"); goto out; /* XXX wrong! 
*/ } } bus_dmamap_sync(dr->dr_ring_dtag, dr->dr_ring_dmap, BUS_DMASYNC_PREWRITE); dr->dr_usedslot = dr->dr_numslots; } out: return (dr); fail2: if (dr->dr_txhdr_cache != NULL) { contigfree(dr->dr_txhdr_cache, (dr->dr_numslots / BWN_TX_SLOTS_PER_FRAME) * BWN_MAXTXHDRSIZE, M_DEVBUF); } fail1: free(dr->dr_meta, M_DEVBUF); fail0: free(dr, M_DEVBUF); return (NULL); } static void bwn_dma_ringfree(struct bwn_dma_ring **dr) { if (dr == NULL) return; bwn_dma_free_descbufs(*dr); bwn_dma_free_ringmemory(*dr); if ((*dr)->dr_txhdr_cache != NULL) { contigfree((*dr)->dr_txhdr_cache, ((*dr)->dr_numslots / BWN_TX_SLOTS_PER_FRAME) * BWN_MAXTXHDRSIZE, M_DEVBUF); } free((*dr)->dr_meta, M_DEVBUF); free(*dr, M_DEVBUF); *dr = NULL; } static void bwn_dma_32_getdesc(struct bwn_dma_ring *dr, int slot, struct bwn_dmadesc_generic **gdesc, struct bwn_dmadesc_meta **meta) { struct bwn_dmadesc32 *desc; *meta = &(dr->dr_meta[slot]); desc = dr->dr_ring_descbase; desc = &(desc[slot]); *gdesc = (struct bwn_dmadesc_generic *)desc; } static void bwn_dma_32_setdesc(struct bwn_dma_ring *dr, struct bwn_dmadesc_generic *desc, bus_addr_t dmaaddr, uint16_t bufsize, int start, int end, int irq) { struct bwn_dmadesc32 *descbase = dr->dr_ring_descbase; struct bwn_softc *sc = dr->dr_mac->mac_sc; uint32_t addr, addrext, ctl; int slot; slot = (int)(&(desc->dma.dma32) - descbase); KASSERT(slot >= 0 && slot < dr->dr_numslots, ("%s:%d: fail", __func__, __LINE__)); addr = (uint32_t) (dmaaddr & ~SIBA_DMA_TRANSLATION_MASK); addrext = (uint32_t) (dmaaddr & SIBA_DMA_TRANSLATION_MASK) >> 30; addr |= siba_dma_translation(sc->sc_dev); ctl = bufsize & BWN_DMA32_DCTL_BYTECNT; if (slot == dr->dr_numslots - 1) ctl |= BWN_DMA32_DCTL_DTABLEEND; if (start) ctl |= BWN_DMA32_DCTL_FRAMESTART; if (end) ctl |= BWN_DMA32_DCTL_FRAMEEND; if (irq) ctl |= BWN_DMA32_DCTL_IRQ; ctl |= (addrext << BWN_DMA32_DCTL_ADDREXT_SHIFT) & BWN_DMA32_DCTL_ADDREXT_MASK; desc->dma.dma32.control = htole32(ctl); desc->dma.dma32.address = htole32(addr); } static void bwn_dma_32_start_transfer(struct bwn_dma_ring *dr, int slot) { BWN_DMA_WRITE(dr, BWN_DMA32_TXINDEX, (uint32_t)(slot * sizeof(struct bwn_dmadesc32))); } static void bwn_dma_32_suspend(struct bwn_dma_ring *dr) { BWN_DMA_WRITE(dr, BWN_DMA32_TXCTL, BWN_DMA_READ(dr, BWN_DMA32_TXCTL) | BWN_DMA32_TXSUSPEND); } static void bwn_dma_32_resume(struct bwn_dma_ring *dr) { BWN_DMA_WRITE(dr, BWN_DMA32_TXCTL, BWN_DMA_READ(dr, BWN_DMA32_TXCTL) & ~BWN_DMA32_TXSUSPEND); } static int bwn_dma_32_get_curslot(struct bwn_dma_ring *dr) { uint32_t val; val = BWN_DMA_READ(dr, BWN_DMA32_RXSTATUS); val &= BWN_DMA32_RXDPTR; return (val / sizeof(struct bwn_dmadesc32)); } static void bwn_dma_32_set_curslot(struct bwn_dma_ring *dr, int slot) { BWN_DMA_WRITE(dr, BWN_DMA32_RXINDEX, (uint32_t) (slot * sizeof(struct bwn_dmadesc32))); } static void bwn_dma_64_getdesc(struct bwn_dma_ring *dr, int slot, struct bwn_dmadesc_generic **gdesc, struct bwn_dmadesc_meta **meta) { struct bwn_dmadesc64 *desc; *meta = &(dr->dr_meta[slot]); desc = dr->dr_ring_descbase; desc = &(desc[slot]); *gdesc = (struct bwn_dmadesc_generic *)desc; } static void bwn_dma_64_setdesc(struct bwn_dma_ring *dr, struct bwn_dmadesc_generic *desc, bus_addr_t dmaaddr, uint16_t bufsize, int start, int end, int irq) { struct bwn_dmadesc64 *descbase = dr->dr_ring_descbase; struct bwn_softc *sc = dr->dr_mac->mac_sc; int slot; uint32_t ctl0 = 0, ctl1 = 0; uint32_t addrlo, addrhi; uint32_t addrext; slot = (int)(&(desc->dma.dma64) - descbase); KASSERT(slot >= 0 && slot < dr->dr_numslots, ("%s:%d: fail", 
__func__, __LINE__)); addrlo = (uint32_t) (dmaaddr & 0xffffffff); addrhi = (((uint64_t) dmaaddr >> 32) & ~SIBA_DMA_TRANSLATION_MASK); addrext = (((uint64_t) dmaaddr >> 32) & SIBA_DMA_TRANSLATION_MASK) >> 30; addrhi |= (siba_dma_translation(sc->sc_dev) << 1); if (slot == dr->dr_numslots - 1) ctl0 |= BWN_DMA64_DCTL0_DTABLEEND; if (start) ctl0 |= BWN_DMA64_DCTL0_FRAMESTART; if (end) ctl0 |= BWN_DMA64_DCTL0_FRAMEEND; if (irq) ctl0 |= BWN_DMA64_DCTL0_IRQ; ctl1 |= bufsize & BWN_DMA64_DCTL1_BYTECNT; ctl1 |= (addrext << BWN_DMA64_DCTL1_ADDREXT_SHIFT) & BWN_DMA64_DCTL1_ADDREXT_MASK; desc->dma.dma64.control0 = htole32(ctl0); desc->dma.dma64.control1 = htole32(ctl1); desc->dma.dma64.address_low = htole32(addrlo); desc->dma.dma64.address_high = htole32(addrhi); } static void bwn_dma_64_start_transfer(struct bwn_dma_ring *dr, int slot) { BWN_DMA_WRITE(dr, BWN_DMA64_TXINDEX, (uint32_t)(slot * sizeof(struct bwn_dmadesc64))); } static void bwn_dma_64_suspend(struct bwn_dma_ring *dr) { BWN_DMA_WRITE(dr, BWN_DMA64_TXCTL, BWN_DMA_READ(dr, BWN_DMA64_TXCTL) | BWN_DMA64_TXSUSPEND); } static void bwn_dma_64_resume(struct bwn_dma_ring *dr) { BWN_DMA_WRITE(dr, BWN_DMA64_TXCTL, BWN_DMA_READ(dr, BWN_DMA64_TXCTL) & ~BWN_DMA64_TXSUSPEND); } static int bwn_dma_64_get_curslot(struct bwn_dma_ring *dr) { uint32_t val; val = BWN_DMA_READ(dr, BWN_DMA64_RXSTATUS); val &= BWN_DMA64_RXSTATDPTR; return (val / sizeof(struct bwn_dmadesc64)); } static void bwn_dma_64_set_curslot(struct bwn_dma_ring *dr, int slot) { BWN_DMA_WRITE(dr, BWN_DMA64_RXINDEX, (uint32_t)(slot * sizeof(struct bwn_dmadesc64))); } static int bwn_dma_allocringmemory(struct bwn_dma_ring *dr) { struct bwn_mac *mac = dr->dr_mac; struct bwn_dma *dma = &mac->mac_method.dma; struct bwn_softc *sc = mac->mac_sc; int error; error = bus_dma_tag_create(dma->parent_dtag, BWN_ALIGN, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL, BWN_DMA_RINGMEMSIZE, 1, BUS_SPACE_MAXSIZE_32BIT, 0, NULL, NULL, &dr->dr_ring_dtag); if (error) { device_printf(sc->sc_dev, "can't create TX ring DMA tag: TODO frees\n"); return (-1); } error = bus_dmamem_alloc(dr->dr_ring_dtag, &dr->dr_ring_descbase, BUS_DMA_WAITOK | BUS_DMA_ZERO, &dr->dr_ring_dmap); if (error) { device_printf(sc->sc_dev, "can't allocate DMA mem: TODO frees\n"); return (-1); } error = bus_dmamap_load(dr->dr_ring_dtag, dr->dr_ring_dmap, dr->dr_ring_descbase, BWN_DMA_RINGMEMSIZE, bwn_dma_ring_addr, &dr->dr_ring_dmabase, BUS_DMA_NOWAIT); if (error) { device_printf(sc->sc_dev, "can't load DMA mem: TODO free\n"); return (-1); } return (0); } static void bwn_dma_setup(struct bwn_dma_ring *dr) { struct bwn_softc *sc = dr->dr_mac->mac_sc; uint64_t ring64; uint32_t addrext, ring32, value; uint32_t trans = siba_dma_translation(sc->sc_dev); if (dr->dr_tx) { dr->dr_curslot = -1; if (dr->dr_type == BWN_DMA_64BIT) { ring64 = (uint64_t)(dr->dr_ring_dmabase); addrext = ((ring64 >> 32) & SIBA_DMA_TRANSLATION_MASK) >> 30; value = BWN_DMA64_TXENABLE; value |= BWN_DMA64_TXPARITY_DISABLE; value |= (addrext << BWN_DMA64_TXADDREXT_SHIFT) & BWN_DMA64_TXADDREXT_MASK; BWN_DMA_WRITE(dr, BWN_DMA64_TXCTL, value); BWN_DMA_WRITE(dr, BWN_DMA64_TXRINGLO, (ring64 & 0xffffffff)); BWN_DMA_WRITE(dr, BWN_DMA64_TXRINGHI, ((ring64 >> 32) & ~SIBA_DMA_TRANSLATION_MASK) | (trans << 1)); } else { ring32 = (uint32_t)(dr->dr_ring_dmabase); addrext = (ring32 & SIBA_DMA_TRANSLATION_MASK) >> 30; value = BWN_DMA32_TXENABLE; value |= BWN_DMA32_TXPARITY_DISABLE; value |= (addrext << BWN_DMA32_TXADDREXT_SHIFT) & BWN_DMA32_TXADDREXT_MASK; BWN_DMA_WRITE(dr, BWN_DMA32_TXCTL, 
value); BWN_DMA_WRITE(dr, BWN_DMA32_TXRING, (ring32 & ~SIBA_DMA_TRANSLATION_MASK) | trans); } return; } /* * set for RX */ dr->dr_usedslot = dr->dr_numslots; if (dr->dr_type == BWN_DMA_64BIT) { ring64 = (uint64_t)(dr->dr_ring_dmabase); addrext = ((ring64 >> 32) & SIBA_DMA_TRANSLATION_MASK) >> 30; value = (dr->dr_frameoffset << BWN_DMA64_RXFROFF_SHIFT); value |= BWN_DMA64_RXENABLE; value |= BWN_DMA64_RXPARITY_DISABLE; value |= (addrext << BWN_DMA64_RXADDREXT_SHIFT) & BWN_DMA64_RXADDREXT_MASK; BWN_DMA_WRITE(dr, BWN_DMA64_RXCTL, value); BWN_DMA_WRITE(dr, BWN_DMA64_RXRINGLO, (ring64 & 0xffffffff)); BWN_DMA_WRITE(dr, BWN_DMA64_RXRINGHI, ((ring64 >> 32) & ~SIBA_DMA_TRANSLATION_MASK) | (trans << 1)); BWN_DMA_WRITE(dr, BWN_DMA64_RXINDEX, dr->dr_numslots * sizeof(struct bwn_dmadesc64)); } else { ring32 = (uint32_t)(dr->dr_ring_dmabase); addrext = (ring32 & SIBA_DMA_TRANSLATION_MASK) >> 30; value = (dr->dr_frameoffset << BWN_DMA32_RXFROFF_SHIFT); value |= BWN_DMA32_RXENABLE; value |= BWN_DMA32_RXPARITY_DISABLE; value |= (addrext << BWN_DMA32_RXADDREXT_SHIFT) & BWN_DMA32_RXADDREXT_MASK; BWN_DMA_WRITE(dr, BWN_DMA32_RXCTL, value); BWN_DMA_WRITE(dr, BWN_DMA32_RXRING, (ring32 & ~SIBA_DMA_TRANSLATION_MASK) | trans); BWN_DMA_WRITE(dr, BWN_DMA32_RXINDEX, dr->dr_numslots * sizeof(struct bwn_dmadesc32)); } } static void bwn_dma_free_ringmemory(struct bwn_dma_ring *dr) { bus_dmamap_unload(dr->dr_ring_dtag, dr->dr_ring_dmap); bus_dmamem_free(dr->dr_ring_dtag, dr->dr_ring_descbase, dr->dr_ring_dmap); } static void bwn_dma_cleanup(struct bwn_dma_ring *dr) { if (dr->dr_tx) { bwn_dma_tx_reset(dr->dr_mac, dr->dr_base, dr->dr_type); if (dr->dr_type == BWN_DMA_64BIT) { BWN_DMA_WRITE(dr, BWN_DMA64_TXRINGLO, 0); BWN_DMA_WRITE(dr, BWN_DMA64_TXRINGHI, 0); } else BWN_DMA_WRITE(dr, BWN_DMA32_TXRING, 0); } else { bwn_dma_rx_reset(dr->dr_mac, dr->dr_base, dr->dr_type); if (dr->dr_type == BWN_DMA_64BIT) { BWN_DMA_WRITE(dr, BWN_DMA64_RXRINGLO, 0); BWN_DMA_WRITE(dr, BWN_DMA64_RXRINGHI, 0); } else BWN_DMA_WRITE(dr, BWN_DMA32_RXRING, 0); } } static void bwn_dma_free_descbufs(struct bwn_dma_ring *dr) { struct bwn_dmadesc_generic *desc; struct bwn_dmadesc_meta *meta; struct bwn_mac *mac = dr->dr_mac; struct bwn_dma *dma = &mac->mac_method.dma; struct bwn_softc *sc = mac->mac_sc; int i; if (!dr->dr_usedslot) return; for (i = 0; i < dr->dr_numslots; i++) { dr->getdesc(dr, i, &desc, &meta); if (meta->mt_m == NULL) { if (!dr->dr_tx) device_printf(sc->sc_dev, "%s: not TX?\n", __func__); continue; } if (dr->dr_tx) { if (meta->mt_txtype == BWN_DMADESC_METATYPE_HEADER) bus_dmamap_unload(dr->dr_txring_dtag, meta->mt_dmap); else if (meta->mt_txtype == BWN_DMADESC_METATYPE_BODY) bus_dmamap_unload(dma->txbuf_dtag, meta->mt_dmap); } else bus_dmamap_unload(dma->rxbuf_dtag, meta->mt_dmap); bwn_dma_free_descbuf(dr, meta); } } static int bwn_dma_tx_reset(struct bwn_mac *mac, uint16_t base, int type) { struct bwn_softc *sc = mac->mac_sc; uint32_t value; int i; uint16_t offset; for (i = 0; i < 10; i++) { offset = (type == BWN_DMA_64BIT) ? BWN_DMA64_TXSTATUS : BWN_DMA32_TXSTATUS; value = BWN_READ_4(mac, base + offset); if (type == BWN_DMA_64BIT) { value &= BWN_DMA64_TXSTAT; if (value == BWN_DMA64_TXSTAT_DISABLED || value == BWN_DMA64_TXSTAT_IDLEWAIT || value == BWN_DMA64_TXSTAT_STOPPED) break; } else { value &= BWN_DMA32_TXSTATE; if (value == BWN_DMA32_TXSTAT_DISABLED || value == BWN_DMA32_TXSTAT_IDLEWAIT || value == BWN_DMA32_TXSTAT_STOPPED) break; } DELAY(1000); } offset = (type == BWN_DMA_64BIT) ? 
BWN_DMA64_TXCTL : BWN_DMA32_TXCTL; BWN_WRITE_4(mac, base + offset, 0); for (i = 0; i < 10; i++) { offset = (type == BWN_DMA_64BIT) ? BWN_DMA64_TXSTATUS : BWN_DMA32_TXSTATUS; value = BWN_READ_4(mac, base + offset); if (type == BWN_DMA_64BIT) { value &= BWN_DMA64_TXSTAT; if (value == BWN_DMA64_TXSTAT_DISABLED) { i = -1; break; } } else { value &= BWN_DMA32_TXSTATE; if (value == BWN_DMA32_TXSTAT_DISABLED) { i = -1; break; } } DELAY(1000); } if (i != -1) { device_printf(sc->sc_dev, "%s: timed out\n", __func__); return (ENODEV); } DELAY(1000); return (0); } static int bwn_dma_rx_reset(struct bwn_mac *mac, uint16_t base, int type) { struct bwn_softc *sc = mac->mac_sc; uint32_t value; int i; uint16_t offset; offset = (type == BWN_DMA_64BIT) ? BWN_DMA64_RXCTL : BWN_DMA32_RXCTL; BWN_WRITE_4(mac, base + offset, 0); for (i = 0; i < 10; i++) { offset = (type == BWN_DMA_64BIT) ? BWN_DMA64_RXSTATUS : BWN_DMA32_RXSTATUS; value = BWN_READ_4(mac, base + offset); if (type == BWN_DMA_64BIT) { value &= BWN_DMA64_RXSTAT; if (value == BWN_DMA64_RXSTAT_DISABLED) { i = -1; break; } } else { value &= BWN_DMA32_RXSTATE; if (value == BWN_DMA32_RXSTAT_DISABLED) { i = -1; break; } } DELAY(1000); } if (i != -1) { device_printf(sc->sc_dev, "%s: timed out\n", __func__); return (ENODEV); } return (0); } static void bwn_dma_free_descbuf(struct bwn_dma_ring *dr, struct bwn_dmadesc_meta *meta) { if (meta->mt_m != NULL) { m_freem(meta->mt_m); meta->mt_m = NULL; } if (meta->mt_ni != NULL) { ieee80211_free_node(meta->mt_ni); meta->mt_ni = NULL; } } static void bwn_dma_set_redzone(struct bwn_dma_ring *dr, struct mbuf *m) { struct bwn_rxhdr4 *rxhdr; unsigned char *frame; rxhdr = mtod(m, struct bwn_rxhdr4 *); rxhdr->frame_len = 0; KASSERT(dr->dr_rx_bufsize >= dr->dr_frameoffset + sizeof(struct bwn_plcp6) + 2, ("%s:%d: fail", __func__, __LINE__)); frame = mtod(m, char *) + dr->dr_frameoffset; memset(frame, 0xff, sizeof(struct bwn_plcp6) + 2 /* padding */); } static uint8_t bwn_dma_check_redzone(struct bwn_dma_ring *dr, struct mbuf *m) { unsigned char *f = mtod(m, char *) + dr->dr_frameoffset; return ((f[0] & f[1] & f[2] & f[3] & f[4] & f[5] & f[6] & f[7]) == 0xff); } static void bwn_wme_init(struct bwn_mac *mac) { bwn_wme_load(mac); /* enable WME support. */ bwn_hf_write(mac, bwn_hf_read(mac) | BWN_HF_EDCF); BWN_WRITE_2(mac, BWN_IFSCTL, BWN_READ_2(mac, BWN_IFSCTL) | BWN_IFSCTL_USE_EDCF); } static void bwn_spu_setdelay(struct bwn_mac *mac, int idle) { struct bwn_softc *sc = mac->mac_sc; struct ieee80211com *ic = &sc->sc_ic; uint16_t delay; /* microsec */ delay = (mac->mac_phy.type == BWN_PHYTYPE_A) ? 
3700 : 1050; if (ic->ic_opmode == IEEE80211_M_IBSS || idle) delay = 500; if ((mac->mac_phy.rf_ver == 0x2050) && (mac->mac_phy.rf_rev == 8)) delay = max(delay, (uint16_t)2400); bwn_shm_write_2(mac, BWN_SHARED, BWN_SHARED_SPU_WAKEUP, delay); } static void bwn_bt_enable(struct bwn_mac *mac) { struct bwn_softc *sc = mac->mac_sc; uint64_t hf; if (bwn_bluetooth == 0) return; if ((siba_sprom_get_bf_lo(sc->sc_dev) & BWN_BFL_BTCOEXIST) == 0) return; if (mac->mac_phy.type != BWN_PHYTYPE_B && !mac->mac_phy.gmode) return; hf = bwn_hf_read(mac); if (siba_sprom_get_bf_lo(sc->sc_dev) & BWN_BFL_BTCMOD) hf |= BWN_HF_BT_COEXISTALT; else hf |= BWN_HF_BT_COEXIST; bwn_hf_write(mac, hf); } static void bwn_set_macaddr(struct bwn_mac *mac) { bwn_mac_write_bssid(mac); bwn_mac_setfilter(mac, BWN_MACFILTER_SELF, mac->mac_sc->sc_ic.ic_macaddr); } static void bwn_clear_keys(struct bwn_mac *mac) { int i; for (i = 0; i < mac->mac_max_nr_keys; i++) { KASSERT(i >= 0 && i < mac->mac_max_nr_keys, ("%s:%d: fail", __func__, __LINE__)); bwn_key_dowrite(mac, i, BWN_SEC_ALGO_NONE, NULL, BWN_SEC_KEYSIZE, NULL); if ((i <= 3) && !BWN_SEC_NEWAPI(mac)) { bwn_key_dowrite(mac, i + 4, BWN_SEC_ALGO_NONE, NULL, BWN_SEC_KEYSIZE, NULL); } mac->mac_key[i].keyconf = NULL; } } static void bwn_crypt_init(struct bwn_mac *mac) { struct bwn_softc *sc = mac->mac_sc; mac->mac_max_nr_keys = (siba_get_revid(sc->sc_dev) >= 5) ? 58 : 20; KASSERT(mac->mac_max_nr_keys <= N(mac->mac_key), ("%s:%d: fail", __func__, __LINE__)); mac->mac_ktp = bwn_shm_read_2(mac, BWN_SHARED, BWN_SHARED_KEY_TABLEP); mac->mac_ktp *= 2; if (siba_get_revid(sc->sc_dev) >= 5) BWN_WRITE_2(mac, BWN_RCMTA_COUNT, mac->mac_max_nr_keys - 8); bwn_clear_keys(mac); } static void bwn_chip_exit(struct bwn_mac *mac) { struct bwn_softc *sc = mac->mac_sc; bwn_phy_exit(mac); siba_gpio_set(sc->sc_dev, 0); } static int bwn_fw_fillinfo(struct bwn_mac *mac) { int error; error = bwn_fw_gets(mac, BWN_FWTYPE_DEFAULT); if (error == 0) return (0); error = bwn_fw_gets(mac, BWN_FWTYPE_OPENSOURCE); if (error == 0) return (0); return (error); } static int bwn_gpio_init(struct bwn_mac *mac) { struct bwn_softc *sc = mac->mac_sc; uint32_t mask = 0x1f, set = 0xf, value; BWN_WRITE_4(mac, BWN_MACCTL, BWN_READ_4(mac, BWN_MACCTL) & ~BWN_MACCTL_GPOUT_MASK); BWN_WRITE_2(mac, BWN_GPIO_MASK, BWN_READ_2(mac, BWN_GPIO_MASK) | 0x000f); if (siba_get_chipid(sc->sc_dev) == 0x4301) { mask |= 0x0060; set |= 0x0060; } if (siba_sprom_get_bf_lo(sc->sc_dev) & BWN_BFL_PACTRL) { BWN_WRITE_2(mac, BWN_GPIO_MASK, BWN_READ_2(mac, BWN_GPIO_MASK) | 0x0200); mask |= 0x0200; set |= 0x0200; } if (siba_get_revid(sc->sc_dev) >= 2) mask |= 0x0010; value = siba_gpio_get(sc->sc_dev); if (value == -1) return (0); siba_gpio_set(sc->sc_dev, (value & mask) | set); return (0); } static int bwn_fw_loadinitvals(struct bwn_mac *mac) { #define GETFWOFFSET(fwp, offset) \ ((const struct bwn_fwinitvals *)((const char *)fwp.fw->data + offset)) const size_t hdr_len = sizeof(struct bwn_fwhdr); const struct bwn_fwhdr *hdr; struct bwn_fw *fw = &mac->mac_fw; int error; hdr = (const struct bwn_fwhdr *)(fw->initvals.fw->data); error = bwn_fwinitvals_write(mac, GETFWOFFSET(fw->initvals, hdr_len), be32toh(hdr->size), fw->initvals.fw->datasize - hdr_len); if (error) return (error); if (fw->initvals_band.fw) { hdr = (const struct bwn_fwhdr *)(fw->initvals_band.fw->data); error = bwn_fwinitvals_write(mac, GETFWOFFSET(fw->initvals_band, hdr_len), be32toh(hdr->size), fw->initvals_band.fw->datasize - hdr_len); } return (error); #undef GETFWOFFSET } static int 
bwn_phy_init(struct bwn_mac *mac) { struct bwn_softc *sc = mac->mac_sc; int error; mac->mac_phy.chan = mac->mac_phy.get_default_chan(mac); mac->mac_phy.rf_onoff(mac, 1); error = mac->mac_phy.init(mac); if (error) { device_printf(sc->sc_dev, "PHY init failed\n"); goto fail0; } error = bwn_switch_channel(mac, mac->mac_phy.get_default_chan(mac)); if (error) { device_printf(sc->sc_dev, "failed to switch default channel\n"); goto fail1; } return (0); fail1: if (mac->mac_phy.exit) mac->mac_phy.exit(mac); fail0: mac->mac_phy.rf_onoff(mac, 0); return (error); } static void bwn_set_txantenna(struct bwn_mac *mac, int antenna) { uint16_t ant; uint16_t tmp; ant = bwn_ant2phy(antenna); /* For ACK/CTS */ tmp = bwn_shm_read_2(mac, BWN_SHARED, BWN_SHARED_ACKCTS_PHYCTL); tmp = (tmp & ~BWN_TX_PHY_ANT) | ant; bwn_shm_write_2(mac, BWN_SHARED, BWN_SHARED_ACKCTS_PHYCTL, tmp); /* For Probe Resposes */ tmp = bwn_shm_read_2(mac, BWN_SHARED, BWN_SHARED_PROBE_RESP_PHYCTL); tmp = (tmp & ~BWN_TX_PHY_ANT) | ant; bwn_shm_write_2(mac, BWN_SHARED, BWN_SHARED_PROBE_RESP_PHYCTL, tmp); } static void bwn_set_opmode(struct bwn_mac *mac) { struct bwn_softc *sc = mac->mac_sc; struct ieee80211com *ic = &sc->sc_ic; uint32_t ctl; uint16_t cfp_pretbtt; ctl = BWN_READ_4(mac, BWN_MACCTL); ctl &= ~(BWN_MACCTL_HOSTAP | BWN_MACCTL_PASS_CTL | BWN_MACCTL_PASS_BADPLCP | BWN_MACCTL_PASS_BADFCS | BWN_MACCTL_PROMISC | BWN_MACCTL_BEACON_PROMISC); ctl |= BWN_MACCTL_STA; if (ic->ic_opmode == IEEE80211_M_HOSTAP || ic->ic_opmode == IEEE80211_M_MBSS) ctl |= BWN_MACCTL_HOSTAP; else if (ic->ic_opmode == IEEE80211_M_IBSS) ctl &= ~BWN_MACCTL_STA; ctl |= sc->sc_filters; if (siba_get_revid(sc->sc_dev) <= 4) ctl |= BWN_MACCTL_PROMISC; BWN_WRITE_4(mac, BWN_MACCTL, ctl); cfp_pretbtt = 2; if ((ctl & BWN_MACCTL_STA) && !(ctl & BWN_MACCTL_HOSTAP)) { if (siba_get_chipid(sc->sc_dev) == 0x4306 && siba_get_chiprev(sc->sc_dev) == 3) cfp_pretbtt = 100; else cfp_pretbtt = 50; } BWN_WRITE_2(mac, 0x612, cfp_pretbtt); } static int bwn_dma_gettype(struct bwn_mac *mac) { uint32_t tmp; uint16_t base; tmp = BWN_READ_4(mac, SIBA_TGSHIGH); if (tmp & SIBA_TGSHIGH_DMA64) return (BWN_DMA_64BIT); base = bwn_dma_base(0, 0); BWN_WRITE_4(mac, base + BWN_DMA32_TXCTL, BWN_DMA32_TXADDREXT_MASK); tmp = BWN_READ_4(mac, base + BWN_DMA32_TXCTL); if (tmp & BWN_DMA32_TXADDREXT_MASK) return (BWN_DMA_32BIT); return (BWN_DMA_30BIT); } static void bwn_dma_ring_addr(void *arg, bus_dma_segment_t *seg, int nseg, int error) { if (!error) { KASSERT(nseg == 1, ("too many segments(%d)\n", nseg)); *((bus_addr_t *)arg) = seg->ds_addr; } } void bwn_dummy_transmission(struct bwn_mac *mac, int ofdm, int paon) { struct bwn_phy *phy = &mac->mac_phy; struct bwn_softc *sc = mac->mac_sc; unsigned int i, max_loop; uint16_t value; uint32_t buffer[5] = { 0x00000000, 0x00d40000, 0x00000000, 0x01000000, 0x00000000 }; if (ofdm) { max_loop = 0x1e; buffer[0] = 0x000201cc; } else { max_loop = 0xfa; buffer[0] = 0x000b846e; } BWN_ASSERT_LOCKED(mac->mac_sc); for (i = 0; i < 5; i++) bwn_ram_write(mac, i * 4, buffer[i]); BWN_WRITE_2(mac, 0x0568, 0x0000); BWN_WRITE_2(mac, 0x07c0, (siba_get_revid(sc->sc_dev) < 11) ? 0x0000 : 0x0100); value = (ofdm ? 
0x41 : 0x40); BWN_WRITE_2(mac, 0x050c, value); if (phy->type == BWN_PHYTYPE_N || phy->type == BWN_PHYTYPE_LP || phy->type == BWN_PHYTYPE_LCN) BWN_WRITE_2(mac, 0x0514, 0x1a02); BWN_WRITE_2(mac, 0x0508, 0x0000); BWN_WRITE_2(mac, 0x050a, 0x0000); BWN_WRITE_2(mac, 0x054c, 0x0000); BWN_WRITE_2(mac, 0x056a, 0x0014); BWN_WRITE_2(mac, 0x0568, 0x0826); BWN_WRITE_2(mac, 0x0500, 0x0000); /* XXX TODO: n phy pa override? */ switch (phy->type) { case BWN_PHYTYPE_N: case BWN_PHYTYPE_LCN: BWN_WRITE_2(mac, 0x0502, 0x00d0); break; case BWN_PHYTYPE_LP: BWN_WRITE_2(mac, 0x0502, 0x0050); break; default: BWN_WRITE_2(mac, 0x0502, 0x0030); break; } /* flush */ BWN_READ_2(mac, 0x0502); if (phy->rf_ver == 0x2050 && phy->rf_rev <= 0x5) BWN_RF_WRITE(mac, 0x0051, 0x0017); for (i = 0x00; i < max_loop; i++) { value = BWN_READ_2(mac, 0x050e); if (value & 0x0080) break; DELAY(10); } for (i = 0x00; i < 0x0a; i++) { value = BWN_READ_2(mac, 0x050e); if (value & 0x0400) break; DELAY(10); } for (i = 0x00; i < 0x19; i++) { value = BWN_READ_2(mac, 0x0690); if (!(value & 0x0100)) break; DELAY(10); } if (phy->rf_ver == 0x2050 && phy->rf_rev <= 0x5) BWN_RF_WRITE(mac, 0x0051, 0x0037); } void bwn_ram_write(struct bwn_mac *mac, uint16_t offset, uint32_t val) { uint32_t macctl; KASSERT(offset % 4 == 0, ("%s:%d: fail", __func__, __LINE__)); macctl = BWN_READ_4(mac, BWN_MACCTL); if (macctl & BWN_MACCTL_BIGENDIAN) printf("TODO: need swap\n"); BWN_WRITE_4(mac, BWN_RAM_CONTROL, offset); BWN_BARRIER(mac, BUS_SPACE_BARRIER_WRITE); BWN_WRITE_4(mac, BWN_RAM_DATA, val); } void bwn_mac_suspend(struct bwn_mac *mac) { struct bwn_softc *sc = mac->mac_sc; int i; uint32_t tmp; KASSERT(mac->mac_suspended >= 0, ("%s:%d: fail", __func__, __LINE__)); DPRINTF(mac->mac_sc, BWN_DEBUG_RESET, "%s: suspended=%d\n", __func__, mac->mac_suspended); if (mac->mac_suspended == 0) { bwn_psctl(mac, BWN_PS_AWAKE); BWN_WRITE_4(mac, BWN_MACCTL, BWN_READ_4(mac, BWN_MACCTL) & ~BWN_MACCTL_ON); BWN_READ_4(mac, BWN_MACCTL); for (i = 35; i; i--) { tmp = BWN_READ_4(mac, BWN_INTR_REASON); if (tmp & BWN_INTR_MAC_SUSPENDED) goto out; DELAY(10); } for (i = 40; i; i--) { tmp = BWN_READ_4(mac, BWN_INTR_REASON); if (tmp & BWN_INTR_MAC_SUSPENDED) goto out; DELAY(1000); } device_printf(sc->sc_dev, "MAC suspend failed\n"); } out: mac->mac_suspended++; } void bwn_mac_enable(struct bwn_mac *mac) { struct bwn_softc *sc = mac->mac_sc; uint16_t state; DPRINTF(mac->mac_sc, BWN_DEBUG_RESET, "%s: suspended=%d\n", __func__, mac->mac_suspended); state = bwn_shm_read_2(mac, BWN_SHARED, BWN_SHARED_UCODESTAT); if (state != BWN_SHARED_UCODESTAT_SUSPEND && state != BWN_SHARED_UCODESTAT_SLEEP) { DPRINTF(sc, BWN_DEBUG_FW, "%s: warn: firmware state (%d)\n", __func__, state); } mac->mac_suspended--; KASSERT(mac->mac_suspended >= 0, ("%s:%d: fail", __func__, __LINE__)); if (mac->mac_suspended == 0) { BWN_WRITE_4(mac, BWN_MACCTL, BWN_READ_4(mac, BWN_MACCTL) | BWN_MACCTL_ON); BWN_WRITE_4(mac, BWN_INTR_REASON, BWN_INTR_MAC_SUSPENDED); BWN_READ_4(mac, BWN_MACCTL); BWN_READ_4(mac, BWN_INTR_REASON); bwn_psctl(mac, 0); } } void bwn_psctl(struct bwn_mac *mac, uint32_t flags) { struct bwn_softc *sc = mac->mac_sc; int i; uint16_t ucstat; KASSERT(!((flags & BWN_PS_ON) && (flags & BWN_PS_OFF)), ("%s:%d: fail", __func__, __LINE__)); KASSERT(!((flags & BWN_PS_AWAKE) && (flags & BWN_PS_ASLEEP)), ("%s:%d: fail", __func__, __LINE__)); /* XXX forcibly awake and hwps-off */ BWN_WRITE_4(mac, BWN_MACCTL, (BWN_READ_4(mac, BWN_MACCTL) | BWN_MACCTL_AWAKE) & ~BWN_MACCTL_HWPS); BWN_READ_4(mac, BWN_MACCTL); if 
(siba_get_revid(sc->sc_dev) >= 5) { for (i = 0; i < 100; i++) { ucstat = bwn_shm_read_2(mac, BWN_SHARED, BWN_SHARED_UCODESTAT); if (ucstat != BWN_SHARED_UCODESTAT_SLEEP) break; DELAY(10); } } DPRINTF(mac->mac_sc, BWN_DEBUG_RESET, "%s: ucstat=%d\n", __func__, ucstat); } static int bwn_fw_gets(struct bwn_mac *mac, enum bwn_fwtype type) { struct bwn_softc *sc = mac->mac_sc; struct bwn_fw *fw = &mac->mac_fw; const uint8_t rev = siba_get_revid(sc->sc_dev); const char *filename; uint32_t high; int error; /* microcode */ filename = NULL; switch (rev) { case 42: if (mac->mac_phy.type == BWN_PHYTYPE_AC) filename = "ucode42"; break; case 40: if (mac->mac_phy.type == BWN_PHYTYPE_AC) filename = "ucode40"; break; case 33: if (mac->mac_phy.type == BWN_PHYTYPE_LCN40) filename = "ucode33_lcn40"; break; case 30: if (mac->mac_phy.type == BWN_PHYTYPE_N) filename = "ucode30_mimo"; break; case 29: if (mac->mac_phy.type == BWN_PHYTYPE_HT) filename = "ucode29_mimo"; break; case 26: if (mac->mac_phy.type == BWN_PHYTYPE_HT) filename = "ucode26_mimo"; break; case 28: case 25: if (mac->mac_phy.type == BWN_PHYTYPE_N) filename = "ucode25_mimo"; else if (mac->mac_phy.type == BWN_PHYTYPE_LCN) filename = "ucode25_lcn"; break; case 24: if (mac->mac_phy.type == BWN_PHYTYPE_LCN) filename = "ucode24_lcn"; break; case 23: if (mac->mac_phy.type == BWN_PHYTYPE_N) filename = "ucode16_mimo"; break; case 16: case 17: case 18: case 19: if (mac->mac_phy.type == BWN_PHYTYPE_N) filename = "ucode16_mimo"; else if (mac->mac_phy.type == BWN_PHYTYPE_LP) filename = "ucode16_lp"; break; case 15: filename = "ucode15"; break; case 14: filename = "ucode14"; break; case 13: filename = "ucode13"; break; case 12: case 11: filename = "ucode11"; break; case 10: case 9: case 8: case 7: case 6: case 5: filename = "ucode5"; break; default: device_printf(sc->sc_dev, "no ucode for rev %d\n", rev); bwn_release_firmware(mac); return (EOPNOTSUPP); } device_printf(sc->sc_dev, "ucode fw: %s\n", filename); error = bwn_fw_get(mac, type, filename, &fw->ucode); if (error) { bwn_release_firmware(mac); return (error); } /* PCM */ KASSERT(fw->no_pcmfile == 0, ("%s:%d fail", __func__, __LINE__)); if (rev >= 5 && rev <= 10) { error = bwn_fw_get(mac, type, "pcm5", &fw->pcm); if (error == ENOENT) fw->no_pcmfile = 1; else if (error) { bwn_release_firmware(mac); return (error); } } else if (rev < 11) { device_printf(sc->sc_dev, "no PCM for rev %d\n", rev); bwn_release_firmware(mac); return (EOPNOTSUPP); } /* initvals */ high = siba_read_4(sc->sc_dev, SIBA_TGSHIGH); switch (mac->mac_phy.type) { case BWN_PHYTYPE_A: if (rev < 5 || rev > 10) goto fail1; if (high & BWN_TGSHIGH_HAVE_2GHZ) filename = "a0g1initvals5"; else filename = "a0g0initvals5"; break; case BWN_PHYTYPE_G: if (rev >= 5 && rev <= 10) filename = "b0g0initvals5"; else if (rev >= 13) filename = "b0g0initvals13"; else goto fail1; break; case BWN_PHYTYPE_LP: if (rev == 13) filename = "lp0initvals13"; else if (rev == 14) filename = "lp0initvals14"; else if (rev >= 15) filename = "lp0initvals15"; else goto fail1; break; case BWN_PHYTYPE_N: if (rev == 30) filename = "n16initvals30"; else if (rev == 28 || rev == 25) filename = "n0initvals25"; else if (rev == 24) filename = "n0initvals24"; else if (rev == 23) filename = "n0initvals16"; else if (rev >= 16 && rev <= 18) filename = "n0initvals16"; else if (rev >= 11 && rev <= 12) filename = "n0initvals11"; else goto fail1; break; default: goto fail1; } error = bwn_fw_get(mac, type, filename, &fw->initvals); if (error) { bwn_release_firmware(mac); return (error); } /* 
bandswitch initvals */ switch (mac->mac_phy.type) { case BWN_PHYTYPE_A: if (rev >= 5 && rev <= 10) { if (high & BWN_TGSHIGH_HAVE_2GHZ) filename = "a0g1bsinitvals5"; else filename = "a0g0bsinitvals5"; } else if (rev >= 11) filename = NULL; else goto fail1; break; case BWN_PHYTYPE_G: if (rev >= 5 && rev <= 10) filename = "b0g0bsinitvals5"; else if (rev >= 11) filename = NULL; else goto fail1; break; case BWN_PHYTYPE_LP: if (rev == 13) filename = "lp0bsinitvals13"; else if (rev == 14) filename = "lp0bsinitvals14"; else if (rev >= 15) filename = "lp0bsinitvals15"; else goto fail1; break; case BWN_PHYTYPE_N: if (rev == 30) filename = "n16bsinitvals30"; else if (rev == 28 || rev == 25) filename = "n0bsinitvals25"; else if (rev == 24) filename = "n0bsinitvals24"; else if (rev == 23) filename = "n0bsinitvals16"; else if (rev >= 16 && rev <= 18) filename = "n0bsinitvals16"; else if (rev >= 11 && rev <= 12) filename = "n0bsinitvals11"; else goto fail1; break; default: device_printf(sc->sc_dev, "unknown phy (%d)\n", mac->mac_phy.type); goto fail1; } error = bwn_fw_get(mac, type, filename, &fw->initvals_band); if (error) { bwn_release_firmware(mac); return (error); } return (0); fail1: device_printf(sc->sc_dev, "no INITVALS for rev %d, phy.type %d\n", rev, mac->mac_phy.type); bwn_release_firmware(mac); return (EOPNOTSUPP); } static int bwn_fw_get(struct bwn_mac *mac, enum bwn_fwtype type, const char *name, struct bwn_fwfile *bfw) { const struct bwn_fwhdr *hdr; struct bwn_softc *sc = mac->mac_sc; const struct firmware *fw; char namebuf[64]; if (name == NULL) { bwn_do_release_fw(bfw); return (0); } if (bfw->filename != NULL) { if (bfw->type == type && (strcmp(bfw->filename, name) == 0)) return (0); bwn_do_release_fw(bfw); } snprintf(namebuf, sizeof(namebuf), "bwn%s_v4_%s%s", (type == BWN_FWTYPE_OPENSOURCE) ? "-open" : "", (mac->mac_phy.type == BWN_PHYTYPE_LP) ? 
"lp_" : "", name); /* XXX Sleeping on "fwload" with the non-sleepable locks held */ fw = firmware_get(namebuf); if (fw == NULL) { device_printf(sc->sc_dev, "the fw file(%s) not found\n", namebuf); return (ENOENT); } if (fw->datasize < sizeof(struct bwn_fwhdr)) goto fail; hdr = (const struct bwn_fwhdr *)(fw->data); switch (hdr->type) { case BWN_FWTYPE_UCODE: case BWN_FWTYPE_PCM: if (be32toh(hdr->size) != (fw->datasize - sizeof(struct bwn_fwhdr))) goto fail; /* FALLTHROUGH */ case BWN_FWTYPE_IV: if (hdr->ver != 1) goto fail; break; default: goto fail; } bfw->filename = name; bfw->fw = fw; bfw->type = type; return (0); fail: device_printf(sc->sc_dev, "the fw file(%s) format error\n", namebuf); if (fw != NULL) firmware_put(fw, FIRMWARE_UNLOAD); return (EPROTO); } static void bwn_release_firmware(struct bwn_mac *mac) { bwn_do_release_fw(&mac->mac_fw.ucode); bwn_do_release_fw(&mac->mac_fw.pcm); bwn_do_release_fw(&mac->mac_fw.initvals); bwn_do_release_fw(&mac->mac_fw.initvals_band); } static void bwn_do_release_fw(struct bwn_fwfile *bfw) { if (bfw->fw != NULL) firmware_put(bfw->fw, FIRMWARE_UNLOAD); bfw->fw = NULL; bfw->filename = NULL; } static int bwn_fw_loaducode(struct bwn_mac *mac) { #define GETFWOFFSET(fwp, offset) \ ((const uint32_t *)((const char *)fwp.fw->data + offset)) #define GETFWSIZE(fwp, offset) \ ((fwp.fw->datasize - offset) / sizeof(uint32_t)) struct bwn_softc *sc = mac->mac_sc; const uint32_t *data; unsigned int i; uint32_t ctl; uint16_t date, fwcaps, time; int error = 0; ctl = BWN_READ_4(mac, BWN_MACCTL); ctl |= BWN_MACCTL_MCODE_JMP0; KASSERT(!(ctl & BWN_MACCTL_MCODE_RUN), ("%s:%d: fail", __func__, __LINE__)); BWN_WRITE_4(mac, BWN_MACCTL, ctl); for (i = 0; i < 64; i++) bwn_shm_write_2(mac, BWN_SCRATCH, i, 0); for (i = 0; i < 4096; i += 2) bwn_shm_write_2(mac, BWN_SHARED, i, 0); data = GETFWOFFSET(mac->mac_fw.ucode, sizeof(struct bwn_fwhdr)); bwn_shm_ctlword(mac, BWN_UCODE | BWN_SHARED_AUTOINC, 0x0000); for (i = 0; i < GETFWSIZE(mac->mac_fw.ucode, sizeof(struct bwn_fwhdr)); i++) { BWN_WRITE_4(mac, BWN_SHM_DATA, be32toh(data[i])); DELAY(10); } if (mac->mac_fw.pcm.fw) { data = GETFWOFFSET(mac->mac_fw.pcm, sizeof(struct bwn_fwhdr)); bwn_shm_ctlword(mac, BWN_HW, 0x01ea); BWN_WRITE_4(mac, BWN_SHM_DATA, 0x00004000); bwn_shm_ctlword(mac, BWN_HW, 0x01eb); for (i = 0; i < GETFWSIZE(mac->mac_fw.pcm, sizeof(struct bwn_fwhdr)); i++) { BWN_WRITE_4(mac, BWN_SHM_DATA, be32toh(data[i])); DELAY(10); } } BWN_WRITE_4(mac, BWN_INTR_REASON, BWN_INTR_ALL); BWN_WRITE_4(mac, BWN_MACCTL, (BWN_READ_4(mac, BWN_MACCTL) & ~BWN_MACCTL_MCODE_JMP0) | BWN_MACCTL_MCODE_RUN); for (i = 0; i < 21; i++) { if (BWN_READ_4(mac, BWN_INTR_REASON) == BWN_INTR_MAC_SUSPENDED) break; if (i >= 20) { device_printf(sc->sc_dev, "ucode timeout\n"); error = ENXIO; goto error; } DELAY(50000); } BWN_READ_4(mac, BWN_INTR_REASON); mac->mac_fw.rev = bwn_shm_read_2(mac, BWN_SHARED, BWN_SHARED_UCODE_REV); if (mac->mac_fw.rev <= 0x128) { device_printf(sc->sc_dev, "the firmware is too old\n"); error = EOPNOTSUPP; goto error; } /* * Determine firmware header version; needed for TX/RX packet * handling. */ if (mac->mac_fw.rev >= 598) mac->mac_fw.fw_hdr_format = BWN_FW_HDR_598; else if (mac->mac_fw.rev >= 410) mac->mac_fw.fw_hdr_format = BWN_FW_HDR_410; else mac->mac_fw.fw_hdr_format = BWN_FW_HDR_351; /* * We don't support rev 598 or later; that requires * another round of changes to the TX/RX descriptor * and status layout. * * So, complain this is the case and exit out, rather * than attaching and then failing. 
*/ #if 0 if (mac->mac_fw.fw_hdr_format == BWN_FW_HDR_598) { device_printf(sc->sc_dev, "firmware is too new (>=598); not supported\n"); error = EOPNOTSUPP; goto error; } #endif mac->mac_fw.patch = bwn_shm_read_2(mac, BWN_SHARED, BWN_SHARED_UCODE_PATCH); date = bwn_shm_read_2(mac, BWN_SHARED, BWN_SHARED_UCODE_DATE); mac->mac_fw.opensource = (date == 0xffff); if (bwn_wme != 0) mac->mac_flags |= BWN_MAC_FLAG_WME; mac->mac_flags |= BWN_MAC_FLAG_HWCRYPTO; time = bwn_shm_read_2(mac, BWN_SHARED, BWN_SHARED_UCODE_TIME); if (mac->mac_fw.opensource == 0) { device_printf(sc->sc_dev, "firmware version (rev %u patch %u date %#x time %#x)\n", mac->mac_fw.rev, mac->mac_fw.patch, date, time); if (mac->mac_fw.no_pcmfile) device_printf(sc->sc_dev, "no HW crypto acceleration due to pcm5\n"); } else { mac->mac_fw.patch = time; fwcaps = bwn_fwcaps_read(mac); if (!(fwcaps & BWN_FWCAPS_HWCRYPTO) || mac->mac_fw.no_pcmfile) { device_printf(sc->sc_dev, "disabling HW crypto acceleration\n"); mac->mac_flags &= ~BWN_MAC_FLAG_HWCRYPTO; } if (!(fwcaps & BWN_FWCAPS_WME)) { device_printf(sc->sc_dev, "disabling WME support\n"); mac->mac_flags &= ~BWN_MAC_FLAG_WME; } } if (BWN_ISOLDFMT(mac)) device_printf(sc->sc_dev, "using old firmware image\n"); return (0); error: BWN_WRITE_4(mac, BWN_MACCTL, (BWN_READ_4(mac, BWN_MACCTL) & ~BWN_MACCTL_MCODE_RUN) | BWN_MACCTL_MCODE_JMP0); return (error); #undef GETFWSIZE #undef GETFWOFFSET } /* OpenFirmware only */ static uint16_t bwn_fwcaps_read(struct bwn_mac *mac) { KASSERT(mac->mac_fw.opensource == 1, ("%s:%d: fail", __func__, __LINE__)); return (bwn_shm_read_2(mac, BWN_SHARED, BWN_SHARED_FWCAPS)); } static int bwn_fwinitvals_write(struct bwn_mac *mac, const struct bwn_fwinitvals *ivals, size_t count, size_t array_size) { #define GET_NEXTIV16(iv) \ ((const struct bwn_fwinitvals *)((const uint8_t *)(iv) + \ sizeof(uint16_t) + sizeof(uint16_t))) #define GET_NEXTIV32(iv) \ ((const struct bwn_fwinitvals *)((const uint8_t *)(iv) + \ sizeof(uint16_t) + sizeof(uint32_t))) struct bwn_softc *sc = mac->mac_sc; const struct bwn_fwinitvals *iv; uint16_t offset; size_t i; uint8_t bit32; KASSERT(sizeof(struct bwn_fwinitvals) == 6, ("%s:%d: fail", __func__, __LINE__)); iv = ivals; for (i = 0; i < count; i++) { if (array_size < sizeof(iv->offset_size)) goto fail; array_size -= sizeof(iv->offset_size); offset = be16toh(iv->offset_size); bit32 = (offset & BWN_FWINITVALS_32BIT) ? 
1 : 0; offset &= BWN_FWINITVALS_OFFSET_MASK; if (offset >= 0x1000) goto fail; if (bit32) { if (array_size < sizeof(iv->data.d32)) goto fail; array_size -= sizeof(iv->data.d32); BWN_WRITE_4(mac, offset, be32toh(iv->data.d32)); iv = GET_NEXTIV32(iv); } else { if (array_size < sizeof(iv->data.d16)) goto fail; array_size -= sizeof(iv->data.d16); BWN_WRITE_2(mac, offset, be16toh(iv->data.d16)); iv = GET_NEXTIV16(iv); } } if (array_size != 0) goto fail; return (0); fail: device_printf(sc->sc_dev, "initvals: invalid format\n"); return (EPROTO); #undef GET_NEXTIV16 #undef GET_NEXTIV32 } int bwn_switch_channel(struct bwn_mac *mac, int chan) { struct bwn_phy *phy = &(mac->mac_phy); struct bwn_softc *sc = mac->mac_sc; struct ieee80211com *ic = &sc->sc_ic; uint16_t channelcookie, savedcookie; int error; if (chan == 0xffff) chan = phy->get_default_chan(mac); channelcookie = chan; if (IEEE80211_IS_CHAN_5GHZ(ic->ic_curchan)) channelcookie |= 0x100; savedcookie = bwn_shm_read_2(mac, BWN_SHARED, BWN_SHARED_CHAN); bwn_shm_write_2(mac, BWN_SHARED, BWN_SHARED_CHAN, channelcookie); error = phy->switch_channel(mac, chan); if (error) goto fail; mac->mac_phy.chan = chan; DELAY(8000); return (0); fail: device_printf(sc->sc_dev, "failed to switch channel\n"); bwn_shm_write_2(mac, BWN_SHARED, BWN_SHARED_CHAN, savedcookie); return (error); } static uint16_t bwn_ant2phy(int antenna) { switch (antenna) { case BWN_ANT0: return (BWN_TX_PHY_ANT0); case BWN_ANT1: return (BWN_TX_PHY_ANT1); case BWN_ANT2: return (BWN_TX_PHY_ANT2); case BWN_ANT3: return (BWN_TX_PHY_ANT3); case BWN_ANTAUTO: return (BWN_TX_PHY_ANT01AUTO); } KASSERT(0 == 1, ("%s:%d: fail", __func__, __LINE__)); return (0); } static void bwn_wme_load(struct bwn_mac *mac) { struct bwn_softc *sc = mac->mac_sc; int i; KASSERT(N(bwn_wme_shm_offsets) == N(sc->sc_wmeParams), ("%s:%d: fail", __func__, __LINE__)); bwn_mac_suspend(mac); for (i = 0; i < N(sc->sc_wmeParams); i++) bwn_wme_loadparams(mac, &(sc->sc_wmeParams[i]), bwn_wme_shm_offsets[i]); bwn_mac_enable(mac); } static void bwn_wme_loadparams(struct bwn_mac *mac, const struct wmeParams *p, uint16_t shm_offset) { #define SM(_v, _f) (((_v) << _f##_S) & _f) struct bwn_softc *sc = mac->mac_sc; uint16_t params[BWN_NR_WMEPARAMS]; int slot, tmp; unsigned int i; slot = BWN_READ_2(mac, BWN_RNG) & SM(p->wmep_logcwmin, WME_PARAM_LOGCWMIN); memset(¶ms, 0, sizeof(params)); DPRINTF(sc, BWN_DEBUG_WME, "wmep_txopLimit %d wmep_logcwmin %d " "wmep_logcwmax %d wmep_aifsn %d\n", p->wmep_txopLimit, p->wmep_logcwmin, p->wmep_logcwmax, p->wmep_aifsn); params[BWN_WMEPARAM_TXOP] = p->wmep_txopLimit * 32; params[BWN_WMEPARAM_CWMIN] = SM(p->wmep_logcwmin, WME_PARAM_LOGCWMIN); params[BWN_WMEPARAM_CWMAX] = SM(p->wmep_logcwmax, WME_PARAM_LOGCWMAX); params[BWN_WMEPARAM_CWCUR] = SM(p->wmep_logcwmin, WME_PARAM_LOGCWMIN); params[BWN_WMEPARAM_AIFS] = p->wmep_aifsn; params[BWN_WMEPARAM_BSLOTS] = slot; params[BWN_WMEPARAM_REGGAP] = slot + p->wmep_aifsn; for (i = 0; i < N(params); i++) { if (i == BWN_WMEPARAM_STATUS) { tmp = bwn_shm_read_2(mac, BWN_SHARED, shm_offset + (i * 2)); tmp |= 0x100; bwn_shm_write_2(mac, BWN_SHARED, shm_offset + (i * 2), tmp); } else { bwn_shm_write_2(mac, BWN_SHARED, shm_offset + (i * 2), params[i]); } } } static void bwn_mac_write_bssid(struct bwn_mac *mac) { struct bwn_softc *sc = mac->mac_sc; uint32_t tmp; int i; uint8_t mac_bssid[IEEE80211_ADDR_LEN * 2]; bwn_mac_setfilter(mac, BWN_MACFILTER_BSSID, sc->sc_bssid); memcpy(mac_bssid, sc->sc_ic.ic_macaddr, IEEE80211_ADDR_LEN); memcpy(mac_bssid + IEEE80211_ADDR_LEN, 
sc->sc_bssid, IEEE80211_ADDR_LEN); for (i = 0; i < N(mac_bssid); i += sizeof(uint32_t)) { tmp = (uint32_t) (mac_bssid[i + 0]); tmp |= (uint32_t) (mac_bssid[i + 1]) << 8; tmp |= (uint32_t) (mac_bssid[i + 2]) << 16; tmp |= (uint32_t) (mac_bssid[i + 3]) << 24; bwn_ram_write(mac, 0x20 + i, tmp); } } static void bwn_mac_setfilter(struct bwn_mac *mac, uint16_t offset, const uint8_t *macaddr) { static const uint8_t zero[IEEE80211_ADDR_LEN] = { 0 }; uint16_t data; if (!mac) macaddr = zero; offset |= 0x0020; BWN_WRITE_2(mac, BWN_MACFILTER_CONTROL, offset); data = macaddr[0]; data |= macaddr[1] << 8; BWN_WRITE_2(mac, BWN_MACFILTER_DATA, data); data = macaddr[2]; data |= macaddr[3] << 8; BWN_WRITE_2(mac, BWN_MACFILTER_DATA, data); data = macaddr[4]; data |= macaddr[5] << 8; BWN_WRITE_2(mac, BWN_MACFILTER_DATA, data); } static void bwn_key_dowrite(struct bwn_mac *mac, uint8_t index, uint8_t algorithm, const uint8_t *key, size_t key_len, const uint8_t *mac_addr) { uint8_t buf[BWN_SEC_KEYSIZE] = { 0, }; uint8_t per_sta_keys_start = 8; if (BWN_SEC_NEWAPI(mac)) per_sta_keys_start = 4; KASSERT(index < mac->mac_max_nr_keys, ("%s:%d: fail", __func__, __LINE__)); KASSERT(key_len <= BWN_SEC_KEYSIZE, ("%s:%d: fail", __func__, __LINE__)); if (index >= per_sta_keys_start) bwn_key_macwrite(mac, index, NULL); if (key) memcpy(buf, key, key_len); bwn_key_write(mac, index, algorithm, buf); if (index >= per_sta_keys_start) bwn_key_macwrite(mac, index, mac_addr); mac->mac_key[index].algorithm = algorithm; } static void bwn_key_macwrite(struct bwn_mac *mac, uint8_t index, const uint8_t *addr) { struct bwn_softc *sc = mac->mac_sc; uint32_t addrtmp[2] = { 0, 0 }; uint8_t start = 8; if (BWN_SEC_NEWAPI(mac)) start = 4; KASSERT(index >= start, ("%s:%d: fail", __func__, __LINE__)); index -= start; if (addr) { addrtmp[0] = addr[0]; addrtmp[0] |= ((uint32_t) (addr[1]) << 8); addrtmp[0] |= ((uint32_t) (addr[2]) << 16); addrtmp[0] |= ((uint32_t) (addr[3]) << 24); addrtmp[1] = addr[4]; addrtmp[1] |= ((uint32_t) (addr[5]) << 8); } if (siba_get_revid(sc->sc_dev) >= 5) { bwn_shm_write_4(mac, BWN_RCMTA, (index * 2) + 0, addrtmp[0]); bwn_shm_write_2(mac, BWN_RCMTA, (index * 2) + 1, addrtmp[1]); } else { if (index >= 8) { bwn_shm_write_4(mac, BWN_SHARED, BWN_SHARED_PSM + (index * 6) + 0, addrtmp[0]); bwn_shm_write_2(mac, BWN_SHARED, BWN_SHARED_PSM + (index * 6) + 4, addrtmp[1]); } } } static void bwn_key_write(struct bwn_mac *mac, uint8_t index, uint8_t algorithm, const uint8_t *key) { unsigned int i; uint32_t offset; uint16_t kidx, value; kidx = BWN_SEC_KEY2FW(mac, index); bwn_shm_write_2(mac, BWN_SHARED, BWN_SHARED_KEYIDX_BLOCK + (kidx * 2), (kidx << 4) | algorithm); offset = mac->mac_ktp + (index * BWN_SEC_KEYSIZE); for (i = 0; i < BWN_SEC_KEYSIZE; i += 2) { value = key[i]; value |= (uint16_t)(key[i + 1]) << 8; bwn_shm_write_2(mac, BWN_SHARED, offset + i, value); } } static void bwn_phy_exit(struct bwn_mac *mac) { mac->mac_phy.rf_onoff(mac, 0); if (mac->mac_phy.exit != NULL) mac->mac_phy.exit(mac); } static void bwn_dma_free(struct bwn_mac *mac) { struct bwn_dma *dma; if ((mac->mac_flags & BWN_MAC_FLAG_DMA) == 0) return; dma = &mac->mac_method.dma; bwn_dma_ringfree(&dma->rx); bwn_dma_ringfree(&dma->wme[WME_AC_BK]); bwn_dma_ringfree(&dma->wme[WME_AC_BE]); bwn_dma_ringfree(&dma->wme[WME_AC_VI]); bwn_dma_ringfree(&dma->wme[WME_AC_VO]); bwn_dma_ringfree(&dma->mcast); } static void bwn_core_stop(struct bwn_mac *mac) { struct bwn_softc *sc = mac->mac_sc; BWN_ASSERT_LOCKED(sc); if (mac->mac_status < BWN_MAC_STATUS_STARTED) return; 
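/*
 * Stop the periodic callouts, mask and flush the interrupt register,
 * suspend the MAC and fall back to the INITED state; a subsequent
 * bwn_core_exit() completes the teardown.
 */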
callout_stop(&sc->sc_rfswitch_ch); callout_stop(&sc->sc_task_ch); callout_stop(&sc->sc_watchdog_ch); sc->sc_watchdog_timer = 0; BWN_WRITE_4(mac, BWN_INTR_MASK, 0); BWN_READ_4(mac, BWN_INTR_MASK); bwn_mac_suspend(mac); mac->mac_status = BWN_MAC_STATUS_INITED; } static int bwn_switch_band(struct bwn_softc *sc, struct ieee80211_channel *chan) { struct bwn_mac *up_dev = NULL; struct bwn_mac *down_dev; struct bwn_mac *mac; int err, status; uint8_t gmode; BWN_ASSERT_LOCKED(sc); TAILQ_FOREACH(mac, &sc->sc_maclist, mac_list) { if (IEEE80211_IS_CHAN_2GHZ(chan) && mac->mac_phy.supports_2ghz) { up_dev = mac; gmode = 1; } else if (IEEE80211_IS_CHAN_5GHZ(chan) && mac->mac_phy.supports_5ghz) { up_dev = mac; gmode = 0; } else { KASSERT(0 == 1, ("%s:%d: fail", __func__, __LINE__)); return (EINVAL); } if (up_dev != NULL) break; } if (up_dev == NULL) { device_printf(sc->sc_dev, "Could not find a device\n"); return (ENODEV); } if (up_dev == sc->sc_curmac && sc->sc_curmac->mac_phy.gmode == gmode) return (0); DPRINTF(sc, BWN_DEBUG_RF | BWN_DEBUG_PHY | BWN_DEBUG_RESET, "switching to %s-GHz band\n", IEEE80211_IS_CHAN_2GHZ(chan) ? "2" : "5"); down_dev = sc->sc_curmac; status = down_dev->mac_status; if (status >= BWN_MAC_STATUS_STARTED) bwn_core_stop(down_dev); if (status >= BWN_MAC_STATUS_INITED) bwn_core_exit(down_dev); if (down_dev != up_dev) bwn_phy_reset(down_dev); up_dev->mac_phy.gmode = gmode; if (status >= BWN_MAC_STATUS_INITED) { err = bwn_core_init(up_dev); if (err) { device_printf(sc->sc_dev, "fatal: failed to initialize for %s-GHz\n", IEEE80211_IS_CHAN_2GHZ(chan) ? "2" : "5"); goto fail; } } if (status >= BWN_MAC_STATUS_STARTED) bwn_core_start(up_dev); KASSERT(up_dev->mac_status == status, ("%s: fail", __func__)); sc->sc_curmac = up_dev; return (0); fail: sc->sc_curmac = NULL; return (err); } static void bwn_rf_turnon(struct bwn_mac *mac) { DPRINTF(mac->mac_sc, BWN_DEBUG_RESET, "%s: called\n", __func__); bwn_mac_suspend(mac); mac->mac_phy.rf_onoff(mac, 1); mac->mac_phy.rf_on = 1; bwn_mac_enable(mac); } static void bwn_rf_turnoff(struct bwn_mac *mac) { DPRINTF(mac->mac_sc, BWN_DEBUG_RESET, "%s: called\n", __func__); bwn_mac_suspend(mac); mac->mac_phy.rf_onoff(mac, 0); mac->mac_phy.rf_on = 0; bwn_mac_enable(mac); } /* * PHY reset. */ static void bwn_phy_reset(struct bwn_mac *mac) { struct bwn_softc *sc = mac->mac_sc; siba_write_4(sc->sc_dev, SIBA_TGSLOW, ((siba_read_4(sc->sc_dev, SIBA_TGSLOW) & ~BWN_TGSLOW_SUPPORT_G) | BWN_TGSLOW_PHYRESET) | SIBA_TGSLOW_FGC); DELAY(1000); siba_write_4(sc->sc_dev, SIBA_TGSLOW, (siba_read_4(sc->sc_dev, SIBA_TGSLOW) & ~SIBA_TGSLOW_FGC)); DELAY(1000); } static int bwn_newstate(struct ieee80211vap *vap, enum ieee80211_state nstate, int arg) { struct bwn_vap *bvp = BWN_VAP(vap); struct ieee80211com *ic= vap->iv_ic; enum ieee80211_state ostate = vap->iv_state; struct bwn_softc *sc = ic->ic_softc; struct bwn_mac *mac = sc->sc_curmac; int error; DPRINTF(sc, BWN_DEBUG_STATE, "%s: %s -> %s\n", __func__, ieee80211_state_name[vap->iv_state], ieee80211_state_name[nstate]); error = bvp->bv_newstate(vap, nstate, arg); if (error != 0) return (error); BWN_LOCK(sc); bwn_led_newstate(mac, nstate); /* * Clear the BSSID when we stop a STA */ if (vap->iv_opmode == IEEE80211_M_STA) { if (ostate == IEEE80211_S_RUN && nstate != IEEE80211_S_RUN) { /* * Clear out the BSSID. If we reassociate to * the same AP, this will reinialize things * correctly... 
*/ if (ic->ic_opmode == IEEE80211_M_STA && (sc->sc_flags & BWN_FLAG_INVALID) == 0) { memset(sc->sc_bssid, 0, IEEE80211_ADDR_LEN); bwn_set_macaddr(mac); } } } if (vap->iv_opmode == IEEE80211_M_MONITOR || vap->iv_opmode == IEEE80211_M_AHDEMO) { /* XXX nothing to do? */ } else if (nstate == IEEE80211_S_RUN) { memcpy(sc->sc_bssid, vap->iv_bss->ni_bssid, IEEE80211_ADDR_LEN); bwn_set_opmode(mac); bwn_set_pretbtt(mac); bwn_spu_setdelay(mac, 0); bwn_set_macaddr(mac); } BWN_UNLOCK(sc); return (error); } static void bwn_set_pretbtt(struct bwn_mac *mac) { struct bwn_softc *sc = mac->mac_sc; struct ieee80211com *ic = &sc->sc_ic; uint16_t pretbtt; if (ic->ic_opmode == IEEE80211_M_IBSS) pretbtt = 2; else pretbtt = (mac->mac_phy.type == BWN_PHYTYPE_A) ? 120 : 250; bwn_shm_write_2(mac, BWN_SHARED, BWN_SHARED_PRETBTT, pretbtt); BWN_WRITE_2(mac, BWN_TSF_CFP_PRETBTT, pretbtt); } static int bwn_intr(void *arg) { struct bwn_mac *mac = arg; struct bwn_softc *sc = mac->mac_sc; uint32_t reason; if (mac->mac_status < BWN_MAC_STATUS_STARTED || (sc->sc_flags & BWN_FLAG_INVALID)) return (FILTER_STRAY); DPRINTF(sc, BWN_DEBUG_INTR, "%s: called\n", __func__); reason = BWN_READ_4(mac, BWN_INTR_REASON); if (reason == 0xffffffff) /* shared IRQ */ return (FILTER_STRAY); reason &= mac->mac_intr_mask; if (reason == 0) return (FILTER_HANDLED); DPRINTF(sc, BWN_DEBUG_INTR, "%s: reason=0x%08x\n", __func__, reason); mac->mac_reason[0] = BWN_READ_4(mac, BWN_DMA0_REASON) & 0x0001dc00; mac->mac_reason[1] = BWN_READ_4(mac, BWN_DMA1_REASON) & 0x0000dc00; mac->mac_reason[2] = BWN_READ_4(mac, BWN_DMA2_REASON) & 0x0000dc00; mac->mac_reason[3] = BWN_READ_4(mac, BWN_DMA3_REASON) & 0x0001dc00; mac->mac_reason[4] = BWN_READ_4(mac, BWN_DMA4_REASON) & 0x0000dc00; BWN_WRITE_4(mac, BWN_INTR_REASON, reason); BWN_WRITE_4(mac, BWN_DMA0_REASON, mac->mac_reason[0]); BWN_WRITE_4(mac, BWN_DMA1_REASON, mac->mac_reason[1]); BWN_WRITE_4(mac, BWN_DMA2_REASON, mac->mac_reason[2]); BWN_WRITE_4(mac, BWN_DMA3_REASON, mac->mac_reason[3]); BWN_WRITE_4(mac, BWN_DMA4_REASON, mac->mac_reason[4]); /* Disable interrupts. 
*/ BWN_WRITE_4(mac, BWN_INTR_MASK, 0); mac->mac_reason_intr = reason; BWN_BARRIER(mac, BUS_SPACE_BARRIER_READ); BWN_BARRIER(mac, BUS_SPACE_BARRIER_WRITE); taskqueue_enqueue(sc->sc_tq, &mac->mac_intrtask); return (FILTER_HANDLED); } static void bwn_intrtask(void *arg, int npending) { struct bwn_mac *mac = arg; struct bwn_softc *sc = mac->mac_sc; uint32_t merged = 0; int i, tx = 0, rx = 0; BWN_LOCK(sc); if (mac->mac_status < BWN_MAC_STATUS_STARTED || (sc->sc_flags & BWN_FLAG_INVALID)) { BWN_UNLOCK(sc); return; } for (i = 0; i < N(mac->mac_reason); i++) merged |= mac->mac_reason[i]; if (mac->mac_reason_intr & BWN_INTR_MAC_TXERR) device_printf(sc->sc_dev, "MAC trans error\n"); if (mac->mac_reason_intr & BWN_INTR_PHY_TXERR) { DPRINTF(sc, BWN_DEBUG_INTR, "%s: PHY trans error\n", __func__); mac->mac_phy.txerrors--; if (mac->mac_phy.txerrors == 0) { mac->mac_phy.txerrors = BWN_TXERROR_MAX; bwn_restart(mac, "PHY TX errors"); } } if (merged & (BWN_DMAINTR_FATALMASK | BWN_DMAINTR_NONFATALMASK)) { if (merged & BWN_DMAINTR_FATALMASK) { device_printf(sc->sc_dev, "Fatal DMA error: %#x %#x %#x %#x %#x %#x\n", mac->mac_reason[0], mac->mac_reason[1], mac->mac_reason[2], mac->mac_reason[3], mac->mac_reason[4], mac->mac_reason[5]); bwn_restart(mac, "DMA error"); BWN_UNLOCK(sc); return; } if (merged & BWN_DMAINTR_NONFATALMASK) { device_printf(sc->sc_dev, "DMA error: %#x %#x %#x %#x %#x %#x\n", mac->mac_reason[0], mac->mac_reason[1], mac->mac_reason[2], mac->mac_reason[3], mac->mac_reason[4], mac->mac_reason[5]); } } if (mac->mac_reason_intr & BWN_INTR_UCODE_DEBUG) bwn_intr_ucode_debug(mac); if (mac->mac_reason_intr & BWN_INTR_TBTT_INDI) bwn_intr_tbtt_indication(mac); if (mac->mac_reason_intr & BWN_INTR_ATIM_END) bwn_intr_atim_end(mac); if (mac->mac_reason_intr & BWN_INTR_BEACON) bwn_intr_beacon(mac); if (mac->mac_reason_intr & BWN_INTR_PMQ) bwn_intr_pmq(mac); if (mac->mac_reason_intr & BWN_INTR_NOISESAMPLE_OK) bwn_intr_noise(mac); if (mac->mac_flags & BWN_MAC_FLAG_DMA) { if (mac->mac_reason[0] & BWN_DMAINTR_RX_DONE) { bwn_dma_rx(mac->mac_method.dma.rx); rx = 1; } } else rx = bwn_pio_rx(&mac->mac_method.pio.rx); KASSERT(!(mac->mac_reason[1] & BWN_DMAINTR_RX_DONE), ("%s", __func__)); KASSERT(!(mac->mac_reason[2] & BWN_DMAINTR_RX_DONE), ("%s", __func__)); KASSERT(!(mac->mac_reason[3] & BWN_DMAINTR_RX_DONE), ("%s", __func__)); KASSERT(!(mac->mac_reason[4] & BWN_DMAINTR_RX_DONE), ("%s", __func__)); KASSERT(!(mac->mac_reason[5] & BWN_DMAINTR_RX_DONE), ("%s", __func__)); if (mac->mac_reason_intr & BWN_INTR_TX_OK) { bwn_intr_txeof(mac); tx = 1; } BWN_WRITE_4(mac, BWN_INTR_MASK, mac->mac_intr_mask); if (sc->sc_blink_led != NULL && sc->sc_led_blink) { int evt = BWN_LED_EVENT_NONE; if (tx && rx) { if (sc->sc_rx_rate > sc->sc_tx_rate) evt = BWN_LED_EVENT_RX; else evt = BWN_LED_EVENT_TX; } else if (tx) { evt = BWN_LED_EVENT_TX; } else if (rx) { evt = BWN_LED_EVENT_RX; } else if (rx == 0) { evt = BWN_LED_EVENT_POLL; } if (evt != BWN_LED_EVENT_NONE) bwn_led_event(mac, evt); } if (mbufq_first(&sc->sc_snd) != NULL) bwn_start(sc); BWN_BARRIER(mac, BUS_SPACE_BARRIER_READ); BWN_BARRIER(mac, BUS_SPACE_BARRIER_WRITE); BWN_UNLOCK(sc); } static void bwn_restart(struct bwn_mac *mac, const char *msg) { struct bwn_softc *sc = mac->mac_sc; struct ieee80211com *ic = &sc->sc_ic; if (mac->mac_status < BWN_MAC_STATUS_INITED) return; device_printf(sc->sc_dev, "HW reset: %s\n", msg); ieee80211_runtask(ic, &mac->mac_hwreset); } static void bwn_intr_ucode_debug(struct bwn_mac *mac) { struct bwn_softc *sc = mac->mac_sc; uint16_t reason; if 
(mac->mac_fw.opensource == 0) return; reason = bwn_shm_read_2(mac, BWN_SCRATCH, BWN_DEBUGINTR_REASON_REG); switch (reason) { case BWN_DEBUGINTR_PANIC: bwn_handle_fwpanic(mac); break; case BWN_DEBUGINTR_DUMP_SHM: device_printf(sc->sc_dev, "BWN_DEBUGINTR_DUMP_SHM\n"); break; case BWN_DEBUGINTR_DUMP_REGS: device_printf(sc->sc_dev, "BWN_DEBUGINTR_DUMP_REGS\n"); break; case BWN_DEBUGINTR_MARKER: device_printf(sc->sc_dev, "BWN_DEBUGINTR_MARKER\n"); break; default: device_printf(sc->sc_dev, "ucode debug unknown reason: %#x\n", reason); } bwn_shm_write_2(mac, BWN_SCRATCH, BWN_DEBUGINTR_REASON_REG, BWN_DEBUGINTR_ACK); } static void bwn_intr_tbtt_indication(struct bwn_mac *mac) { struct bwn_softc *sc = mac->mac_sc; struct ieee80211com *ic = &sc->sc_ic; if (ic->ic_opmode != IEEE80211_M_HOSTAP) bwn_psctl(mac, 0); if (ic->ic_opmode == IEEE80211_M_IBSS) mac->mac_flags |= BWN_MAC_FLAG_DFQVALID; } static void bwn_intr_atim_end(struct bwn_mac *mac) { if (mac->mac_flags & BWN_MAC_FLAG_DFQVALID) { BWN_WRITE_4(mac, BWN_MACCMD, BWN_READ_4(mac, BWN_MACCMD) | BWN_MACCMD_DFQ_VALID); mac->mac_flags &= ~BWN_MAC_FLAG_DFQVALID; } } static void bwn_intr_beacon(struct bwn_mac *mac) { struct bwn_softc *sc = mac->mac_sc; struct ieee80211com *ic = &sc->sc_ic; uint32_t cmd, beacon0, beacon1; if (ic->ic_opmode == IEEE80211_M_HOSTAP || ic->ic_opmode == IEEE80211_M_MBSS) return; mac->mac_intr_mask &= ~BWN_INTR_BEACON; cmd = BWN_READ_4(mac, BWN_MACCMD); beacon0 = (cmd & BWN_MACCMD_BEACON0_VALID); beacon1 = (cmd & BWN_MACCMD_BEACON1_VALID); if (beacon0 && beacon1) { BWN_WRITE_4(mac, BWN_INTR_REASON, BWN_INTR_BEACON); mac->mac_intr_mask |= BWN_INTR_BEACON; return; } if (sc->sc_flags & BWN_FLAG_NEED_BEACON_TP) { sc->sc_flags &= ~BWN_FLAG_NEED_BEACON_TP; bwn_load_beacon0(mac); bwn_load_beacon1(mac); cmd = BWN_READ_4(mac, BWN_MACCMD); cmd |= BWN_MACCMD_BEACON0_VALID; BWN_WRITE_4(mac, BWN_MACCMD, cmd); } else { if (!beacon0) { bwn_load_beacon0(mac); cmd = BWN_READ_4(mac, BWN_MACCMD); cmd |= BWN_MACCMD_BEACON0_VALID; BWN_WRITE_4(mac, BWN_MACCMD, cmd); } else if (!beacon1) { bwn_load_beacon1(mac); cmd = BWN_READ_4(mac, BWN_MACCMD); cmd |= BWN_MACCMD_BEACON1_VALID; BWN_WRITE_4(mac, BWN_MACCMD, cmd); } } } static void bwn_intr_pmq(struct bwn_mac *mac) { uint32_t tmp; while (1) { tmp = BWN_READ_4(mac, BWN_PS_STATUS); if (!(tmp & 0x00000008)) break; } BWN_WRITE_2(mac, BWN_PS_STATUS, 0x0002); } static void bwn_intr_noise(struct bwn_mac *mac) { struct bwn_phy_g *pg = &mac->mac_phy.phy_g; uint16_t tmp; uint8_t noise[4]; uint8_t i, j; int32_t average; if (mac->mac_phy.type != BWN_PHYTYPE_G) return; KASSERT(mac->mac_noise.noi_running, ("%s: fail", __func__)); *((uint32_t *)noise) = htole32(bwn_jssi_read(mac)); if (noise[0] == 0x7f || noise[1] == 0x7f || noise[2] == 0x7f || noise[3] == 0x7f) goto new; KASSERT(mac->mac_noise.noi_nsamples < 8, ("%s:%d: fail", __func__, __LINE__)); i = mac->mac_noise.noi_nsamples; noise[0] = MIN(MAX(noise[0], 0), N(pg->pg_nrssi_lt) - 1); noise[1] = MIN(MAX(noise[1], 0), N(pg->pg_nrssi_lt) - 1); noise[2] = MIN(MAX(noise[2], 0), N(pg->pg_nrssi_lt) - 1); noise[3] = MIN(MAX(noise[3], 0), N(pg->pg_nrssi_lt) - 1); mac->mac_noise.noi_samples[i][0] = pg->pg_nrssi_lt[noise[0]]; mac->mac_noise.noi_samples[i][1] = pg->pg_nrssi_lt[noise[1]]; mac->mac_noise.noi_samples[i][2] = pg->pg_nrssi_lt[noise[2]]; mac->mac_noise.noi_samples[i][3] = pg->pg_nrssi_lt[noise[3]]; mac->mac_noise.noi_nsamples++; if (mac->mac_noise.noi_nsamples == 8) { average = 0; for (i = 0; i < 8; i++) { for (j = 0; j < 4; j++) average += 
mac->mac_noise.noi_samples[i][j]; } average = (((average / 32) * 125) + 64) / 128; tmp = (bwn_shm_read_2(mac, BWN_SHARED, 0x40c) / 128) & 0x1f; if (tmp >= 8) average += 2; else average -= 25; average -= (tmp == 8) ? 72 : 48; mac->mac_stats.link_noise = average; mac->mac_noise.noi_running = 0; return; } new: bwn_noise_gensample(mac); } static int bwn_pio_rx(struct bwn_pio_rxqueue *prq) { struct bwn_mac *mac = prq->prq_mac; struct bwn_softc *sc = mac->mac_sc; unsigned int i; BWN_ASSERT_LOCKED(sc); if (mac->mac_status < BWN_MAC_STATUS_STARTED) return (0); for (i = 0; i < 5000; i++) { if (bwn_pio_rxeof(prq) == 0) break; } if (i >= 5000) device_printf(sc->sc_dev, "too many RX frames in PIO mode\n"); return ((i > 0) ? 1 : 0); } static void bwn_dma_rx(struct bwn_dma_ring *dr) { int slot, curslot; KASSERT(!dr->dr_tx, ("%s:%d: fail", __func__, __LINE__)); curslot = dr->get_curslot(dr); KASSERT(curslot >= 0 && curslot < dr->dr_numslots, ("%s:%d: fail", __func__, __LINE__)); slot = dr->dr_curslot; for (; slot != curslot; slot = bwn_dma_nextslot(dr, slot)) bwn_dma_rxeof(dr, &slot); bus_dmamap_sync(dr->dr_ring_dtag, dr->dr_ring_dmap, BUS_DMASYNC_PREWRITE); dr->set_curslot(dr, slot); dr->dr_curslot = slot; } static void bwn_intr_txeof(struct bwn_mac *mac) { struct bwn_txstatus stat; uint32_t stat0, stat1; uint16_t tmp; BWN_ASSERT_LOCKED(mac->mac_sc); while (1) { stat0 = BWN_READ_4(mac, BWN_XMITSTAT_0); if (!(stat0 & 0x00000001)) break; stat1 = BWN_READ_4(mac, BWN_XMITSTAT_1); DPRINTF(mac->mac_sc, BWN_DEBUG_XMIT, "%s: stat0=0x%08x, stat1=0x%08x\n", __func__, stat0, stat1); stat.cookie = (stat0 >> 16); stat.seq = (stat1 & 0x0000ffff); stat.phy_stat = ((stat1 & 0x00ff0000) >> 16); tmp = (stat0 & 0x0000ffff); stat.framecnt = ((tmp & 0xf000) >> 12); stat.rtscnt = ((tmp & 0x0f00) >> 8); stat.sreason = ((tmp & 0x001c) >> 2); stat.pm = (tmp & 0x0080) ? 1 : 0; stat.im = (tmp & 0x0040) ? 1 : 0; stat.ampdu = (tmp & 0x0020) ? 1 : 0; stat.ack = (tmp & 0x0002) ? 
1 : 0; DPRINTF(mac->mac_sc, BWN_DEBUG_XMIT, "%s: cookie=%d, seq=%d, phystat=0x%02x, framecnt=%d, " "rtscnt=%d, sreason=%d, pm=%d, im=%d, ampdu=%d, ack=%d\n", __func__, stat.cookie, stat.seq, stat.phy_stat, stat.framecnt, stat.rtscnt, stat.sreason, stat.pm, stat.im, stat.ampdu, stat.ack); bwn_handle_txeof(mac, &stat); } } static void bwn_hwreset(void *arg, int npending) { struct bwn_mac *mac = arg; struct bwn_softc *sc = mac->mac_sc; int error = 0; int prev_status; BWN_LOCK(sc); prev_status = mac->mac_status; if (prev_status >= BWN_MAC_STATUS_STARTED) bwn_core_stop(mac); if (prev_status >= BWN_MAC_STATUS_INITED) bwn_core_exit(mac); if (prev_status >= BWN_MAC_STATUS_INITED) { error = bwn_core_init(mac); if (error) goto out; } if (prev_status >= BWN_MAC_STATUS_STARTED) bwn_core_start(mac); out: if (error) { device_printf(sc->sc_dev, "%s: failed (%d)\n", __func__, error); sc->sc_curmac = NULL; } BWN_UNLOCK(sc); } static void bwn_handle_fwpanic(struct bwn_mac *mac) { struct bwn_softc *sc = mac->mac_sc; uint16_t reason; reason = bwn_shm_read_2(mac, BWN_SCRATCH, BWN_FWPANIC_REASON_REG); device_printf(sc->sc_dev,"fw panic (%u)\n", reason); if (reason == BWN_FWPANIC_RESTART) bwn_restart(mac, "ucode panic"); } static void bwn_load_beacon0(struct bwn_mac *mac) { KASSERT(0 == 1, ("%s:%d: fail", __func__, __LINE__)); } static void bwn_load_beacon1(struct bwn_mac *mac) { KASSERT(0 == 1, ("%s:%d: fail", __func__, __LINE__)); } static uint32_t bwn_jssi_read(struct bwn_mac *mac) { uint32_t val = 0; val = bwn_shm_read_2(mac, BWN_SHARED, 0x08a); val <<= 16; val |= bwn_shm_read_2(mac, BWN_SHARED, 0x088); return (val); } static void bwn_noise_gensample(struct bwn_mac *mac) { uint32_t jssi = 0x7f7f7f7f; bwn_shm_write_2(mac, BWN_SHARED, 0x088, (jssi & 0x0000ffff)); bwn_shm_write_2(mac, BWN_SHARED, 0x08a, (jssi & 0xffff0000) >> 16); BWN_WRITE_4(mac, BWN_MACCMD, BWN_READ_4(mac, BWN_MACCMD) | BWN_MACCMD_BGNOISE); } static int bwn_dma_freeslot(struct bwn_dma_ring *dr) { BWN_ASSERT_LOCKED(dr->dr_mac->mac_sc); return (dr->dr_numslots - dr->dr_usedslot); } static int bwn_dma_nextslot(struct bwn_dma_ring *dr, int slot) { BWN_ASSERT_LOCKED(dr->dr_mac->mac_sc); KASSERT(slot >= -1 && slot <= dr->dr_numslots - 1, ("%s:%d: fail", __func__, __LINE__)); if (slot == dr->dr_numslots - 1) return (0); return (slot + 1); } static void bwn_dma_rxeof(struct bwn_dma_ring *dr, int *slot) { struct bwn_mac *mac = dr->dr_mac; struct bwn_softc *sc = mac->mac_sc; struct bwn_dma *dma = &mac->mac_method.dma; struct bwn_dmadesc_generic *desc; struct bwn_dmadesc_meta *meta; struct bwn_rxhdr4 *rxhdr; struct mbuf *m; uint32_t macstat; int32_t tmp; int cnt = 0; uint16_t len; dr->getdesc(dr, *slot, &desc, &meta); bus_dmamap_sync(dma->rxbuf_dtag, meta->mt_dmap, BUS_DMASYNC_POSTREAD); m = meta->mt_m; if (bwn_dma_newbuf(dr, desc, meta, 0)) { counter_u64_add(sc->sc_ic.ic_ierrors, 1); return; } rxhdr = mtod(m, struct bwn_rxhdr4 *); len = le16toh(rxhdr->frame_len); if (len <= 0) { counter_u64_add(sc->sc_ic.ic_ierrors, 1); return; } if (bwn_dma_check_redzone(dr, m)) { device_printf(sc->sc_dev, "redzone error.\n"); bwn_dma_set_redzone(dr, m); bus_dmamap_sync(dma->rxbuf_dtag, meta->mt_dmap, BUS_DMASYNC_PREWRITE); return; } if (len > dr->dr_rx_bufsize) { tmp = len; while (1) { dr->getdesc(dr, *slot, &desc, &meta); bwn_dma_set_redzone(dr, meta->mt_m); bus_dmamap_sync(dma->rxbuf_dtag, meta->mt_dmap, BUS_DMASYNC_PREWRITE); *slot = bwn_dma_nextslot(dr, *slot); cnt++; tmp -= dr->dr_rx_bufsize; if (tmp <= 0) break; } device_printf(sc->sc_dev, "too small buffer " 
"(len %u buffer %u dropped %d)\n", len, dr->dr_rx_bufsize, cnt); return; } switch (mac->mac_fw.fw_hdr_format) { case BWN_FW_HDR_351: case BWN_FW_HDR_410: macstat = le32toh(rxhdr->ps4.r351.mac_status); break; case BWN_FW_HDR_598: macstat = le32toh(rxhdr->ps4.r598.mac_status); break; } if (macstat & BWN_RX_MAC_FCSERR) { if (!(mac->mac_sc->sc_filters & BWN_MACCTL_PASS_BADFCS)) { device_printf(sc->sc_dev, "RX drop\n"); return; } } m->m_len = m->m_pkthdr.len = len + dr->dr_frameoffset; m_adj(m, dr->dr_frameoffset); bwn_rxeof(dr->dr_mac, m, rxhdr); } static void bwn_handle_txeof(struct bwn_mac *mac, const struct bwn_txstatus *status) { struct bwn_softc *sc = mac->mac_sc; struct bwn_stats *stats = &mac->mac_stats; BWN_ASSERT_LOCKED(mac->mac_sc); if (status->im) device_printf(sc->sc_dev, "TODO: STATUS IM\n"); if (status->ampdu) device_printf(sc->sc_dev, "TODO: STATUS AMPDU\n"); if (status->rtscnt) { if (status->rtscnt == 0xf) stats->rtsfail++; else stats->rts++; } if (mac->mac_flags & BWN_MAC_FLAG_DMA) { bwn_dma_handle_txeof(mac, status); } else { bwn_pio_handle_txeof(mac, status); } bwn_phy_txpower_check(mac, 0); } static uint8_t bwn_pio_rxeof(struct bwn_pio_rxqueue *prq) { struct bwn_mac *mac = prq->prq_mac; struct bwn_softc *sc = mac->mac_sc; struct bwn_rxhdr4 rxhdr; struct mbuf *m; uint32_t ctl32, macstat, v32; unsigned int i, padding; uint16_t ctl16, len, totlen, v16; unsigned char *mp; char *data; memset(&rxhdr, 0, sizeof(rxhdr)); if (prq->prq_rev >= 8) { ctl32 = bwn_pio_rx_read_4(prq, BWN_PIO8_RXCTL); if (!(ctl32 & BWN_PIO8_RXCTL_FRAMEREADY)) return (0); bwn_pio_rx_write_4(prq, BWN_PIO8_RXCTL, BWN_PIO8_RXCTL_FRAMEREADY); for (i = 0; i < 10; i++) { ctl32 = bwn_pio_rx_read_4(prq, BWN_PIO8_RXCTL); if (ctl32 & BWN_PIO8_RXCTL_DATAREADY) goto ready; DELAY(10); } } else { ctl16 = bwn_pio_rx_read_2(prq, BWN_PIO_RXCTL); if (!(ctl16 & BWN_PIO_RXCTL_FRAMEREADY)) return (0); bwn_pio_rx_write_2(prq, BWN_PIO_RXCTL, BWN_PIO_RXCTL_FRAMEREADY); for (i = 0; i < 10; i++) { ctl16 = bwn_pio_rx_read_2(prq, BWN_PIO_RXCTL); if (ctl16 & BWN_PIO_RXCTL_DATAREADY) goto ready; DELAY(10); } } device_printf(sc->sc_dev, "%s: timed out\n", __func__); return (1); ready: if (prq->prq_rev >= 8) siba_read_multi_4(sc->sc_dev, &rxhdr, sizeof(rxhdr), prq->prq_base + BWN_PIO8_RXDATA); else siba_read_multi_2(sc->sc_dev, &rxhdr, sizeof(rxhdr), prq->prq_base + BWN_PIO_RXDATA); len = le16toh(rxhdr.frame_len); if (len > 0x700) { device_printf(sc->sc_dev, "%s: len is too big\n", __func__); goto error; } if (len == 0) { device_printf(sc->sc_dev, "%s: len is 0\n", __func__); goto error; } switch (mac->mac_fw.fw_hdr_format) { case BWN_FW_HDR_351: case BWN_FW_HDR_410: macstat = le32toh(rxhdr.ps4.r351.mac_status); break; case BWN_FW_HDR_598: macstat = le32toh(rxhdr.ps4.r598.mac_status); break; } if (macstat & BWN_RX_MAC_FCSERR) { if (!(mac->mac_sc->sc_filters & BWN_MACCTL_PASS_BADFCS)) { device_printf(sc->sc_dev, "%s: FCS error", __func__); goto error; } } padding = (macstat & BWN_RX_MAC_PADDING) ? 
2 : 0; totlen = len + padding; KASSERT(totlen <= MCLBYTES, ("too big..\n")); m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR); if (m == NULL) { device_printf(sc->sc_dev, "%s: out of memory", __func__); goto error; } mp = mtod(m, unsigned char *); if (prq->prq_rev >= 8) { siba_read_multi_4(sc->sc_dev, mp, (totlen & ~3), prq->prq_base + BWN_PIO8_RXDATA); if (totlen & 3) { v32 = bwn_pio_rx_read_4(prq, BWN_PIO8_RXDATA); data = &(mp[totlen - 1]); switch (totlen & 3) { case 3: *data = (v32 >> 16); data--; case 2: *data = (v32 >> 8); data--; case 1: *data = v32; } } } else { siba_read_multi_2(sc->sc_dev, mp, (totlen & ~1), prq->prq_base + BWN_PIO_RXDATA); if (totlen & 1) { v16 = bwn_pio_rx_read_2(prq, BWN_PIO_RXDATA); mp[totlen - 1] = v16; } } m->m_len = m->m_pkthdr.len = totlen; bwn_rxeof(prq->prq_mac, m, &rxhdr); return (1); error: if (prq->prq_rev >= 8) bwn_pio_rx_write_4(prq, BWN_PIO8_RXCTL, BWN_PIO8_RXCTL_DATAREADY); else bwn_pio_rx_write_2(prq, BWN_PIO_RXCTL, BWN_PIO_RXCTL_DATAREADY); return (1); } static int bwn_dma_newbuf(struct bwn_dma_ring *dr, struct bwn_dmadesc_generic *desc, struct bwn_dmadesc_meta *meta, int init) { struct bwn_mac *mac = dr->dr_mac; struct bwn_dma *dma = &mac->mac_method.dma; struct bwn_rxhdr4 *hdr; bus_dmamap_t map; bus_addr_t paddr; struct mbuf *m; int error; m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR); if (m == NULL) { error = ENOBUFS; /* * If the NIC is up and running, we need to: * - Clear RX buffer's header. * - Restore RX descriptor settings. */ if (init) return (error); else goto back; } m->m_len = m->m_pkthdr.len = MCLBYTES; bwn_dma_set_redzone(dr, m); /* * Try to load RX buf into temporary DMA map */ error = bus_dmamap_load_mbuf(dma->rxbuf_dtag, dr->dr_spare_dmap, m, bwn_dma_buf_addr, &paddr, BUS_DMA_NOWAIT); if (error) { m_freem(m); /* * See the comment above */ if (init) return (error); else goto back; } if (!init) bus_dmamap_unload(dma->rxbuf_dtag, meta->mt_dmap); meta->mt_m = m; meta->mt_paddr = paddr; /* * Swap RX buf's DMA map with the loaded temporary one */ map = meta->mt_dmap; meta->mt_dmap = dr->dr_spare_dmap; dr->dr_spare_dmap = map; back: /* * Clear RX buf header */ hdr = mtod(meta->mt_m, struct bwn_rxhdr4 *); bzero(hdr, sizeof(*hdr)); bus_dmamap_sync(dma->rxbuf_dtag, meta->mt_dmap, BUS_DMASYNC_PREWRITE); /* * Setup RX buf descriptor */ dr->setdesc(dr, desc, meta->mt_paddr, meta->mt_m->m_len - sizeof(*hdr), 0, 0, 0); return (error); } static void bwn_dma_buf_addr(void *arg, bus_dma_segment_t *seg, int nseg, bus_size_t mapsz __unused, int error) { if (!error) { KASSERT(nseg == 1, ("too many segments(%d)\n", nseg)); *((bus_addr_t *)arg) = seg->ds_addr; } } static int bwn_hwrate2ieeerate(int rate) { switch (rate) { case BWN_CCK_RATE_1MB: return (2); case BWN_CCK_RATE_2MB: return (4); case BWN_CCK_RATE_5MB: return (11); case BWN_CCK_RATE_11MB: return (22); case BWN_OFDM_RATE_6MB: return (12); case BWN_OFDM_RATE_9MB: return (18); case BWN_OFDM_RATE_12MB: return (24); case BWN_OFDM_RATE_18MB: return (36); case BWN_OFDM_RATE_24MB: return (48); case BWN_OFDM_RATE_36MB: return (72); case BWN_OFDM_RATE_48MB: return (96); case BWN_OFDM_RATE_54MB: return (108); default: printf("Ooops\n"); return (0); } } /* * Post process the RX provided RSSI. * * Valid for A, B, G, LP PHYs. 
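 * The raw RSSI byte from the RX header is adjusted according to the
 * radio (rf_ver 0x2050/0x2060) and, on 0x2050, the SPROM NRSSI table
 * when BWN_BFL_RSSI is set.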
*/ static int8_t bwn_rx_rssi_calc(struct bwn_mac *mac, uint8_t in_rssi, int ofdm, int adjust_2053, int adjust_2050) { struct bwn_phy *phy = &mac->mac_phy; struct bwn_phy_g *gphy = &phy->phy_g; int tmp; switch (phy->rf_ver) { case 0x2050: if (ofdm) { tmp = in_rssi; if (tmp > 127) tmp -= 256; tmp = tmp * 73 / 64; if (adjust_2050) tmp += 25; else tmp -= 3; } else { if (siba_sprom_get_bf_lo(mac->mac_sc->sc_dev) & BWN_BFL_RSSI) { if (in_rssi > 63) in_rssi = 63; tmp = gphy->pg_nrssi_lt[in_rssi]; tmp = (31 - tmp) * -131 / 128 - 57; } else { tmp = in_rssi; tmp = (31 - tmp) * -149 / 128 - 68; } if (phy->type == BWN_PHYTYPE_G && adjust_2050) tmp += 25; } break; case 0x2060: if (in_rssi > 127) tmp = in_rssi - 256; else tmp = in_rssi; break; default: tmp = in_rssi; tmp = (tmp - 11) * 103 / 64; if (adjust_2053) tmp -= 109; else tmp -= 83; } return (tmp); } static void bwn_rxeof(struct bwn_mac *mac, struct mbuf *m, const void *_rxhdr) { const struct bwn_rxhdr4 *rxhdr = _rxhdr; struct bwn_plcp6 *plcp; struct bwn_softc *sc = mac->mac_sc; struct ieee80211_frame_min *wh; struct ieee80211_node *ni; struct ieee80211com *ic = &sc->sc_ic; uint32_t macstat; int padding, rate, rssi = 0, noise = 0, type; uint16_t phytype, phystat0, phystat3, chanstat; unsigned char *mp = mtod(m, unsigned char *); static int rx_mac_dec_rpt = 0; BWN_ASSERT_LOCKED(sc); phystat0 = le16toh(rxhdr->phy_status0); /* * XXX Note: phy_status3 doesn't exist for HT-PHY; it's only * used for LP-PHY. */ phystat3 = le16toh(rxhdr->ps3.lp.phy_status3); switch (mac->mac_fw.fw_hdr_format) { case BWN_FW_HDR_351: case BWN_FW_HDR_410: macstat = le32toh(rxhdr->ps4.r351.mac_status); chanstat = le16toh(rxhdr->ps4.r351.channel); break; case BWN_FW_HDR_598: macstat = le32toh(rxhdr->ps4.r598.mac_status); chanstat = le16toh(rxhdr->ps4.r598.channel); break; } phytype = chanstat & BWN_RX_CHAN_PHYTYPE; if (macstat & BWN_RX_MAC_FCSERR) device_printf(sc->sc_dev, "TODO RX: RX_FLAG_FAILED_FCS_CRC\n"); if (phystat0 & (BWN_RX_PHYST0_PLCPHCF | BWN_RX_PHYST0_PLCPFV)) device_printf(sc->sc_dev, "TODO RX: RX_FLAG_FAILED_PLCP_CRC\n"); if (macstat & BWN_RX_MAC_DECERR) goto drop; padding = (macstat & BWN_RX_MAC_PADDING) ? 2 : 0; if (m->m_pkthdr.len < (sizeof(struct bwn_plcp6) + padding)) { device_printf(sc->sc_dev, "frame too short (length=%d)\n", m->m_pkthdr.len); goto drop; } plcp = (struct bwn_plcp6 *)(mp + padding); m_adj(m, sizeof(struct bwn_plcp6) + padding); if (m->m_pkthdr.len < IEEE80211_MIN_LEN) { device_printf(sc->sc_dev, "frame too short (length=%d)\n", m->m_pkthdr.len); goto drop; } wh = mtod(m, struct ieee80211_frame_min *); if (macstat & BWN_RX_MAC_DEC && rx_mac_dec_rpt++ < 50) device_printf(sc->sc_dev, "RX decryption attempted (old %d keyidx %#x)\n", BWN_ISOLDFMT(mac), (macstat & BWN_RX_MAC_KEYIDX) >> BWN_RX_MAC_KEYIDX_SHIFT); if (phystat0 & BWN_RX_PHYST0_OFDM) rate = bwn_plcp_get_ofdmrate(mac, plcp, phytype == BWN_PHYTYPE_A); else rate = bwn_plcp_get_cckrate(mac, plcp); if (rate == -1) { if (!(mac->mac_sc->sc_filters & BWN_MACCTL_PASS_BADPLCP)) goto drop; } sc->sc_rx_rate = bwn_hwrate2ieeerate(rate); /* rssi/noise */ switch (phytype) { case BWN_PHYTYPE_A: case BWN_PHYTYPE_B: case BWN_PHYTYPE_G: case BWN_PHYTYPE_LP: rssi = bwn_rx_rssi_calc(mac, rxhdr->phy.abg.rssi, !! (phystat0 & BWN_RX_PHYST0_OFDM), !! (phystat0 & BWN_RX_PHYST0_GAINCTL), !! 
(phystat3 & BWN_RX_PHYST3_TRSTATE)); break; case BWN_PHYTYPE_N: /* Broadcom has code for min/avg, but always used max */ if (rxhdr->phy.n.power0 == 16 || rxhdr->phy.n.power0 == 32) rssi = max(rxhdr->phy.n.power1, rxhdr->ps2.n.power2); else rssi = max(rxhdr->phy.n.power0, rxhdr->phy.n.power1); #if 0 DPRINTF(mac->mac_sc, BWN_DEBUG_RECV, "%s: power0=%d, power1=%d, power2=%d\n", __func__, rxhdr->phy.n.power0, rxhdr->phy.n.power1, rxhdr->ps2.n.power2); #endif break; default: /* XXX TODO: implement rssi for other PHYs */ break; } /* * RSSI here is absolute, not relative to the noise floor. */ noise = mac->mac_stats.link_noise; rssi = rssi - noise; /* RX radio tap */ if (ieee80211_radiotap_active(ic)) bwn_rx_radiotap(mac, m, rxhdr, plcp, rate, rssi, noise); m_adj(m, -IEEE80211_CRC_LEN); BWN_UNLOCK(sc); ni = ieee80211_find_rxnode(ic, wh); if (ni != NULL) { type = ieee80211_input(ni, m, rssi, noise); ieee80211_free_node(ni); } else type = ieee80211_input_all(ic, m, rssi, noise); BWN_LOCK(sc); return; drop: device_printf(sc->sc_dev, "%s: dropped\n", __func__); } static void bwn_ratectl_tx_complete(const struct ieee80211_node *ni, const struct bwn_txstatus *status) { struct ieee80211_ratectl_tx_status txs; int retrycnt = 0; /* * If we don't get an ACK, then we should log the * full framecnt. That may be 0 if it's a PHY * failure, so ensure that gets logged as some * retry attempt. */ txs.flags = IEEE80211_RATECTL_STATUS_LONG_RETRY; if (status->ack) { txs.status = IEEE80211_RATECTL_TX_SUCCESS; retrycnt = status->framecnt - 1; } else { txs.status = IEEE80211_RATECTL_TX_FAIL_UNSPECIFIED; retrycnt = status->framecnt; if (retrycnt == 0) retrycnt = 1; } txs.long_retries = retrycnt; ieee80211_ratectl_tx_complete(ni, &txs); } static void bwn_dma_handle_txeof(struct bwn_mac *mac, const struct bwn_txstatus *status) { struct bwn_dma *dma = &mac->mac_method.dma; struct bwn_dma_ring *dr; struct bwn_dmadesc_generic *desc; struct bwn_dmadesc_meta *meta; struct bwn_softc *sc = mac->mac_sc; int slot; BWN_ASSERT_LOCKED(sc); dr = bwn_dma_parse_cookie(mac, status, status->cookie, &slot); if (dr == NULL) { device_printf(sc->sc_dev, "failed to parse cookie\n"); return; } KASSERT(dr->dr_tx, ("%s:%d: fail", __func__, __LINE__)); while (1) { KASSERT(slot >= 0 && slot < dr->dr_numslots, ("%s:%d: fail", __func__, __LINE__)); dr->getdesc(dr, slot, &desc, &meta); if (meta->mt_txtype == BWN_DMADESC_METATYPE_HEADER) bus_dmamap_unload(dr->dr_txring_dtag, meta->mt_dmap); else if (meta->mt_txtype == BWN_DMADESC_METATYPE_BODY) bus_dmamap_unload(dma->txbuf_dtag, meta->mt_dmap); if (meta->mt_islast) { KASSERT(meta->mt_m != NULL, ("%s:%d: fail", __func__, __LINE__)); bwn_ratectl_tx_complete(meta->mt_ni, status); ieee80211_tx_complete(meta->mt_ni, meta->mt_m, 0); meta->mt_ni = NULL; meta->mt_m = NULL; } else KASSERT(meta->mt_m == NULL, ("%s:%d: fail", __func__, __LINE__)); dr->dr_usedslot--; if (meta->mt_islast) break; slot = bwn_dma_nextslot(dr, slot); } sc->sc_watchdog_timer = 0; if (dr->dr_stop) { KASSERT(bwn_dma_freeslot(dr) >= BWN_TX_SLOTS_PER_FRAME, ("%s:%d: fail", __func__, __LINE__)); dr->dr_stop = 0; } } static void bwn_pio_handle_txeof(struct bwn_mac *mac, const struct bwn_txstatus *status) { struct bwn_pio_txqueue *tq; struct bwn_pio_txpkt *tp = NULL; struct bwn_softc *sc = mac->mac_sc; BWN_ASSERT_LOCKED(sc); tq = bwn_pio_parse_cookie(mac, status->cookie, &tp); if (tq == NULL) return; tq->tq_used -= roundup(tp->tp_m->m_pkthdr.len + BWN_HDRSIZE(mac), 4); tq->tq_free++; /* XXX ieee80211_tx_complete()? 
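 * For now the PIO path completes the frame by hand below: run the
 * rate-control/TX callback, drop the node reference and free the mbuf.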
*/ if (tp->tp_ni != NULL) { /* * Do any tx complete callback. Note this must * be done before releasing the node reference. */ bwn_ratectl_tx_complete(tp->tp_ni, status); if (tp->tp_m->m_flags & M_TXCB) ieee80211_process_callback(tp->tp_ni, tp->tp_m, 0); ieee80211_free_node(tp->tp_ni); tp->tp_ni = NULL; } m_freem(tp->tp_m); tp->tp_m = NULL; TAILQ_INSERT_TAIL(&tq->tq_pktlist, tp, tp_list); sc->sc_watchdog_timer = 0; } static void bwn_phy_txpower_check(struct bwn_mac *mac, uint32_t flags) { struct bwn_softc *sc = mac->mac_sc; struct bwn_phy *phy = &mac->mac_phy; struct ieee80211com *ic = &sc->sc_ic; unsigned long now; bwn_txpwr_result_t result; BWN_GETTIME(now); if (!(flags & BWN_TXPWR_IGNORE_TIME) && ieee80211_time_before(now, phy->nexttime)) return; phy->nexttime = now + 2 * 1000; if (siba_get_pci_subvendor(sc->sc_dev) == SIBA_BOARDVENDOR_BCM && siba_get_pci_subdevice(sc->sc_dev) == SIBA_BOARD_BU4306) return; if (phy->recalc_txpwr != NULL) { result = phy->recalc_txpwr(mac, (flags & BWN_TXPWR_IGNORE_TSSI) ? 1 : 0); if (result == BWN_TXPWR_RES_DONE) return; KASSERT(result == BWN_TXPWR_RES_NEED_ADJUST, ("%s: fail", __func__)); KASSERT(phy->set_txpwr != NULL, ("%s: fail", __func__)); ieee80211_runtask(ic, &mac->mac_txpower); } } static uint16_t bwn_pio_rx_read_2(struct bwn_pio_rxqueue *prq, uint16_t offset) { return (BWN_READ_2(prq->prq_mac, prq->prq_base + offset)); } static uint32_t bwn_pio_rx_read_4(struct bwn_pio_rxqueue *prq, uint16_t offset) { return (BWN_READ_4(prq->prq_mac, prq->prq_base + offset)); } static void bwn_pio_rx_write_2(struct bwn_pio_rxqueue *prq, uint16_t offset, uint16_t value) { BWN_WRITE_2(prq->prq_mac, prq->prq_base + offset, value); } static void bwn_pio_rx_write_4(struct bwn_pio_rxqueue *prq, uint16_t offset, uint32_t value) { BWN_WRITE_4(prq->prq_mac, prq->prq_base + offset, value); } static int bwn_ieeerate2hwrate(struct bwn_softc *sc, int rate) { switch (rate) { /* OFDM rates (cf IEEE Std 802.11a-1999, pp. 14 Table 80) */ case 12: return (BWN_OFDM_RATE_6MB); case 18: return (BWN_OFDM_RATE_9MB); case 24: return (BWN_OFDM_RATE_12MB); case 36: return (BWN_OFDM_RATE_18MB); case 48: return (BWN_OFDM_RATE_24MB); case 72: return (BWN_OFDM_RATE_36MB); case 96: return (BWN_OFDM_RATE_48MB); case 108: return (BWN_OFDM_RATE_54MB); /* CCK rates (NB: not IEEE std, device-specific) */ case 2: return (BWN_CCK_RATE_1MB); case 4: return (BWN_CCK_RATE_2MB); case 11: return (BWN_CCK_RATE_5MB); case 22: return (BWN_CCK_RATE_11MB); } device_printf(sc->sc_dev, "unsupported rate %d\n", rate); return (BWN_CCK_RATE_1MB); } static uint16_t bwn_set_txhdr_phyctl1(struct bwn_mac *mac, uint8_t bitrate) { struct bwn_phy *phy = &mac->mac_phy; uint16_t control = 0; uint16_t bw; /* XXX TODO: this is for LP phy, what about N-PHY, etc? 
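 * Only 20 MHz operation is encoded here; the bits below select the
 * coding rate and modulation for the legacy CCK/OFDM rates.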
*/ bw = BWN_TXH_PHY1_BW_20; if (BWN_ISCCKRATE(bitrate) && phy->type != BWN_PHYTYPE_LP) { control = bw; } else { control = bw; /* Figure out coding rate and modulation */ /* XXX TODO: table-ize, for MCS transmit */ /* Note: this is BWN_*_RATE values */ switch (bitrate) { case BWN_CCK_RATE_1MB: control |= 0; break; case BWN_CCK_RATE_2MB: control |= 1; break; case BWN_CCK_RATE_5MB: control |= 2; break; case BWN_CCK_RATE_11MB: control |= 3; break; case BWN_OFDM_RATE_6MB: control |= BWN_TXH_PHY1_CRATE_1_2; control |= BWN_TXH_PHY1_MODUL_BPSK; break; case BWN_OFDM_RATE_9MB: control |= BWN_TXH_PHY1_CRATE_3_4; control |= BWN_TXH_PHY1_MODUL_BPSK; break; case BWN_OFDM_RATE_12MB: control |= BWN_TXH_PHY1_CRATE_1_2; control |= BWN_TXH_PHY1_MODUL_QPSK; break; case BWN_OFDM_RATE_18MB: control |= BWN_TXH_PHY1_CRATE_3_4; control |= BWN_TXH_PHY1_MODUL_QPSK; break; case BWN_OFDM_RATE_24MB: control |= BWN_TXH_PHY1_CRATE_1_2; control |= BWN_TXH_PHY1_MODUL_QAM16; break; case BWN_OFDM_RATE_36MB: control |= BWN_TXH_PHY1_CRATE_3_4; control |= BWN_TXH_PHY1_MODUL_QAM16; break; case BWN_OFDM_RATE_48MB: control |= BWN_TXH_PHY1_CRATE_1_2; control |= BWN_TXH_PHY1_MODUL_QAM64; break; case BWN_OFDM_RATE_54MB: control |= BWN_TXH_PHY1_CRATE_3_4; control |= BWN_TXH_PHY1_MODUL_QAM64; break; default: break; } control |= BWN_TXH_PHY1_MODE_SISO; } return control; } static int bwn_set_txhdr(struct bwn_mac *mac, struct ieee80211_node *ni, struct mbuf *m, struct bwn_txhdr *txhdr, uint16_t cookie) { const struct bwn_phy *phy = &mac->mac_phy; struct bwn_softc *sc = mac->mac_sc; struct ieee80211_frame *wh; struct ieee80211_frame *protwh; struct ieee80211_frame_cts *cts; struct ieee80211_frame_rts *rts; const struct ieee80211_txparam *tp = ni->ni_txparms; struct ieee80211vap *vap = ni->ni_vap; struct ieee80211com *ic = &sc->sc_ic; struct mbuf *mprot; unsigned int len; uint32_t macctl = 0; int protdur, rts_rate, rts_rate_fb, ismcast, isshort, rix, type; uint16_t phyctl = 0; uint8_t rate, rate_fb; int fill_phy_ctl1 = 0; wh = mtod(m, struct ieee80211_frame *); memset(txhdr, 0, sizeof(*txhdr)); type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK; ismcast = IEEE80211_IS_MULTICAST(wh->i_addr1); isshort = (ic->ic_flags & IEEE80211_F_SHPREAMBLE) != 0; if ((phy->type == BWN_PHYTYPE_N) || (phy->type == BWN_PHYTYPE_LP) || (phy->type == BWN_PHYTYPE_HT)) fill_phy_ctl1 = 1; /* * Find TX rate */ if (type != IEEE80211_FC0_TYPE_DATA || (m->m_flags & M_EAPOL)) rate = rate_fb = tp->mgmtrate; else if (ismcast) rate = rate_fb = tp->mcastrate; else if (tp->ucastrate != IEEE80211_FIXED_RATE_NONE) rate = rate_fb = tp->ucastrate; else { rix = ieee80211_ratectl_rate(ni, NULL, 0); rate = ni->ni_txrate; if (rix > 0) rate_fb = ni->ni_rates.rs_rates[rix - 1] & IEEE80211_RATE_VAL; else rate_fb = rate; } sc->sc_tx_rate = rate; /* Note: this maps the select ieee80211 rate to hardware rate */ rate = bwn_ieeerate2hwrate(sc, rate); rate_fb = bwn_ieeerate2hwrate(sc, rate_fb); txhdr->phyrate = (BWN_ISOFDMRATE(rate)) ? 
bwn_plcp_getofdm(rate) : bwn_plcp_getcck(rate); bcopy(wh->i_fc, txhdr->macfc, sizeof(txhdr->macfc)); bcopy(wh->i_addr1, txhdr->addr1, IEEE80211_ADDR_LEN); /* XXX rate/rate_fb is the hardware rate */ if ((rate_fb == rate) || (*(u_int16_t *)wh->i_dur & htole16(0x8000)) || (*(u_int16_t *)wh->i_dur == htole16(0))) txhdr->dur_fb = *(u_int16_t *)wh->i_dur; else txhdr->dur_fb = ieee80211_compute_duration(ic->ic_rt, m->m_pkthdr.len, rate, isshort); /* XXX TX encryption */ switch (mac->mac_fw.fw_hdr_format) { case BWN_FW_HDR_351: bwn_plcp_genhdr((struct bwn_plcp4 *)(&txhdr->body.r351.plcp), m->m_pkthdr.len + IEEE80211_CRC_LEN, rate); break; case BWN_FW_HDR_410: bwn_plcp_genhdr((struct bwn_plcp4 *)(&txhdr->body.r410.plcp), m->m_pkthdr.len + IEEE80211_CRC_LEN, rate); break; case BWN_FW_HDR_598: bwn_plcp_genhdr((struct bwn_plcp4 *)(&txhdr->body.r598.plcp), m->m_pkthdr.len + IEEE80211_CRC_LEN, rate); break; } bwn_plcp_genhdr((struct bwn_plcp4 *)(&txhdr->plcp_fb), m->m_pkthdr.len + IEEE80211_CRC_LEN, rate_fb); txhdr->eftypes |= (BWN_ISOFDMRATE(rate_fb)) ? BWN_TX_EFT_FB_OFDM : BWN_TX_EFT_FB_CCK; txhdr->chan = phy->chan; phyctl |= (BWN_ISOFDMRATE(rate)) ? BWN_TX_PHY_ENC_OFDM : BWN_TX_PHY_ENC_CCK; /* XXX preamble? obey net80211 */ if (isshort && (rate == BWN_CCK_RATE_2MB || rate == BWN_CCK_RATE_5MB || rate == BWN_CCK_RATE_11MB)) phyctl |= BWN_TX_PHY_SHORTPRMBL; if (! phy->gmode) macctl |= BWN_TX_MAC_5GHZ; /* XXX TX antenna selection */ switch (bwn_antenna_sanitize(mac, 0)) { case 0: phyctl |= BWN_TX_PHY_ANT01AUTO; break; case 1: phyctl |= BWN_TX_PHY_ANT0; break; case 2: phyctl |= BWN_TX_PHY_ANT1; break; case 3: phyctl |= BWN_TX_PHY_ANT2; break; case 4: phyctl |= BWN_TX_PHY_ANT3; break; default: KASSERT(0 == 1, ("%s:%d: fail", __func__, __LINE__)); } if (!ismcast) macctl |= BWN_TX_MAC_ACK; macctl |= (BWN_TX_MAC_HWSEQ | BWN_TX_MAC_START_MSDU); if (!IEEE80211_IS_MULTICAST(wh->i_addr1) && m->m_pkthdr.len + IEEE80211_CRC_LEN > vap->iv_rtsthreshold) macctl |= BWN_TX_MAC_LONGFRAME; if (ic->ic_flags & IEEE80211_F_USEPROT) { /* Note: don't fall back to CCK rates for 5G */ if (phy->gmode) rts_rate = BWN_CCK_RATE_1MB; else rts_rate = BWN_OFDM_RATE_6MB; rts_rate_fb = bwn_get_fbrate(rts_rate); /* XXX 'rate' here is hardware rate now, not the net80211 rate */ protdur = ieee80211_compute_duration(ic->ic_rt, m->m_pkthdr.len, rate, isshort) + + ieee80211_ack_duration(ic->ic_rt, rate, isshort); if (ic->ic_protmode == IEEE80211_PROT_CTSONLY) { switch (mac->mac_fw.fw_hdr_format) { case BWN_FW_HDR_351: cts = (struct ieee80211_frame_cts *) txhdr->body.r351.rts_frame; break; case BWN_FW_HDR_410: cts = (struct ieee80211_frame_cts *) txhdr->body.r410.rts_frame; break; case BWN_FW_HDR_598: cts = (struct ieee80211_frame_cts *) txhdr->body.r598.rts_frame; break; } mprot = ieee80211_alloc_cts(ic, ni->ni_vap->iv_myaddr, protdur); KASSERT(mprot != NULL, ("failed to alloc mbuf\n")); bcopy(mtod(mprot, uint8_t *), (uint8_t *)cts, mprot->m_pkthdr.len); m_freem(mprot); macctl |= BWN_TX_MAC_SEND_CTSTOSELF; len = sizeof(struct ieee80211_frame_cts); } else { switch (mac->mac_fw.fw_hdr_format) { case BWN_FW_HDR_351: rts = (struct ieee80211_frame_rts *) txhdr->body.r351.rts_frame; break; case BWN_FW_HDR_410: rts = (struct ieee80211_frame_rts *) txhdr->body.r410.rts_frame; break; case BWN_FW_HDR_598: rts = (struct ieee80211_frame_rts *) txhdr->body.r598.rts_frame; break; } /* XXX rate/rate_fb is the hardware rate */ protdur += ieee80211_ack_duration(ic->ic_rt, rate, isshort); mprot = ieee80211_alloc_rts(ic, wh->i_addr1, wh->i_addr2, protdur); 
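/* Copy the generated RTS frame into the firmware TX header and free the temporary mbuf. */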
KASSERT(mprot != NULL, ("failed to alloc mbuf\n")); bcopy(mtod(mprot, uint8_t *), (uint8_t *)rts, mprot->m_pkthdr.len); m_freem(mprot); macctl |= BWN_TX_MAC_SEND_RTSCTS; len = sizeof(struct ieee80211_frame_rts); } len += IEEE80211_CRC_LEN; switch (mac->mac_fw.fw_hdr_format) { case BWN_FW_HDR_351: bwn_plcp_genhdr((struct bwn_plcp4 *) &txhdr->body.r351.rts_plcp, len, rts_rate); break; case BWN_FW_HDR_410: bwn_plcp_genhdr((struct bwn_plcp4 *) &txhdr->body.r410.rts_plcp, len, rts_rate); break; case BWN_FW_HDR_598: bwn_plcp_genhdr((struct bwn_plcp4 *) &txhdr->body.r598.rts_plcp, len, rts_rate); break; } bwn_plcp_genhdr((struct bwn_plcp4 *)&txhdr->rts_plcp_fb, len, rts_rate_fb); switch (mac->mac_fw.fw_hdr_format) { case BWN_FW_HDR_351: protwh = (struct ieee80211_frame *) &txhdr->body.r351.rts_frame; break; case BWN_FW_HDR_410: protwh = (struct ieee80211_frame *) &txhdr->body.r410.rts_frame; break; case BWN_FW_HDR_598: protwh = (struct ieee80211_frame *) &txhdr->body.r598.rts_frame; break; } txhdr->rts_dur_fb = *(u_int16_t *)protwh->i_dur; if (BWN_ISOFDMRATE(rts_rate)) { txhdr->eftypes |= BWN_TX_EFT_RTS_OFDM; txhdr->phyrate_rts = bwn_plcp_getofdm(rts_rate); } else { txhdr->eftypes |= BWN_TX_EFT_RTS_CCK; txhdr->phyrate_rts = bwn_plcp_getcck(rts_rate); } txhdr->eftypes |= (BWN_ISOFDMRATE(rts_rate_fb)) ? BWN_TX_EFT_RTS_FBOFDM : BWN_TX_EFT_RTS_FBCCK; if (fill_phy_ctl1) { txhdr->phyctl_1rts = htole16(bwn_set_txhdr_phyctl1(mac, rts_rate)); txhdr->phyctl_1rtsfb = htole16(bwn_set_txhdr_phyctl1(mac, rts_rate_fb)); } } if (fill_phy_ctl1) { txhdr->phyctl_1 = htole16(bwn_set_txhdr_phyctl1(mac, rate)); txhdr->phyctl_1fb = htole16(bwn_set_txhdr_phyctl1(mac, rate_fb)); } switch (mac->mac_fw.fw_hdr_format) { case BWN_FW_HDR_351: txhdr->body.r351.cookie = htole16(cookie); break; case BWN_FW_HDR_410: txhdr->body.r410.cookie = htole16(cookie); break; case BWN_FW_HDR_598: txhdr->body.r598.cookie = htole16(cookie); break; } txhdr->macctl = htole32(macctl); txhdr->phyctl = htole16(phyctl); /* * TX radio tap */ if (ieee80211_radiotap_active_vap(vap)) { sc->sc_tx_th.wt_flags = 0; if (wh->i_fc[1] & IEEE80211_FC1_PROTECTED) sc->sc_tx_th.wt_flags |= IEEE80211_RADIOTAP_F_WEP; if (isshort && (rate == BWN_CCK_RATE_2MB || rate == BWN_CCK_RATE_5MB || rate == BWN_CCK_RATE_11MB)) sc->sc_tx_th.wt_flags |= IEEE80211_RADIOTAP_F_SHORTPRE; sc->sc_tx_th.wt_rate = rate; ieee80211_radiotap_tx(vap, m); } return (0); } static void bwn_plcp_genhdr(struct bwn_plcp4 *plcp, const uint16_t octets, const uint8_t rate) { uint32_t d, plen; uint8_t *raw = plcp->o.raw; if (BWN_ISOFDMRATE(rate)) { d = bwn_plcp_getofdm(rate); KASSERT(!(octets & 0xf000), ("%s:%d: fail", __func__, __LINE__)); d |= (octets << 5); plcp->o.data = htole32(d); } else { plen = octets * 16 / rate; if ((octets * 16 % rate) > 0) { plen++; if ((rate == BWN_CCK_RATE_11MB) && ((octets * 8 % 11) < 4)) { raw[1] = 0x84; } else raw[1] = 0x04; } else raw[1] = 0x04; plcp->o.data |= htole32(plen << 16); raw[0] = bwn_plcp_getcck(rate); } } static uint8_t bwn_antenna_sanitize(struct bwn_mac *mac, uint8_t n) { struct bwn_softc *sc = mac->mac_sc; uint8_t mask; if (n == 0) return (0); if (mac->mac_phy.gmode) mask = siba_sprom_get_ant_bg(sc->sc_dev); else mask = siba_sprom_get_ant_a(sc->sc_dev); if (!(mask & (1 << (n - 1)))) return (0); return (n); } /* * Return a fallback rate for the given rate. * * Note: Don't fall back from OFDM to CCK. 
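 * Each rate falls back to the next lower rate within its own family;
 * 1MB CCK and 6MB OFDM fall back to themselves.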
*/ static uint8_t bwn_get_fbrate(uint8_t bitrate) { switch (bitrate) { /* CCK */ case BWN_CCK_RATE_1MB: return (BWN_CCK_RATE_1MB); case BWN_CCK_RATE_2MB: return (BWN_CCK_RATE_1MB); case BWN_CCK_RATE_5MB: return (BWN_CCK_RATE_2MB); case BWN_CCK_RATE_11MB: return (BWN_CCK_RATE_5MB); /* OFDM */ case BWN_OFDM_RATE_6MB: return (BWN_OFDM_RATE_6MB); case BWN_OFDM_RATE_9MB: return (BWN_OFDM_RATE_6MB); case BWN_OFDM_RATE_12MB: return (BWN_OFDM_RATE_9MB); case BWN_OFDM_RATE_18MB: return (BWN_OFDM_RATE_12MB); case BWN_OFDM_RATE_24MB: return (BWN_OFDM_RATE_18MB); case BWN_OFDM_RATE_36MB: return (BWN_OFDM_RATE_24MB); case BWN_OFDM_RATE_48MB: return (BWN_OFDM_RATE_36MB); case BWN_OFDM_RATE_54MB: return (BWN_OFDM_RATE_48MB); } KASSERT(0 == 1, ("%s:%d: fail", __func__, __LINE__)); return (0); } static uint32_t bwn_pio_write_multi_4(struct bwn_mac *mac, struct bwn_pio_txqueue *tq, uint32_t ctl, const void *_data, int len) { struct bwn_softc *sc = mac->mac_sc; uint32_t value = 0; const uint8_t *data = _data; ctl |= BWN_PIO8_TXCTL_0_7 | BWN_PIO8_TXCTL_8_15 | BWN_PIO8_TXCTL_16_23 | BWN_PIO8_TXCTL_24_31; bwn_pio_write_4(mac, tq, BWN_PIO8_TXCTL, ctl); siba_write_multi_4(sc->sc_dev, data, (len & ~3), tq->tq_base + BWN_PIO8_TXDATA); if (len & 3) { ctl &= ~(BWN_PIO8_TXCTL_8_15 | BWN_PIO8_TXCTL_16_23 | BWN_PIO8_TXCTL_24_31); data = &(data[len - 1]); switch (len & 3) { case 3: ctl |= BWN_PIO8_TXCTL_16_23; value |= (uint32_t)(*data) << 16; data--; case 2: ctl |= BWN_PIO8_TXCTL_8_15; value |= (uint32_t)(*data) << 8; data--; case 1: value |= (uint32_t)(*data); } bwn_pio_write_4(mac, tq, BWN_PIO8_TXCTL, ctl); bwn_pio_write_4(mac, tq, BWN_PIO8_TXDATA, value); } return (ctl); } static void bwn_pio_write_4(struct bwn_mac *mac, struct bwn_pio_txqueue *tq, uint16_t offset, uint32_t value) { BWN_WRITE_4(mac, tq->tq_base + offset, value); } static uint16_t bwn_pio_write_multi_2(struct bwn_mac *mac, struct bwn_pio_txqueue *tq, uint16_t ctl, const void *_data, int len) { struct bwn_softc *sc = mac->mac_sc; const uint8_t *data = _data; ctl |= BWN_PIO_TXCTL_WRITELO | BWN_PIO_TXCTL_WRITEHI; BWN_PIO_WRITE_2(mac, tq, BWN_PIO_TXCTL, ctl); siba_write_multi_2(sc->sc_dev, data, (len & ~1), tq->tq_base + BWN_PIO_TXDATA); if (len & 1) { ctl &= ~BWN_PIO_TXCTL_WRITEHI; BWN_PIO_WRITE_2(mac, tq, BWN_PIO_TXCTL, ctl); BWN_PIO_WRITE_2(mac, tq, BWN_PIO_TXDATA, data[len - 1]); } return (ctl); } static uint16_t bwn_pio_write_mbuf_2(struct bwn_mac *mac, struct bwn_pio_txqueue *tq, uint16_t ctl, struct mbuf *m0) { int i, j = 0; uint16_t data = 0; const uint8_t *buf; struct mbuf *m = m0; ctl |= BWN_PIO_TXCTL_WRITELO | BWN_PIO_TXCTL_WRITEHI; BWN_PIO_WRITE_2(mac, tq, BWN_PIO_TXCTL, ctl); for (; m != NULL; m = m->m_next) { buf = mtod(m, const uint8_t *); for (i = 0; i < m->m_len; i++) { if (!((j++) % 2)) data |= buf[i]; else { data |= (buf[i] << 8); BWN_PIO_WRITE_2(mac, tq, BWN_PIO_TXDATA, data); data = 0; } } } if (m0->m_pkthdr.len % 2) { ctl &= ~BWN_PIO_TXCTL_WRITEHI; BWN_PIO_WRITE_2(mac, tq, BWN_PIO_TXCTL, ctl); BWN_PIO_WRITE_2(mac, tq, BWN_PIO_TXDATA, data); } return (ctl); } static void bwn_set_slot_time(struct bwn_mac *mac, uint16_t time) { /* XXX should exit if 5GHz band .. 
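 * (the check below only bails out for non-G PHYs)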
*/ if (mac->mac_phy.type != BWN_PHYTYPE_G) return; BWN_WRITE_2(mac, 0x684, 510 + time); /* Disabled in Linux b43, can adversely effect performance */ #if 0 bwn_shm_write_2(mac, BWN_SHARED, 0x0010, time); #endif } static struct bwn_dma_ring * bwn_dma_select(struct bwn_mac *mac, uint8_t prio) { if ((mac->mac_flags & BWN_MAC_FLAG_WME) == 0) return (mac->mac_method.dma.wme[WME_AC_BE]); switch (prio) { case 3: return (mac->mac_method.dma.wme[WME_AC_VO]); case 2: return (mac->mac_method.dma.wme[WME_AC_VI]); case 0: return (mac->mac_method.dma.wme[WME_AC_BE]); case 1: return (mac->mac_method.dma.wme[WME_AC_BK]); } KASSERT(0 == 1, ("%s:%d: fail", __func__, __LINE__)); return (NULL); } static int bwn_dma_getslot(struct bwn_dma_ring *dr) { int slot; BWN_ASSERT_LOCKED(dr->dr_mac->mac_sc); KASSERT(dr->dr_tx, ("%s:%d: fail", __func__, __LINE__)); KASSERT(!(dr->dr_stop), ("%s:%d: fail", __func__, __LINE__)); KASSERT(bwn_dma_freeslot(dr) != 0, ("%s:%d: fail", __func__, __LINE__)); slot = bwn_dma_nextslot(dr, dr->dr_curslot); KASSERT(!(slot & ~0x0fff), ("%s:%d: fail", __func__, __LINE__)); dr->dr_curslot = slot; dr->dr_usedslot++; return (slot); } static struct bwn_pio_txqueue * bwn_pio_parse_cookie(struct bwn_mac *mac, uint16_t cookie, struct bwn_pio_txpkt **pack) { struct bwn_pio *pio = &mac->mac_method.pio; struct bwn_pio_txqueue *tq = NULL; unsigned int index; switch (cookie & 0xf000) { case 0x1000: tq = &pio->wme[WME_AC_BK]; break; case 0x2000: tq = &pio->wme[WME_AC_BE]; break; case 0x3000: tq = &pio->wme[WME_AC_VI]; break; case 0x4000: tq = &pio->wme[WME_AC_VO]; break; case 0x5000: tq = &pio->mcast; break; } KASSERT(tq != NULL, ("%s:%d: fail", __func__, __LINE__)); if (tq == NULL) return (NULL); index = (cookie & 0x0fff); KASSERT(index < N(tq->tq_pkts), ("%s:%d: fail", __func__, __LINE__)); if (index >= N(tq->tq_pkts)) return (NULL); *pack = &tq->tq_pkts[index]; KASSERT(*pack != NULL, ("%s:%d: fail", __func__, __LINE__)); return (tq); } static void bwn_txpwr(void *arg, int npending) { struct bwn_mac *mac = arg; struct bwn_softc *sc; if (mac == NULL) return; sc = mac->mac_sc; BWN_LOCK(sc); if (mac->mac_status >= BWN_MAC_STATUS_STARTED && mac->mac_phy.set_txpwr != NULL) mac->mac_phy.set_txpwr(mac); BWN_UNLOCK(sc); } static void bwn_task_15s(struct bwn_mac *mac) { uint16_t reg; if (mac->mac_fw.opensource) { reg = bwn_shm_read_2(mac, BWN_SCRATCH, BWN_WATCHDOG_REG); if (reg) { bwn_restart(mac, "fw watchdog"); return; } bwn_shm_write_2(mac, BWN_SCRATCH, BWN_WATCHDOG_REG, 1); } if (mac->mac_phy.task_15s) mac->mac_phy.task_15s(mac); mac->mac_phy.txerrors = BWN_TXERROR_MAX; } static void bwn_task_30s(struct bwn_mac *mac) { if (mac->mac_phy.type != BWN_PHYTYPE_G || mac->mac_noise.noi_running) return; mac->mac_noise.noi_running = 1; mac->mac_noise.noi_nsamples = 0; bwn_noise_gensample(mac); } static void bwn_task_60s(struct bwn_mac *mac) { if (mac->mac_phy.task_60s) mac->mac_phy.task_60s(mac); bwn_phy_txpower_check(mac, BWN_TXPWR_IGNORE_TIME); } static void bwn_tasks(void *arg) { struct bwn_mac *mac = arg; struct bwn_softc *sc = mac->mac_sc; BWN_ASSERT_LOCKED(sc); if (mac->mac_status != BWN_MAC_STATUS_STARTED) return; if (mac->mac_task_state % 4 == 0) bwn_task_60s(mac); if (mac->mac_task_state % 2 == 0) bwn_task_30s(mac); bwn_task_15s(mac); mac->mac_task_state++; callout_reset(&sc->sc_task_ch, hz * 15, bwn_tasks, mac); } static int bwn_plcp_get_ofdmrate(struct bwn_mac *mac, struct bwn_plcp6 *plcp, uint8_t a) { struct bwn_softc *sc = mac->mac_sc; KASSERT(a == 0, ("not support APHY\n")); switch (plcp->o.raw[0] & 
0xf) { case 0xb: return (BWN_OFDM_RATE_6MB); case 0xf: return (BWN_OFDM_RATE_9MB); case 0xa: return (BWN_OFDM_RATE_12MB); case 0xe: return (BWN_OFDM_RATE_18MB); case 0x9: return (BWN_OFDM_RATE_24MB); case 0xd: return (BWN_OFDM_RATE_36MB); case 0x8: return (BWN_OFDM_RATE_48MB); case 0xc: return (BWN_OFDM_RATE_54MB); } device_printf(sc->sc_dev, "incorrect OFDM rate %d\n", plcp->o.raw[0] & 0xf); return (-1); } static int bwn_plcp_get_cckrate(struct bwn_mac *mac, struct bwn_plcp6 *plcp) { struct bwn_softc *sc = mac->mac_sc; switch (plcp->o.raw[0]) { case 0x0a: return (BWN_CCK_RATE_1MB); case 0x14: return (BWN_CCK_RATE_2MB); case 0x37: return (BWN_CCK_RATE_5MB); case 0x6e: return (BWN_CCK_RATE_11MB); } device_printf(sc->sc_dev, "incorrect CCK rate %d\n", plcp->o.raw[0]); return (-1); } static void bwn_rx_radiotap(struct bwn_mac *mac, struct mbuf *m, const struct bwn_rxhdr4 *rxhdr, struct bwn_plcp6 *plcp, int rate, int rssi, int noise) { struct bwn_softc *sc = mac->mac_sc; const struct ieee80211_frame_min *wh; uint64_t tsf; uint16_t low_mactime_now; uint16_t mt; if (htole16(rxhdr->phy_status0) & BWN_RX_PHYST0_SHORTPRMBL) sc->sc_rx_th.wr_flags |= IEEE80211_RADIOTAP_F_SHORTPRE; wh = mtod(m, const struct ieee80211_frame_min *); if (wh->i_fc[1] & IEEE80211_FC1_PROTECTED) sc->sc_rx_th.wr_flags |= IEEE80211_RADIOTAP_F_WEP; bwn_tsf_read(mac, &tsf); low_mactime_now = tsf; tsf = tsf & ~0xffffULL; switch (mac->mac_fw.fw_hdr_format) { case BWN_FW_HDR_351: case BWN_FW_HDR_410: mt = le16toh(rxhdr->ps4.r351.mac_time); break; case BWN_FW_HDR_598: mt = le16toh(rxhdr->ps4.r598.mac_time); break; } tsf += mt; if (low_mactime_now < mt) tsf -= 0x10000; sc->sc_rx_th.wr_tsf = tsf; sc->sc_rx_th.wr_rate = rate; sc->sc_rx_th.wr_antsignal = rssi; sc->sc_rx_th.wr_antnoise = noise; } static void bwn_tsf_read(struct bwn_mac *mac, uint64_t *tsf) { uint32_t low, high; KASSERT(siba_get_revid(mac->mac_sc->sc_dev) >= 3, ("%s:%d: fail", __func__, __LINE__)); low = BWN_READ_4(mac, BWN_REV3PLUS_TSF_LOW); high = BWN_READ_4(mac, BWN_REV3PLUS_TSF_HIGH); *tsf = high; *tsf <<= 32; *tsf |= low; } static int bwn_dma_attach(struct bwn_mac *mac) { struct bwn_dma *dma = &mac->mac_method.dma; struct bwn_softc *sc = mac->mac_sc; bus_addr_t lowaddr = 0; int error; if (siba_get_type(sc->sc_dev) == SIBA_TYPE_PCMCIA || bwn_usedma == 0) return (0); KASSERT(siba_get_revid(sc->sc_dev) >= 5, ("%s: fail", __func__)); mac->mac_flags |= BWN_MAC_FLAG_DMA; dma->dmatype = bwn_dma_gettype(mac); if (dma->dmatype == BWN_DMA_30BIT) lowaddr = BWN_BUS_SPACE_MAXADDR_30BIT; else if (dma->dmatype == BWN_DMA_32BIT) lowaddr = BUS_SPACE_MAXADDR_32BIT; else lowaddr = BUS_SPACE_MAXADDR; /* * Create top level DMA tag */ error = bus_dma_tag_create(bus_get_dma_tag(sc->sc_dev), /* parent */ BWN_ALIGN, 0, /* alignment, bounds */ lowaddr, /* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filter, filterarg */ BUS_SPACE_MAXSIZE, /* maxsize */ BUS_SPACE_UNRESTRICTED, /* nsegments */ BUS_SPACE_MAXSIZE, /* maxsegsize */ 0, /* flags */ NULL, NULL, /* lockfunc, lockarg */ &dma->parent_dtag); if (error) { device_printf(sc->sc_dev, "can't create parent DMA tag\n"); return (error); } /* * Create TX/RX mbuf DMA tag */ error = bus_dma_tag_create(dma->parent_dtag, 1, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL, MCLBYTES, 1, BUS_SPACE_MAXSIZE_32BIT, 0, NULL, NULL, &dma->rxbuf_dtag); if (error) { device_printf(sc->sc_dev, "can't create mbuf DMA tag\n"); goto fail0; } error = bus_dma_tag_create(dma->parent_dtag, 1, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL, 
MCLBYTES, 1, BUS_SPACE_MAXSIZE_32BIT, 0, NULL, NULL, &dma->txbuf_dtag); if (error) { device_printf(sc->sc_dev, "can't create mbuf DMA tag\n"); goto fail1; } dma->wme[WME_AC_BK] = bwn_dma_ringsetup(mac, 0, 1, dma->dmatype); if (!dma->wme[WME_AC_BK]) goto fail2; dma->wme[WME_AC_BE] = bwn_dma_ringsetup(mac, 1, 1, dma->dmatype); if (!dma->wme[WME_AC_BE]) goto fail3; dma->wme[WME_AC_VI] = bwn_dma_ringsetup(mac, 2, 1, dma->dmatype); if (!dma->wme[WME_AC_VI]) goto fail4; dma->wme[WME_AC_VO] = bwn_dma_ringsetup(mac, 3, 1, dma->dmatype); if (!dma->wme[WME_AC_VO]) goto fail5; dma->mcast = bwn_dma_ringsetup(mac, 4, 1, dma->dmatype); if (!dma->mcast) goto fail6; dma->rx = bwn_dma_ringsetup(mac, 0, 0, dma->dmatype); if (!dma->rx) goto fail7; return (error); fail7: bwn_dma_ringfree(&dma->mcast); fail6: bwn_dma_ringfree(&dma->wme[WME_AC_VO]); fail5: bwn_dma_ringfree(&dma->wme[WME_AC_VI]); fail4: bwn_dma_ringfree(&dma->wme[WME_AC_BE]); fail3: bwn_dma_ringfree(&dma->wme[WME_AC_BK]); fail2: bus_dma_tag_destroy(dma->txbuf_dtag); fail1: bus_dma_tag_destroy(dma->rxbuf_dtag); fail0: bus_dma_tag_destroy(dma->parent_dtag); return (error); } static struct bwn_dma_ring * bwn_dma_parse_cookie(struct bwn_mac *mac, const struct bwn_txstatus *status, uint16_t cookie, int *slot) { struct bwn_dma *dma = &mac->mac_method.dma; struct bwn_dma_ring *dr; struct bwn_softc *sc = mac->mac_sc; BWN_ASSERT_LOCKED(mac->mac_sc); switch (cookie & 0xf000) { case 0x1000: dr = dma->wme[WME_AC_BK]; break; case 0x2000: dr = dma->wme[WME_AC_BE]; break; case 0x3000: dr = dma->wme[WME_AC_VI]; break; case 0x4000: dr = dma->wme[WME_AC_VO]; break; case 0x5000: dr = dma->mcast; break; default: dr = NULL; KASSERT(0 == 1, ("invalid cookie value %d", cookie & 0xf000)); } *slot = (cookie & 0x0fff); if (*slot < 0 || *slot >= dr->dr_numslots) { /* * XXX FIXME: sometimes H/W returns TX DONE events duplicately * that it occurs events which have same H/W sequence numbers. * When it's occurred just prints a WARNING msgs and ignores. 
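 * In other words, the hardware may report the same TX-done event more
 * than once (same sequence number); such duplicates are logged and
 * ignored.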
*/ KASSERT(status->seq == dma->lastseq, ("%s:%d: fail", __func__, __LINE__)); device_printf(sc->sc_dev, "out of slot ranges (0 < %d < %d)\n", *slot, dr->dr_numslots); return (NULL); } dma->lastseq = status->seq; return (dr); } static void bwn_dma_stop(struct bwn_mac *mac) { struct bwn_dma *dma; if ((mac->mac_flags & BWN_MAC_FLAG_DMA) == 0) return; dma = &mac->mac_method.dma; bwn_dma_ringstop(&dma->rx); bwn_dma_ringstop(&dma->wme[WME_AC_BK]); bwn_dma_ringstop(&dma->wme[WME_AC_BE]); bwn_dma_ringstop(&dma->wme[WME_AC_VI]); bwn_dma_ringstop(&dma->wme[WME_AC_VO]); bwn_dma_ringstop(&dma->mcast); } static void bwn_dma_ringstop(struct bwn_dma_ring **dr) { if (dr == NULL) return; bwn_dma_cleanup(*dr); } static void bwn_pio_stop(struct bwn_mac *mac) { struct bwn_pio *pio; if (mac->mac_flags & BWN_MAC_FLAG_DMA) return; pio = &mac->mac_method.pio; bwn_destroy_queue_tx(&pio->mcast); bwn_destroy_queue_tx(&pio->wme[WME_AC_VO]); bwn_destroy_queue_tx(&pio->wme[WME_AC_VI]); bwn_destroy_queue_tx(&pio->wme[WME_AC_BE]); bwn_destroy_queue_tx(&pio->wme[WME_AC_BK]); } static void bwn_led_attach(struct bwn_mac *mac) { struct bwn_softc *sc = mac->mac_sc; const uint8_t *led_act = NULL; uint16_t val[BWN_LED_MAX]; int i; sc->sc_led_idle = (2350 * hz) / 1000; sc->sc_led_blink = 1; for (i = 0; i < N(bwn_vendor_led_act); ++i) { if (siba_get_pci_subvendor(sc->sc_dev) == bwn_vendor_led_act[i].vid) { led_act = bwn_vendor_led_act[i].led_act; break; } } if (led_act == NULL) led_act = bwn_default_led_act; val[0] = siba_sprom_get_gpio0(sc->sc_dev); val[1] = siba_sprom_get_gpio1(sc->sc_dev); val[2] = siba_sprom_get_gpio2(sc->sc_dev); val[3] = siba_sprom_get_gpio3(sc->sc_dev); for (i = 0; i < BWN_LED_MAX; ++i) { struct bwn_led *led = &sc->sc_leds[i]; if (val[i] == 0xff) { led->led_act = led_act[i]; } else { if (val[i] & BWN_LED_ACT_LOW) led->led_flags |= BWN_LED_F_ACTLOW; led->led_act = val[i] & BWN_LED_ACT_MASK; } led->led_mask = (1 << i); if (led->led_act == BWN_LED_ACT_BLINK_SLOW || led->led_act == BWN_LED_ACT_BLINK_POLL || led->led_act == BWN_LED_ACT_BLINK) { led->led_flags |= BWN_LED_F_BLINK; if (led->led_act == BWN_LED_ACT_BLINK_POLL) led->led_flags |= BWN_LED_F_POLLABLE; else if (led->led_act == BWN_LED_ACT_BLINK_SLOW) led->led_flags |= BWN_LED_F_SLOW; if (sc->sc_blink_led == NULL) { sc->sc_blink_led = led; if (led->led_flags & BWN_LED_F_SLOW) BWN_LED_SLOWDOWN(sc->sc_led_idle); } } DPRINTF(sc, BWN_DEBUG_LED, "%dth led, act %d, lowact %d\n", i, led->led_act, led->led_flags & BWN_LED_F_ACTLOW); } callout_init_mtx(&sc->sc_led_blink_ch, &sc->sc_mtx, 0); } static __inline uint16_t bwn_led_onoff(const struct bwn_led *led, uint16_t val, int on) { if (led->led_flags & BWN_LED_F_ACTLOW) on = !on; if (on) val |= led->led_mask; else val &= ~led->led_mask; return val; } static void bwn_led_newstate(struct bwn_mac *mac, enum ieee80211_state nstate) { struct bwn_softc *sc = mac->mac_sc; struct ieee80211com *ic = &sc->sc_ic; uint16_t val; int i; if (nstate == IEEE80211_S_INIT) { callout_stop(&sc->sc_led_blink_ch); sc->sc_led_blinking = 0; } if ((sc->sc_flags & BWN_FLAG_RUNNING) == 0) return; val = BWN_READ_2(mac, BWN_GPIO_CONTROL); for (i = 0; i < BWN_LED_MAX; ++i) { struct bwn_led *led = &sc->sc_leds[i]; int on; if (led->led_act == BWN_LED_ACT_UNKN || led->led_act == BWN_LED_ACT_NULL) continue; if ((led->led_flags & BWN_LED_F_BLINK) && nstate != IEEE80211_S_INIT) continue; switch (led->led_act) { case BWN_LED_ACT_ON: /* Always on */ on = 1; break; case BWN_LED_ACT_OFF: /* Always off */ case BWN_LED_ACT_5GHZ: /* TODO: 11A */ on = 0; 
break; default: on = 1; switch (nstate) { case IEEE80211_S_INIT: on = 0; break; case IEEE80211_S_RUN: if (led->led_act == BWN_LED_ACT_11G && ic->ic_curmode != IEEE80211_MODE_11G) on = 0; break; default: if (led->led_act == BWN_LED_ACT_ASSOC) on = 0; break; } break; } val = bwn_led_onoff(led, val, on); } BWN_WRITE_2(mac, BWN_GPIO_CONTROL, val); } static void bwn_led_event(struct bwn_mac *mac, int event) { struct bwn_softc *sc = mac->mac_sc; struct bwn_led *led = sc->sc_blink_led; int rate; if (event == BWN_LED_EVENT_POLL) { if ((led->led_flags & BWN_LED_F_POLLABLE) == 0) return; if (ticks - sc->sc_led_ticks < sc->sc_led_idle) return; } sc->sc_led_ticks = ticks; if (sc->sc_led_blinking) return; switch (event) { case BWN_LED_EVENT_RX: rate = sc->sc_rx_rate; break; case BWN_LED_EVENT_TX: rate = sc->sc_tx_rate; break; case BWN_LED_EVENT_POLL: rate = 0; break; default: panic("unknown LED event %d\n", event); break; } bwn_led_blink_start(mac, bwn_led_duration[rate].on_dur, bwn_led_duration[rate].off_dur); } static void bwn_led_blink_start(struct bwn_mac *mac, int on_dur, int off_dur) { struct bwn_softc *sc = mac->mac_sc; struct bwn_led *led = sc->sc_blink_led; uint16_t val; val = BWN_READ_2(mac, BWN_GPIO_CONTROL); val = bwn_led_onoff(led, val, 1); BWN_WRITE_2(mac, BWN_GPIO_CONTROL, val); if (led->led_flags & BWN_LED_F_SLOW) { BWN_LED_SLOWDOWN(on_dur); BWN_LED_SLOWDOWN(off_dur); } sc->sc_led_blinking = 1; sc->sc_led_blink_offdur = off_dur; callout_reset(&sc->sc_led_blink_ch, on_dur, bwn_led_blink_next, mac); } static void bwn_led_blink_next(void *arg) { struct bwn_mac *mac = arg; struct bwn_softc *sc = mac->mac_sc; uint16_t val; val = BWN_READ_2(mac, BWN_GPIO_CONTROL); val = bwn_led_onoff(sc->sc_blink_led, val, 0); BWN_WRITE_2(mac, BWN_GPIO_CONTROL, val); callout_reset(&sc->sc_led_blink_ch, sc->sc_led_blink_offdur, bwn_led_blink_end, mac); } static void bwn_led_blink_end(void *arg) { struct bwn_mac *mac = arg; struct bwn_softc *sc = mac->mac_sc; sc->sc_led_blinking = 0; } static int bwn_suspend(device_t dev) { struct bwn_softc *sc = device_get_softc(dev); BWN_LOCK(sc); bwn_stop(sc); BWN_UNLOCK(sc); return (0); } static int bwn_resume(device_t dev) { struct bwn_softc *sc = device_get_softc(dev); int error = EDOOFUS; BWN_LOCK(sc); if (sc->sc_ic.ic_nrunning > 0) error = bwn_init(sc); BWN_UNLOCK(sc); if (error == 0) ieee80211_start_all(&sc->sc_ic); return (0); } static void bwn_rfswitch(void *arg) { struct bwn_softc *sc = arg; struct bwn_mac *mac = sc->sc_curmac; int cur = 0, prev = 0; KASSERT(mac->mac_status >= BWN_MAC_STATUS_STARTED, ("%s: invalid MAC status %d", __func__, mac->mac_status)); if (mac->mac_phy.rev >= 3 || mac->mac_phy.type == BWN_PHYTYPE_LP || mac->mac_phy.type == BWN_PHYTYPE_N) { if (!(BWN_READ_4(mac, BWN_RF_HWENABLED_HI) & BWN_RF_HWENABLED_HI_MASK)) cur = 1; } else { if (BWN_READ_2(mac, BWN_RF_HWENABLED_LO) & BWN_RF_HWENABLED_LO_MASK) cur = 1; } if (mac->mac_flags & BWN_MAC_FLAG_RADIO_ON) prev = 1; DPRINTF(sc, BWN_DEBUG_RESET, "%s: called; cur=%d, prev=%d\n", __func__, cur, prev); if (cur != prev) { if (cur) mac->mac_flags |= BWN_MAC_FLAG_RADIO_ON; else mac->mac_flags &= ~BWN_MAC_FLAG_RADIO_ON; device_printf(sc->sc_dev, "status of RF switch is changed to %s\n", cur ? "ON" : "OFF"); if (cur != mac->mac_phy.rf_on) { if (cur) bwn_rf_turnon(mac); else bwn_rf_turnoff(mac); } } callout_schedule(&sc->sc_rfswitch_ch, hz); } static void bwn_sysctl_node(struct bwn_softc *sc) { device_t dev = sc->sc_dev; struct bwn_mac *mac; struct bwn_stats *stats; /* XXX assume that count of MAC is only 1. 
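 * so the sysctl nodes below hang off sc_curmac's statistics only.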
*/ if ((mac = sc->sc_curmac) == NULL) return; stats = &mac->mac_stats; SYSCTL_ADD_INT(device_get_sysctl_ctx(dev), SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "linknoise", CTLFLAG_RW, &stats->rts, 0, "Noise level"); SYSCTL_ADD_INT(device_get_sysctl_ctx(dev), SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "rts", CTLFLAG_RW, &stats->rts, 0, "RTS"); SYSCTL_ADD_INT(device_get_sysctl_ctx(dev), SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "rtsfail", CTLFLAG_RW, &stats->rtsfail, 0, "RTS failed to send"); #ifdef BWN_DEBUG SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev), SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "debug", CTLFLAG_RW, &sc->sc_debug, 0, "Debug flags"); #endif } static device_method_t bwn_methods[] = { /* Device interface */ DEVMETHOD(device_probe, bwn_probe), DEVMETHOD(device_attach, bwn_attach), DEVMETHOD(device_detach, bwn_detach), DEVMETHOD(device_suspend, bwn_suspend), DEVMETHOD(device_resume, bwn_resume), DEVMETHOD_END }; driver_t bwn_driver = { "bwn", bwn_methods, sizeof(struct bwn_softc) }; static devclass_t bwn_devclass; DRIVER_MODULE(bwn, siba_bwn, bwn_driver, bwn_devclass, 0, 0); MODULE_DEPEND(bwn, siba_bwn, 1, 1, 1); MODULE_DEPEND(bwn, gpiobus, 1, 1, 1); MODULE_DEPEND(bwn, wlan, 1, 1, 1); /* 802.11 media layer */ MODULE_DEPEND(bwn, firmware, 1, 1, 1); /* firmware support */ MODULE_DEPEND(bwn, wlan_amrr, 1, 1, 1); MODULE_VERSION(bwn, 1); Index: head/sys/dev/bwn/if_bwn_phy_lp.c =================================================================== --- head/sys/dev/bwn/if_bwn_phy_lp.c (revision 327948) +++ head/sys/dev/bwn/if_bwn_phy_lp.c (revision 327949) @@ -1,3660 +1,3660 @@ /*- * Copyright (c) 2009-2010 Weongyo Jeong * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer, * without modification. * 2. Redistributions in binary form must reproduce at minimum a disclaimer * similar to the "NO WARRANTY" disclaimer below ("Disclaimer") and any * redistribution must be conditioned upon including a substantially * similar Disclaimer requirement for further binary redistribution. * * NO WARRANTY * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF NONINFRINGEMENT, MERCHANTIBILITY * AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL * THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, * OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF * THE POSSIBILITY OF SUCH DAMAGES. */ #include __FBSDID("$FreeBSD$"); #include "opt_bwn.h" #include "opt_wlan.h" /* * The Broadcom Wireless LAN controller driver. 
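 * This file holds the LP-PHY support routines (if_bwn_phy_lp.c).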
*/ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include static void bwn_phy_lp_readsprom(struct bwn_mac *); static void bwn_phy_lp_bbinit(struct bwn_mac *); static void bwn_phy_lp_txpctl_init(struct bwn_mac *); static void bwn_phy_lp_calib(struct bwn_mac *); static int bwn_phy_lp_b2062_switch_channel(struct bwn_mac *, uint8_t); static int bwn_phy_lp_b2063_switch_channel(struct bwn_mac *, uint8_t); static void bwn_phy_lp_set_anafilter(struct bwn_mac *, uint8_t); static void bwn_phy_lp_set_gaintbl(struct bwn_mac *, uint32_t); static void bwn_phy_lp_digflt_save(struct bwn_mac *); static void bwn_phy_lp_get_txpctlmode(struct bwn_mac *); static void bwn_phy_lp_set_txpctlmode(struct bwn_mac *, uint8_t); static void bwn_phy_lp_bugfix(struct bwn_mac *); static void bwn_phy_lp_digflt_restore(struct bwn_mac *); static void bwn_phy_lp_tblinit(struct bwn_mac *); static void bwn_phy_lp_bbinit_r2(struct bwn_mac *); static void bwn_phy_lp_bbinit_r01(struct bwn_mac *); static void bwn_phy_lp_b2062_init(struct bwn_mac *); static void bwn_phy_lp_b2063_init(struct bwn_mac *); static void bwn_phy_lp_rxcal_r2(struct bwn_mac *); static void bwn_phy_lp_rccal_r12(struct bwn_mac *); static void bwn_phy_lp_set_rccap(struct bwn_mac *); static uint32_t bwn_phy_lp_roundup(uint32_t, uint32_t, uint8_t); static void bwn_phy_lp_b2062_reset_pllbias(struct bwn_mac *); static void bwn_phy_lp_b2062_vco_calib(struct bwn_mac *); static void bwn_tab_write_multi(struct bwn_mac *, uint32_t, int, const void *); static void bwn_tab_read_multi(struct bwn_mac *, uint32_t, int, void *); static struct bwn_txgain bwn_phy_lp_get_txgain(struct bwn_mac *); static uint8_t bwn_phy_lp_get_bbmult(struct bwn_mac *); static void bwn_phy_lp_set_txgain(struct bwn_mac *, struct bwn_txgain *); static void bwn_phy_lp_set_bbmult(struct bwn_mac *, uint8_t); static void bwn_phy_lp_set_trsw_over(struct bwn_mac *, uint8_t, uint8_t); static void bwn_phy_lp_set_rxgain(struct bwn_mac *, uint32_t); static void bwn_phy_lp_set_deaf(struct bwn_mac *, uint8_t); static int bwn_phy_lp_calc_rx_iq_comp(struct bwn_mac *, uint16_t); static void bwn_phy_lp_clear_deaf(struct bwn_mac *, uint8_t); static void bwn_phy_lp_tblinit_r01(struct bwn_mac *); static void bwn_phy_lp_tblinit_r2(struct bwn_mac *); static void bwn_phy_lp_tblinit_txgain(struct bwn_mac *); static void bwn_tab_write(struct bwn_mac *, uint32_t, uint32_t); static void bwn_phy_lp_b2062_tblinit(struct bwn_mac *); static void bwn_phy_lp_b2063_tblinit(struct bwn_mac *); static int bwn_phy_lp_loopback(struct bwn_mac *); static void bwn_phy_lp_set_rxgain_idx(struct bwn_mac *, uint16_t); static void bwn_phy_lp_ddfs_turnon(struct bwn_mac *, int, int, int, int, int); static uint8_t bwn_phy_lp_rx_iq_est(struct bwn_mac *, uint16_t, uint8_t, struct bwn_phy_lp_iq_est *); static void bwn_phy_lp_ddfs_turnoff(struct bwn_mac *); static uint32_t bwn_tab_read(struct bwn_mac *, uint32_t); static void bwn_phy_lp_set_txgain_dac(struct bwn_mac *, uint16_t); static void bwn_phy_lp_set_txgain_pa(struct bwn_mac *, uint16_t); static void bwn_phy_lp_set_txgain_override(struct bwn_mac *); static uint16_t bwn_phy_lp_get_pa_gain(struct bwn_mac *); static uint8_t bwn_nbits(int32_t); static void bwn_phy_lp_gaintbl_write_multi(struct bwn_mac *, int, 
int, struct bwn_txgain_entry *); static void bwn_phy_lp_gaintbl_write(struct bwn_mac *, int, struct bwn_txgain_entry); static void bwn_phy_lp_gaintbl_write_r2(struct bwn_mac *, int, struct bwn_txgain_entry); static void bwn_phy_lp_gaintbl_write_r01(struct bwn_mac *, int, struct bwn_txgain_entry); static const uint8_t bwn_b2063_chantable_data[33][12] = { { 0x6f, 0x3c, 0x3c, 0x4, 0x5, 0x5, 0x5, 0x5, 0x77, 0x80, 0x80, 0x70 }, { 0x6f, 0x2c, 0x2c, 0x4, 0x5, 0x5, 0x5, 0x5, 0x77, 0x80, 0x80, 0x70 }, { 0x6f, 0x1c, 0x1c, 0x4, 0x5, 0x5, 0x5, 0x5, 0x77, 0x80, 0x80, 0x70 }, { 0x6e, 0x1c, 0x1c, 0x4, 0x5, 0x5, 0x5, 0x5, 0x77, 0x80, 0x80, 0x70 }, { 0x6e, 0xc, 0xc, 0x4, 0x5, 0x5, 0x5, 0x5, 0x77, 0x80, 0x80, 0x70 }, { 0x6a, 0xc, 0xc, 0, 0x2, 0x5, 0xd, 0xd, 0x77, 0x80, 0x20, 0 }, { 0x6a, 0xc, 0xc, 0, 0x1, 0x5, 0xd, 0xc, 0x77, 0x80, 0x20, 0 }, { 0x6a, 0xc, 0xc, 0, 0x1, 0x4, 0xc, 0xc, 0x77, 0x80, 0x20, 0 }, { 0x69, 0xc, 0xc, 0, 0x1, 0x4, 0xc, 0xc, 0x77, 0x70, 0x20, 0 }, { 0x69, 0xc, 0xc, 0, 0x1, 0x4, 0xb, 0xc, 0x77, 0x70, 0x20, 0 }, { 0x69, 0xc, 0xc, 0, 0, 0x4, 0xb, 0xb, 0x77, 0x60, 0x20, 0 }, { 0x69, 0xc, 0xc, 0, 0, 0x3, 0xa, 0xb, 0x77, 0x60, 0x20, 0 }, { 0x69, 0xc, 0xc, 0, 0, 0x3, 0xa, 0xa, 0x77, 0x60, 0x20, 0 }, { 0x68, 0xc, 0xc, 0, 0, 0x2, 0x9, 0x9, 0x77, 0x60, 0x20, 0 }, { 0x68, 0xc, 0xc, 0, 0, 0x1, 0x8, 0x8, 0x77, 0x50, 0x10, 0 }, { 0x67, 0xc, 0xc, 0, 0, 0, 0x8, 0x8, 0x77, 0x50, 0x10, 0 }, { 0x64, 0xc, 0xc, 0, 0, 0, 0x2, 0x1, 0x77, 0x20, 0, 0 }, { 0x64, 0xc, 0xc, 0, 0, 0, 0x1, 0x1, 0x77, 0x20, 0, 0 }, { 0x63, 0xc, 0xc, 0, 0, 0, 0x1, 0, 0x77, 0x10, 0, 0 }, { 0x63, 0xc, 0xc, 0, 0, 0, 0, 0, 0x77, 0x10, 0, 0 }, { 0x62, 0xc, 0xc, 0, 0, 0, 0, 0, 0x77, 0x10, 0, 0 }, { 0x62, 0xc, 0xc, 0, 0, 0, 0, 0, 0x77, 0, 0, 0 }, { 0x61, 0xc, 0xc, 0, 0, 0, 0, 0, 0x77, 0, 0, 0 }, { 0x60, 0xc, 0xc, 0, 0, 0, 0, 0, 0x77, 0, 0, 0 }, { 0x6e, 0xc, 0xc, 0, 0x9, 0xe, 0xf, 0xf, 0x77, 0xc0, 0x50, 0 }, { 0x6e, 0xc, 0xc, 0, 0x9, 0xd, 0xf, 0xf, 0x77, 0xb0, 0x50, 0 }, { 0x6e, 0xc, 0xc, 0, 0x8, 0xc, 0xf, 0xf, 0x77, 0xb0, 0x50, 0 }, { 0x6d, 0xc, 0xc, 0, 0x8, 0xc, 0xf, 0xf, 0x77, 0xa0, 0x40, 0 }, { 0x6d, 0xc, 0xc, 0, 0x8, 0xb, 0xf, 0xf, 0x77, 0xa0, 0x40, 0 }, { 0x6d, 0xc, 0xc, 0, 0x8, 0xa, 0xf, 0xf, 0x77, 0xa0, 0x40, 0 }, { 0x6c, 0xc, 0xc, 0, 0x7, 0x9, 0xf, 0xf, 0x77, 0x90, 0x40, 0 }, { 0x6c, 0xc, 0xc, 0, 0x6, 0x8, 0xf, 0xf, 0x77, 0x90, 0x40, 0 }, { 0x6c, 0xc, 0xc, 0, 0x5, 0x8, 0xf, 0xf, 0x77, 0x90, 0x40, 0 } }; static const struct bwn_b206x_chan bwn_b2063_chantable[] = { { 1, 2412, bwn_b2063_chantable_data[0] }, { 2, 2417, bwn_b2063_chantable_data[0] }, { 3, 2422, bwn_b2063_chantable_data[0] }, { 4, 2427, bwn_b2063_chantable_data[1] }, { 5, 2432, bwn_b2063_chantable_data[1] }, { 6, 2437, bwn_b2063_chantable_data[1] }, { 7, 2442, bwn_b2063_chantable_data[1] }, { 8, 2447, bwn_b2063_chantable_data[1] }, { 9, 2452, bwn_b2063_chantable_data[2] }, { 10, 2457, bwn_b2063_chantable_data[2] }, { 11, 2462, bwn_b2063_chantable_data[3] }, { 12, 2467, bwn_b2063_chantable_data[3] }, { 13, 2472, bwn_b2063_chantable_data[3] }, { 14, 2484, bwn_b2063_chantable_data[4] }, { 34, 5170, bwn_b2063_chantable_data[5] }, { 36, 5180, bwn_b2063_chantable_data[6] }, { 38, 5190, bwn_b2063_chantable_data[7] }, { 40, 5200, bwn_b2063_chantable_data[8] }, { 42, 5210, bwn_b2063_chantable_data[9] }, { 44, 5220, bwn_b2063_chantable_data[10] }, { 46, 5230, bwn_b2063_chantable_data[11] }, { 48, 5240, bwn_b2063_chantable_data[12] }, { 52, 5260, bwn_b2063_chantable_data[13] }, { 56, 5280, bwn_b2063_chantable_data[14] }, { 60, 5300, bwn_b2063_chantable_data[14] }, { 64, 5320, 
bwn_b2063_chantable_data[15] }, { 100, 5500, bwn_b2063_chantable_data[16] }, { 104, 5520, bwn_b2063_chantable_data[17] }, { 108, 5540, bwn_b2063_chantable_data[18] }, { 112, 5560, bwn_b2063_chantable_data[19] }, { 116, 5580, bwn_b2063_chantable_data[20] }, { 120, 5600, bwn_b2063_chantable_data[21] }, { 124, 5620, bwn_b2063_chantable_data[21] }, { 128, 5640, bwn_b2063_chantable_data[22] }, { 132, 5660, bwn_b2063_chantable_data[22] }, { 136, 5680, bwn_b2063_chantable_data[22] }, { 140, 5700, bwn_b2063_chantable_data[23] }, { 149, 5745, bwn_b2063_chantable_data[23] }, { 153, 5765, bwn_b2063_chantable_data[23] }, { 157, 5785, bwn_b2063_chantable_data[23] }, { 161, 5805, bwn_b2063_chantable_data[23] }, { 165, 5825, bwn_b2063_chantable_data[23] }, { 184, 4920, bwn_b2063_chantable_data[24] }, { 188, 4940, bwn_b2063_chantable_data[25] }, { 192, 4960, bwn_b2063_chantable_data[26] }, { 196, 4980, bwn_b2063_chantable_data[27] }, { 200, 5000, bwn_b2063_chantable_data[28] }, { 204, 5020, bwn_b2063_chantable_data[29] }, { 208, 5040, bwn_b2063_chantable_data[30] }, { 212, 5060, bwn_b2063_chantable_data[31] }, { 216, 5080, bwn_b2063_chantable_data[32] } }; static const uint8_t bwn_b2062_chantable_data[22][12] = { { 0xff, 0xff, 0xb5, 0x1b, 0x24, 0x32, 0x32, 0x88, 0x88, 0, 0, 0 }, { 0, 0x22, 0x20, 0x84, 0x3c, 0x77, 0x35, 0xff, 0x88, 0, 0, 0 }, { 0, 0x11, 0x10, 0x83, 0x3c, 0x77, 0x35, 0xff, 0x88, 0, 0, 0 }, { 0, 0, 0, 0x83, 0x3c, 0x77, 0x35, 0xff, 0x88, 0, 0, 0 }, { 0, 0x11, 0x20, 0x83, 0x3c, 0x77, 0x35, 0xff, 0x88, 0, 0, 0 }, { 0, 0x11, 0x10, 0x84, 0x3c, 0x77, 0x35, 0xff, 0x88, 0, 0, 0 }, { 0, 0x11, 0, 0x83, 0x3c, 0x77, 0x35, 0xff, 0x88, 0, 0, 0 }, { 0, 0, 0, 0x63, 0x3c, 0x77, 0x35, 0xff, 0x88, 0, 0, 0 }, { 0, 0, 0, 0x62, 0x3c, 0x77, 0x35, 0xff, 0x88, 0, 0, 0 }, { 0, 0, 0, 0x30, 0x3c, 0x77, 0x37, 0xff, 0x88, 0, 0, 0 }, { 0, 0, 0, 0x20, 0x3c, 0x77, 0x37, 0xff, 0x88, 0, 0, 0 }, { 0, 0, 0, 0x10, 0x3c, 0x77, 0x37, 0xff, 0x88, 0, 0, 0 }, { 0, 0, 0, 0, 0x3c, 0x77, 0x37, 0xff, 0x88, 0, 0, 0 }, { 0x55, 0x77, 0x90, 0xf7, 0x3c, 0x77, 0x35, 0xff, 0xff, 0, 0, 0 }, { 0x44, 0x77, 0x80, 0xe7, 0x3c, 0x77, 0x35, 0xff, 0xff, 0, 0, 0 }, { 0x44, 0x66, 0x80, 0xe7, 0x3c, 0x77, 0x35, 0xff, 0xff, 0, 0, 0 }, { 0x33, 0x66, 0x70, 0xc7, 0x3c, 0x77, 0x35, 0xff, 0xff, 0, 0, 0 }, { 0x22, 0x55, 0x60, 0xd7, 0x3c, 0x77, 0x35, 0xff, 0xff, 0, 0, 0 }, { 0x22, 0x55, 0x60, 0xc7, 0x3c, 0x77, 0x35, 0xff, 0xff, 0, 0, 0 }, { 0x22, 0x44, 0x50, 0xc7, 0x3c, 0x77, 0x35, 0xff, 0xff, 0, 0, 0 }, { 0x11, 0x44, 0x50, 0xa5, 0x3c, 0x77, 0x35, 0xff, 0x88, 0, 0, 0 }, { 0, 0x44, 0x40, 0xb6, 0x3c, 0x77, 0x35, 0xff, 0x88, 0, 0, 0 } }; static const struct bwn_b206x_chan bwn_b2062_chantable[] = { { 1, 2412, bwn_b2062_chantable_data[0] }, { 2, 2417, bwn_b2062_chantable_data[0] }, { 3, 2422, bwn_b2062_chantable_data[0] }, { 4, 2427, bwn_b2062_chantable_data[0] }, { 5, 2432, bwn_b2062_chantable_data[0] }, { 6, 2437, bwn_b2062_chantable_data[0] }, { 7, 2442, bwn_b2062_chantable_data[0] }, { 8, 2447, bwn_b2062_chantable_data[0] }, { 9, 2452, bwn_b2062_chantable_data[0] }, { 10, 2457, bwn_b2062_chantable_data[0] }, { 11, 2462, bwn_b2062_chantable_data[0] }, { 12, 2467, bwn_b2062_chantable_data[0] }, { 13, 2472, bwn_b2062_chantable_data[0] }, { 14, 2484, bwn_b2062_chantable_data[0] }, { 34, 5170, bwn_b2062_chantable_data[1] }, { 38, 5190, bwn_b2062_chantable_data[2] }, { 42, 5210, bwn_b2062_chantable_data[2] }, { 46, 5230, bwn_b2062_chantable_data[3] }, { 36, 5180, bwn_b2062_chantable_data[4] }, { 40, 5200, bwn_b2062_chantable_data[5] }, { 44, 5220, 
bwn_b2062_chantable_data[6] }, { 48, 5240, bwn_b2062_chantable_data[3] }, { 52, 5260, bwn_b2062_chantable_data[3] }, { 56, 5280, bwn_b2062_chantable_data[3] }, { 60, 5300, bwn_b2062_chantable_data[7] }, { 64, 5320, bwn_b2062_chantable_data[8] }, { 100, 5500, bwn_b2062_chantable_data[9] }, { 104, 5520, bwn_b2062_chantable_data[10] }, { 108, 5540, bwn_b2062_chantable_data[10] }, { 112, 5560, bwn_b2062_chantable_data[10] }, { 116, 5580, bwn_b2062_chantable_data[11] }, { 120, 5600, bwn_b2062_chantable_data[12] }, { 124, 5620, bwn_b2062_chantable_data[12] }, { 128, 5640, bwn_b2062_chantable_data[12] }, { 132, 5660, bwn_b2062_chantable_data[12] }, { 136, 5680, bwn_b2062_chantable_data[12] }, { 140, 5700, bwn_b2062_chantable_data[12] }, { 149, 5745, bwn_b2062_chantable_data[12] }, { 153, 5765, bwn_b2062_chantable_data[12] }, { 157, 5785, bwn_b2062_chantable_data[12] }, { 161, 5805, bwn_b2062_chantable_data[12] }, { 165, 5825, bwn_b2062_chantable_data[12] }, { 184, 4920, bwn_b2062_chantable_data[13] }, { 188, 4940, bwn_b2062_chantable_data[14] }, { 192, 4960, bwn_b2062_chantable_data[15] }, { 196, 4980, bwn_b2062_chantable_data[16] }, { 200, 5000, bwn_b2062_chantable_data[17] }, { 204, 5020, bwn_b2062_chantable_data[18] }, { 208, 5040, bwn_b2062_chantable_data[19] }, { 212, 5060, bwn_b2062_chantable_data[20] }, { 216, 5080, bwn_b2062_chantable_data[21] } }; /* for LP PHY */ static const struct bwn_rxcompco bwn_rxcompco_5354[] = { { 1, -66, 15 }, { 2, -66, 15 }, { 3, -66, 15 }, { 4, -66, 15 }, { 5, -66, 15 }, { 6, -66, 15 }, { 7, -66, 14 }, { 8, -66, 14 }, { 9, -66, 14 }, { 10, -66, 14 }, { 11, -66, 14 }, { 12, -66, 13 }, { 13, -66, 13 }, { 14, -66, 13 }, }; /* for LP PHY */ static const struct bwn_rxcompco bwn_rxcompco_r12[] = { { 1, -64, 13 }, { 2, -64, 13 }, { 3, -64, 13 }, { 4, -64, 13 }, { 5, -64, 12 }, { 6, -64, 12 }, { 7, -64, 12 }, { 8, -64, 12 }, { 9, -64, 12 }, { 10, -64, 11 }, { 11, -64, 11 }, { 12, -64, 11 }, { 13, -64, 11 }, { 14, -64, 10 }, { 34, -62, 24 }, { 38, -62, 24 }, { 42, -62, 24 }, { 46, -62, 23 }, { 36, -62, 24 }, { 40, -62, 24 }, { 44, -62, 23 }, { 48, -62, 23 }, { 52, -62, 23 }, { 56, -62, 22 }, { 60, -62, 22 }, { 64, -62, 22 }, { 100, -62, 16 }, { 104, -62, 16 }, { 108, -62, 15 }, { 112, -62, 14 }, { 116, -62, 14 }, { 120, -62, 13 }, { 124, -62, 12 }, { 128, -62, 12 }, { 132, -62, 12 }, { 136, -62, 11 }, { 140, -62, 10 }, { 149, -61, 9 }, { 153, -61, 9 }, { 157, -61, 9 }, { 161, -61, 8 }, { 165, -61, 8 }, { 184, -62, 25 }, { 188, -62, 25 }, { 192, -62, 25 }, { 196, -62, 25 }, { 200, -62, 25 }, { 204, -62, 25 }, { 208, -62, 25 }, { 212, -62, 25 }, { 216, -62, 26 }, }; static const struct bwn_rxcompco bwn_rxcompco_r2 = { 0, -64, 0 }; static const uint8_t bwn_tab_sigsq_tbl[] = { 0xde, 0xdc, 0xda, 0xd8, 0xd6, 0xd4, 0xd2, 0xcf, 0xcd, 0xca, 0xc7, 0xc4, 0xc1, 0xbe, 0xbe, 0xbe, 0xbe, 0xbe, 0xbe, 0xbe, 0xbe, 0xbe, 0xbe, 0xbe, 0xbe, 0xbe, 0x00, 0xbe, 0xbe, 0xbe, 0xbe, 0xbe, 0xbe, 0xbe, 0xbe, 0xbe, 0xbe, 0xbe, 0xbe, 0xbe, 0xc1, 0xc4, 0xc7, 0xca, 0xcd, 0xcf, 0xd2, 0xd4, 0xd6, 0xd8, 0xda, 0xdc, 0xde, }; static const uint8_t bwn_tab_pllfrac_tbl[] = { 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x00, 0x00, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, }; static const uint16_t bwn_tabl_iqlocal_tbl[] = { 0x0200, 0x0300, 0x0400, 0x0600, 0x0800, 0x0b00, 0x1000, 0x1001, 0x1002, 0x1003, 0x1004, 0x1005, 0x1006, 0x1007, 0x1707, 0x2007, 0x2d07, 0x4007, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0200, 0x0300, 0x0400, 0x0600, 0x0800, 
0x0b00, 0x1000, 0x1001, 0x1002, 0x1003, 0x1004, 0x1005, 0x1006, 0x1007, 0x1707, 0x2007, 0x2d07, 0x4007, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x4000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, }; void bwn_phy_lp_init_pre(struct bwn_mac *mac) { struct bwn_phy *phy = &mac->mac_phy; struct bwn_phy_lp *plp = &phy->phy_lp; plp->plp_antenna = BWN_ANT_DEFAULT; } int bwn_phy_lp_init(struct bwn_mac *mac) { static const struct bwn_stxtable tables[] = { { 2, 6, 0x3d, 3, 0x01 }, { 1, 12, 0x4c, 1, 0x01 }, { 1, 8, 0x50, 0, 0x7f }, { 0, 8, 0x44, 0, 0xff }, { 1, 0, 0x4a, 0, 0xff }, { 0, 4, 0x4d, 0, 0xff }, { 1, 4, 0x4e, 0, 0xff }, { 0, 12, 0x4f, 0, 0x0f }, { 1, 0, 0x4f, 4, 0x0f }, { 3, 0, 0x49, 0, 0x0f }, { 4, 3, 0x46, 4, 0x07 }, { 3, 15, 0x46, 0, 0x01 }, { 4, 0, 0x46, 1, 0x07 }, { 3, 8, 0x48, 4, 0x07 }, { 3, 11, 0x48, 0, 0x0f }, { 3, 4, 0x49, 4, 0x0f }, { 2, 15, 0x45, 0, 0x01 }, { 5, 13, 0x52, 4, 0x07 }, { 6, 0, 0x52, 7, 0x01 }, { 5, 3, 0x41, 5, 0x07 }, { 5, 6, 0x41, 0, 0x0f }, { 5, 10, 0x42, 5, 0x07 }, { 4, 15, 0x42, 0, 0x01 }, { 5, 0, 0x42, 1, 0x07 }, { 4, 11, 0x43, 4, 0x0f }, { 4, 7, 0x43, 0, 0x0f }, { 4, 6, 0x45, 1, 0x01 }, { 2, 7, 0x40, 4, 0x0f }, { 2, 11, 0x40, 0, 0x0f } }; struct bwn_phy_lp *plp = &mac->mac_phy.phy_lp; struct bwn_softc *sc = mac->mac_sc; const struct bwn_stxtable *st; struct ieee80211com *ic = &sc->sc_ic; int i, error; uint16_t tmp; bwn_phy_lp_readsprom(mac); /* XXX bad place */ bwn_phy_lp_bbinit(mac); /* initialize RF */ BWN_PHY_SET(mac, BWN_PHY_4WIRECTL, 0x2); DELAY(1); BWN_PHY_MASK(mac, BWN_PHY_4WIRECTL, 0xfffd); DELAY(1); if (mac->mac_phy.rf_ver == 0x2062) bwn_phy_lp_b2062_init(mac); else { bwn_phy_lp_b2063_init(mac); /* synchronize stx table. 
*/ for (i = 0; i < N(tables); i++) { st = &tables[i]; tmp = BWN_RF_READ(mac, st->st_rfaddr); tmp >>= st->st_rfshift; tmp <<= st->st_physhift; BWN_PHY_SETMASK(mac, BWN_PHY_OFDM(0xf2 + st->st_phyoffset), ~(st->st_mask << st->st_physhift), tmp); } BWN_PHY_WRITE(mac, BWN_PHY_OFDM(0xf0), 0x5f80); BWN_PHY_WRITE(mac, BWN_PHY_OFDM(0xf1), 0); } /* calibrate RC */ if (mac->mac_phy.rev >= 2) bwn_phy_lp_rxcal_r2(mac); else if (!plp->plp_rccap) { if (IEEE80211_IS_CHAN_2GHZ(ic->ic_curchan)) bwn_phy_lp_rccal_r12(mac); } else bwn_phy_lp_set_rccap(mac); error = bwn_phy_lp_switch_channel(mac, 7); if (error) device_printf(sc->sc_dev, "failed to change channel 7 (%d)\n", error); bwn_phy_lp_txpctl_init(mac); bwn_phy_lp_calib(mac); return (0); } uint16_t bwn_phy_lp_read(struct bwn_mac *mac, uint16_t reg) { BWN_WRITE_2(mac, BWN_PHYCTL, reg); return (BWN_READ_2(mac, BWN_PHYDATA)); } void bwn_phy_lp_write(struct bwn_mac *mac, uint16_t reg, uint16_t value) { BWN_WRITE_2(mac, BWN_PHYCTL, reg); BWN_WRITE_2(mac, BWN_PHYDATA, value); } void bwn_phy_lp_maskset(struct bwn_mac *mac, uint16_t reg, uint16_t mask, uint16_t set) { BWN_WRITE_2(mac, BWN_PHYCTL, reg); BWN_WRITE_2(mac, BWN_PHYDATA, (BWN_READ_2(mac, BWN_PHYDATA) & mask) | set); } uint16_t bwn_phy_lp_rf_read(struct bwn_mac *mac, uint16_t reg) { KASSERT(reg != 1, ("unaccessible register %d", reg)); if (mac->mac_phy.rev < 2 && reg != 0x4001) reg |= 0x100; if (mac->mac_phy.rev >= 2) reg |= 0x200; BWN_WRITE_2(mac, BWN_RFCTL, reg); return BWN_READ_2(mac, BWN_RFDATALO); } void bwn_phy_lp_rf_write(struct bwn_mac *mac, uint16_t reg, uint16_t value) { KASSERT(reg != 1, ("unaccessible register %d", reg)); BWN_WRITE_2(mac, BWN_RFCTL, reg); BWN_WRITE_2(mac, BWN_RFDATALO, value); } void bwn_phy_lp_rf_onoff(struct bwn_mac *mac, int on) { if (on) { BWN_PHY_MASK(mac, BWN_PHY_RF_OVERRIDE_0, 0xe0ff); BWN_PHY_MASK(mac, BWN_PHY_RF_OVERRIDE_2, (mac->mac_phy.rev >= 2) ? 0xf7f7 : 0xffe7); return; } if (mac->mac_phy.rev >= 2) { BWN_PHY_MASK(mac, BWN_PHY_RF_OVERRIDE_VAL_0, 0x83ff); BWN_PHY_SET(mac, BWN_PHY_RF_OVERRIDE_0, 0x1f00); BWN_PHY_MASK(mac, BWN_PHY_AFE_DDFS, 0x80ff); BWN_PHY_MASK(mac, BWN_PHY_RF_OVERRIDE_2_VAL, 0xdfff); BWN_PHY_SET(mac, BWN_PHY_RF_OVERRIDE_2, 0x0808); return; } BWN_PHY_MASK(mac, BWN_PHY_RF_OVERRIDE_VAL_0, 0xe0ff); BWN_PHY_SET(mac, BWN_PHY_RF_OVERRIDE_0, 0x1f00); BWN_PHY_MASK(mac, BWN_PHY_RF_OVERRIDE_2_VAL, 0xfcff); BWN_PHY_SET(mac, BWN_PHY_RF_OVERRIDE_2, 0x0018); } int bwn_phy_lp_switch_channel(struct bwn_mac *mac, uint32_t chan) { struct bwn_phy *phy = &mac->mac_phy; struct bwn_phy_lp *plp = &phy->phy_lp; int error; if (phy->rf_ver == 0x2063) { error = bwn_phy_lp_b2063_switch_channel(mac, chan); if (error) return (error); } else { error = bwn_phy_lp_b2062_switch_channel(mac, chan); if (error) return (error); bwn_phy_lp_set_anafilter(mac, chan); bwn_phy_lp_set_gaintbl(mac, ieee80211_ieee2mhz(chan, 0)); } plp->plp_chan = chan; BWN_WRITE_2(mac, BWN_CHANNEL, chan); return (0); } uint32_t bwn_phy_lp_get_default_chan(struct bwn_mac *mac) { struct bwn_softc *sc = mac->mac_sc; struct ieee80211com *ic = &sc->sc_ic; return (IEEE80211_IS_CHAN_2GHZ(ic->ic_curchan) ? 
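/*
 * The PHY accessors above go through an indirect address/data pair:
 * BWN_PHYCTL selects the register, BWN_PHYDATA carries the value, and
 * bwn_phy_lp_maskset() layers a read-modify-write on top (current value
 * ANDed with a "keep" mask, then ORed with the bits to set).  A minimal
 * standalone sketch of that update rule, with a plain array standing in
 * for the hardware; rmw_update(), regs[] and their sizes are
 * hypothetical and only for illustration.
 */
#include <stdint.h>

static uint16_t regs[256];		/* stand-in for the PHY register file */

static void
rmw_update(uint16_t reg, uint16_t mask, uint16_t set)
{
	/* Same rule as bwn_phy_lp_maskset(): keep the bits selected by
	 * "mask", then OR in "set". */
	regs[reg & 0xff] = (regs[reg & 0xff] & mask) | set;
}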
1 : 36); } void bwn_phy_lp_set_antenna(struct bwn_mac *mac, int antenna) { struct bwn_phy *phy = &mac->mac_phy; struct bwn_phy_lp *plp = &phy->phy_lp; if (phy->rev >= 2 || antenna > BWN_ANTAUTO1) return; bwn_hf_write(mac, bwn_hf_read(mac) & ~BWN_HF_UCODE_ANTDIV_HELPER); BWN_PHY_SETMASK(mac, BWN_PHY_CRSGAIN_CTL, 0xfffd, antenna & 0x2); BWN_PHY_SETMASK(mac, BWN_PHY_CRSGAIN_CTL, 0xfffe, antenna & 0x1); bwn_hf_write(mac, bwn_hf_read(mac) | BWN_HF_UCODE_ANTDIV_HELPER); plp->plp_antenna = antenna; } void bwn_phy_lp_task_60s(struct bwn_mac *mac) { bwn_phy_lp_calib(mac); } static void bwn_phy_lp_readsprom(struct bwn_mac *mac) { struct bwn_phy_lp *plp = &mac->mac_phy.phy_lp; struct bwn_softc *sc = mac->mac_sc; struct ieee80211com *ic = &sc->sc_ic; if (IEEE80211_IS_CHAN_2GHZ(ic->ic_curchan)) { plp->plp_txisoband_m = siba_sprom_get_tri2g(sc->sc_dev); plp->plp_bxarch = siba_sprom_get_bxa2g(sc->sc_dev); plp->plp_rxpwroffset = siba_sprom_get_rxpo2g(sc->sc_dev); plp->plp_rssivf = siba_sprom_get_rssismf2g(sc->sc_dev); plp->plp_rssivc = siba_sprom_get_rssismc2g(sc->sc_dev); plp->plp_rssigs = siba_sprom_get_rssisav2g(sc->sc_dev); return; } plp->plp_txisoband_l = siba_sprom_get_tri5gl(sc->sc_dev); plp->plp_txisoband_m = siba_sprom_get_tri5g(sc->sc_dev); plp->plp_txisoband_h = siba_sprom_get_tri5gh(sc->sc_dev); plp->plp_bxarch = siba_sprom_get_bxa5g(sc->sc_dev); plp->plp_rxpwroffset = siba_sprom_get_rxpo5g(sc->sc_dev); plp->plp_rssivf = siba_sprom_get_rssismf5g(sc->sc_dev); plp->plp_rssivc = siba_sprom_get_rssismc5g(sc->sc_dev); plp->plp_rssigs = siba_sprom_get_rssisav5g(sc->sc_dev); } static void bwn_phy_lp_bbinit(struct bwn_mac *mac) { bwn_phy_lp_tblinit(mac); if (mac->mac_phy.rev >= 2) bwn_phy_lp_bbinit_r2(mac); else bwn_phy_lp_bbinit_r01(mac); } static void bwn_phy_lp_txpctl_init(struct bwn_mac *mac) { struct bwn_txgain gain_2ghz = { 4, 12, 12, 0 }; struct bwn_txgain gain_5ghz = { 7, 15, 14, 0 }; struct bwn_softc *sc = mac->mac_sc; struct ieee80211com *ic = &sc->sc_ic; bwn_phy_lp_set_txgain(mac, IEEE80211_IS_CHAN_2GHZ(ic->ic_curchan) ? 
&gain_2ghz : &gain_5ghz); bwn_phy_lp_set_bbmult(mac, 150); } static void bwn_phy_lp_calib(struct bwn_mac *mac) { struct bwn_phy_lp *plp = &mac->mac_phy.phy_lp; struct bwn_softc *sc = mac->mac_sc; struct ieee80211com *ic = &sc->sc_ic; const struct bwn_rxcompco *rc = NULL; struct bwn_txgain ogain; int i, omode, oafeovr, orf, obbmult; uint8_t mode, fc = 0; if (plp->plp_chanfullcal != plp->plp_chan) { plp->plp_chanfullcal = plp->plp_chan; fc = 1; } bwn_mac_suspend(mac); /* BlueTooth Coexistance Override */ BWN_WRITE_2(mac, BWN_BTCOEX_CTL, 0x3); BWN_WRITE_2(mac, BWN_BTCOEX_TXCTL, 0xff); if (mac->mac_phy.rev >= 2) bwn_phy_lp_digflt_save(mac); bwn_phy_lp_get_txpctlmode(mac); mode = plp->plp_txpctlmode; bwn_phy_lp_set_txpctlmode(mac, BWN_PHYLP_TXPCTL_OFF); if (mac->mac_phy.rev == 0 && mode != BWN_PHYLP_TXPCTL_OFF) bwn_phy_lp_bugfix(mac); if (mac->mac_phy.rev >= 2 && fc == 1) { bwn_phy_lp_get_txpctlmode(mac); omode = plp->plp_txpctlmode; oafeovr = BWN_PHY_READ(mac, BWN_PHY_AFE_CTL_OVR) & 0x40; if (oafeovr) ogain = bwn_phy_lp_get_txgain(mac); orf = BWN_PHY_READ(mac, BWN_PHY_RF_PWR_OVERRIDE) & 0xff; obbmult = bwn_phy_lp_get_bbmult(mac); bwn_phy_lp_set_txpctlmode(mac, BWN_PHYLP_TXPCTL_OFF); if (oafeovr) bwn_phy_lp_set_txgain(mac, &ogain); bwn_phy_lp_set_bbmult(mac, obbmult); bwn_phy_lp_set_txpctlmode(mac, omode); BWN_PHY_SETMASK(mac, BWN_PHY_RF_PWR_OVERRIDE, 0xff00, orf); } bwn_phy_lp_set_txpctlmode(mac, mode); if (mac->mac_phy.rev >= 2) bwn_phy_lp_digflt_restore(mac); /* do RX IQ Calculation; assumes that noise is true. */ if (siba_get_chipid(sc->sc_dev) == 0x5354) { for (i = 0; i < N(bwn_rxcompco_5354); i++) { if (bwn_rxcompco_5354[i].rc_chan == plp->plp_chan) rc = &bwn_rxcompco_5354[i]; } } else if (mac->mac_phy.rev >= 2) rc = &bwn_rxcompco_r2; else { for (i = 0; i < N(bwn_rxcompco_r12); i++) { if (bwn_rxcompco_r12[i].rc_chan == plp->plp_chan) rc = &bwn_rxcompco_r12[i]; } } if (rc == NULL) goto fail; BWN_PHY_SETMASK(mac, BWN_PHY_RX_COMP_COEFF_S, 0xff00, rc->rc_c1); BWN_PHY_SETMASK(mac, BWN_PHY_RX_COMP_COEFF_S, 0x00ff, rc->rc_c0 << 8); bwn_phy_lp_set_trsw_over(mac, 1 /* TX */, 0 /* RX */); if (IEEE80211_IS_CHAN_2GHZ(ic->ic_curchan)) { BWN_PHY_SET(mac, BWN_PHY_RF_OVERRIDE_0, 0x8); BWN_PHY_SETMASK(mac, BWN_PHY_RF_OVERRIDE_VAL_0, 0xfff7, 0); } else { BWN_PHY_SET(mac, BWN_PHY_RF_OVERRIDE_0, 0x20); BWN_PHY_SETMASK(mac, BWN_PHY_RF_OVERRIDE_VAL_0, 0xffdf, 0); } bwn_phy_lp_set_rxgain(mac, 0x2d5d); BWN_PHY_MASK(mac, BWN_PHY_AFE_CTL_OVR, 0xfffe); BWN_PHY_MASK(mac, BWN_PHY_AFE_CTL_OVRVAL, 0xfffe); BWN_PHY_SET(mac, BWN_PHY_RF_OVERRIDE_0, 0x800); BWN_PHY_SET(mac, BWN_PHY_RF_OVERRIDE_VAL_0, 0x800); bwn_phy_lp_set_deaf(mac, 0); /* XXX no checking return value? */ (void)bwn_phy_lp_calc_rx_iq_comp(mac, 0xfff0); bwn_phy_lp_clear_deaf(mac, 0); BWN_PHY_MASK(mac, BWN_PHY_RF_OVERRIDE_0, 0xfffc); BWN_PHY_MASK(mac, BWN_PHY_RF_OVERRIDE_0, 0xfff7); BWN_PHY_MASK(mac, BWN_PHY_RF_OVERRIDE_0, 0xffdf); /* disable RX GAIN override. 
*/ BWN_PHY_MASK(mac, BWN_PHY_RF_OVERRIDE_0, 0xfffe); BWN_PHY_MASK(mac, BWN_PHY_RF_OVERRIDE_0, 0xffef); BWN_PHY_MASK(mac, BWN_PHY_RF_OVERRIDE_0, 0xffbf); if (mac->mac_phy.rev >= 2) { BWN_PHY_MASK(mac, BWN_PHY_RF_OVERRIDE_2, 0xfeff); if (IEEE80211_IS_CHAN_2GHZ(ic->ic_curchan)) { BWN_PHY_MASK(mac, BWN_PHY_RF_OVERRIDE_2, 0xfbff); BWN_PHY_MASK(mac, BWN_PHY_OFDM(0xe5), 0xfff7); } } else { BWN_PHY_MASK(mac, BWN_PHY_RF_OVERRIDE_2, 0xfdff); } BWN_PHY_MASK(mac, BWN_PHY_AFE_CTL_OVR, 0xfffe); BWN_PHY_MASK(mac, BWN_PHY_AFE_CTL_OVRVAL, 0xf7ff); fail: bwn_mac_enable(mac); } void bwn_phy_lp_switch_analog(struct bwn_mac *mac, int on) { if (on) { BWN_PHY_MASK(mac, BWN_PHY_AFE_CTL_OVR, 0xfff8); return; } BWN_PHY_SET(mac, BWN_PHY_AFE_CTL_OVRVAL, 0x0007); BWN_PHY_SET(mac, BWN_PHY_AFE_CTL_OVR, 0x0007); } static int bwn_phy_lp_b2063_switch_channel(struct bwn_mac *mac, uint8_t chan) { static const struct bwn_b206x_chan *bc = NULL; struct bwn_softc *sc = mac->mac_sc; uint32_t count, freqref, freqvco, freqxtal, val[3], timeout, timeoutref, tmp[6]; uint16_t old, scale, tmp16; int i, div; for (i = 0; i < N(bwn_b2063_chantable); i++) { if (bwn_b2063_chantable[i].bc_chan == chan) { bc = &bwn_b2063_chantable[i]; break; } } if (bc == NULL) return (EINVAL); BWN_RF_WRITE(mac, BWN_B2063_LOGEN_VCOBUF1, bc->bc_data[0]); BWN_RF_WRITE(mac, BWN_B2063_LOGEN_MIXER2, bc->bc_data[1]); BWN_RF_WRITE(mac, BWN_B2063_LOGEN_BUF2, bc->bc_data[2]); BWN_RF_WRITE(mac, BWN_B2063_LOGEN_RCCR1, bc->bc_data[3]); BWN_RF_WRITE(mac, BWN_B2063_A_RX_1ST3, bc->bc_data[4]); BWN_RF_WRITE(mac, BWN_B2063_A_RX_2ND1, bc->bc_data[5]); BWN_RF_WRITE(mac, BWN_B2063_A_RX_2ND4, bc->bc_data[6]); BWN_RF_WRITE(mac, BWN_B2063_A_RX_2ND7, bc->bc_data[7]); BWN_RF_WRITE(mac, BWN_B2063_A_RX_PS6, bc->bc_data[8]); BWN_RF_WRITE(mac, BWN_B2063_TX_RF_CTL2, bc->bc_data[9]); BWN_RF_WRITE(mac, BWN_B2063_TX_RF_CTL5, bc->bc_data[10]); BWN_RF_WRITE(mac, BWN_B2063_PA_CTL11, bc->bc_data[11]); old = BWN_RF_READ(mac, BWN_B2063_COM15); BWN_RF_SET(mac, BWN_B2063_COM15, 0x1e); freqxtal = siba_get_cc_pmufreq(sc->sc_dev) * 1000; freqvco = bc->bc_freq << ((bc->bc_freq > 4000) ? 1 : 2); freqref = freqxtal * 3; div = (freqxtal <= 26000000 ? 
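/*
 * Both radio-specific switch_channel routines begin with the same step:
 * a linear scan of the static channel table for a matching IEEE channel
 * number, returning EINVAL when the channel is not listed.  A compact
 * standalone sketch of that lookup; chan_row, find_chan_row() and the
 * tiny sample table are hypothetical placeholders, not driver data.
 */
#include <errno.h>
#include <stddef.h>
#include <stdint.h>

struct chan_row {
	uint8_t		chan;		/* IEEE channel number */
	uint16_t	freq;		/* center frequency, MHz */
};

static const struct chan_row chan_tab[] = {
	{ 1, 2412 }, { 6, 2437 }, { 11, 2462 }, { 36, 5180 },
};

static int
find_chan_row(uint8_t chan, const struct chan_row **out)
{
	size_t i;

	for (i = 0; i < sizeof(chan_tab) / sizeof(chan_tab[0]); i++) {
		if (chan_tab[i].chan == chan) {
			*out = &chan_tab[i];
			return (0);
		}
	}
	return (EINVAL);		/* channel not handled by this radio */
}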
1 : 2); timeout = ((((8 * freqxtal) / (div * 5000000)) + 1) >> 1) - 1; timeoutref = ((((8 * freqxtal) / (div * (timeout + 1))) + 999999) / 1000000) + 1; BWN_RF_WRITE(mac, BWN_B2063_JTAG_VCO_CALIB3, 0x2); BWN_RF_SETMASK(mac, BWN_B2063_JTAG_VCO_CALIB6, 0xfff8, timeout >> 2); BWN_RF_SETMASK(mac, BWN_B2063_JTAG_VCO_CALIB7, 0xff9f,timeout << 5); BWN_RF_WRITE(mac, BWN_B2063_JTAG_VCO_CALIB5, timeoutref); val[0] = bwn_phy_lp_roundup(freqxtal, 1000000, 16); val[1] = bwn_phy_lp_roundup(freqxtal, 1000000 * div, 16); val[2] = bwn_phy_lp_roundup(freqvco, 3, 16); count = (bwn_phy_lp_roundup(val[2], val[1] + 16, 16) * (timeout + 1) * (timeoutref + 1)) - 1; BWN_RF_SETMASK(mac, BWN_B2063_JTAG_VCO_CALIB7, 0xf0, count >> 8); BWN_RF_WRITE(mac, BWN_B2063_JTAG_VCO_CALIB8, count & 0xff); tmp[0] = ((val[2] * 62500) / freqref) << 4; tmp[1] = ((val[2] * 62500) % freqref) << 4; while (tmp[1] >= freqref) { tmp[0]++; tmp[1] -= freqref; } BWN_RF_SETMASK(mac, BWN_B2063_JTAG_SG1, 0xffe0, tmp[0] >> 4); BWN_RF_SETMASK(mac, BWN_B2063_JTAG_SG2, 0xfe0f, tmp[0] << 4); BWN_RF_SETMASK(mac, BWN_B2063_JTAG_SG2, 0xfff0, tmp[0] >> 16); BWN_RF_WRITE(mac, BWN_B2063_JTAG_SG3, (tmp[1] >> 8) & 0xff); BWN_RF_WRITE(mac, BWN_B2063_JTAG_SG4, tmp[1] & 0xff); BWN_RF_WRITE(mac, BWN_B2063_JTAG_LF1, 0xb9); BWN_RF_WRITE(mac, BWN_B2063_JTAG_LF2, 0x88); BWN_RF_WRITE(mac, BWN_B2063_JTAG_LF3, 0x28); BWN_RF_WRITE(mac, BWN_B2063_JTAG_LF4, 0x63); tmp[2] = ((41 * (val[2] - 3000)) /1200) + 27; tmp[3] = bwn_phy_lp_roundup(132000 * tmp[0], 8451, 16); if (howmany(tmp[3], tmp[2]) > 60) { scale = 1; tmp[4] = ((tmp[3] + tmp[2]) / (tmp[2] << 1)) - 8; } else { scale = 0; tmp[4] = ((tmp[3] + (tmp[2] >> 1)) / tmp[2]) - 8; } BWN_RF_SETMASK(mac, BWN_B2063_JTAG_CP2, 0xffc0, tmp[4]); BWN_RF_SETMASK(mac, BWN_B2063_JTAG_CP2, 0xffbf, scale << 6); tmp[5] = bwn_phy_lp_roundup(100 * val[0], val[2], 16) * (tmp[4] * 8) * (scale + 1); if (tmp[5] > 150) tmp[5] = 0; BWN_RF_SETMASK(mac, BWN_B2063_JTAG_CP3, 0xffe0, tmp[5]); BWN_RF_SETMASK(mac, BWN_B2063_JTAG_CP3, 0xffdf, scale << 5); BWN_RF_SETMASK(mac, BWN_B2063_JTAG_XTAL_12, 0xfffb, 0x4); if (freqxtal > 26000000) BWN_RF_SET(mac, BWN_B2063_JTAG_XTAL_12, 0x2); else BWN_RF_MASK(mac, BWN_B2063_JTAG_XTAL_12, 0xfd); if (val[0] == 45) BWN_RF_SET(mac, BWN_B2063_JTAG_VCO1, 0x2); else BWN_RF_MASK(mac, BWN_B2063_JTAG_VCO1, 0xfd); BWN_RF_SET(mac, BWN_B2063_PLL_SP2, 0x3); DELAY(1); BWN_RF_MASK(mac, BWN_B2063_PLL_SP2, 0xfffc); /* VCO Calibration */ BWN_RF_MASK(mac, BWN_B2063_PLL_SP1, ~0x40); tmp16 = BWN_RF_READ(mac, BWN_B2063_JTAG_CALNRST) & 0xf8; BWN_RF_WRITE(mac, BWN_B2063_JTAG_CALNRST, tmp16); DELAY(1); BWN_RF_WRITE(mac, BWN_B2063_JTAG_CALNRST, tmp16 | 0x4); DELAY(1); BWN_RF_WRITE(mac, BWN_B2063_JTAG_CALNRST, tmp16 | 0x6); DELAY(1); BWN_RF_WRITE(mac, BWN_B2063_JTAG_CALNRST, tmp16 | 0x7); DELAY(300); BWN_RF_SET(mac, BWN_B2063_PLL_SP1, 0x40); BWN_RF_WRITE(mac, BWN_B2063_COM15, old); return (0); } static int bwn_phy_lp_b2062_switch_channel(struct bwn_mac *mac, uint8_t chan) { struct bwn_softc *sc = mac->mac_sc; struct bwn_phy_lp *plp = &mac->mac_phy.phy_lp; const struct bwn_b206x_chan *bc = NULL; uint32_t freqxtal = siba_get_cc_pmufreq(sc->sc_dev) * 1000; uint32_t tmp[9]; int i; for (i = 0; i < N(bwn_b2062_chantable); i++) { if (bwn_b2062_chantable[i].bc_chan == chan) { bc = &bwn_b2062_chantable[i]; break; } } if (bc == NULL) return (EINVAL); BWN_RF_SET(mac, BWN_B2062_S_RFPLLCTL14, 0x04); BWN_RF_WRITE(mac, BWN_B2062_N_LGENATUNE0, bc->bc_data[0]); BWN_RF_WRITE(mac, BWN_B2062_N_LGENATUNE2, bc->bc_data[1]); BWN_RF_WRITE(mac, 
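/*
 * bwn_phy_lp_roundup() (defined later in this file) supplies the
 * fixed-point quotients used in the PLL arithmetic above: it
 * effectively computes round(value * 2^pre / div) with nothing wider
 * than 32-bit shifts and compares.  A 64-bit reference version that can
 * be used to sanity-check that reading; roundup_ref() is a hypothetical
 * name and is not part of the driver.
 */
#include <stdint.h>

static uint32_t
roundup_ref(uint32_t value, uint32_t div, uint8_t pre)
{
	uint64_t num;

	if (div == 0)
		return (0);		/* mirrors the driver's guard */
	num = (uint64_t)value << pre;	/* value * 2^pre */
	return ((uint32_t)((num + div / 2) / div));	/* round half up */
}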
BWN_B2062_N_LGENATUNE3, bc->bc_data[2]); BWN_RF_WRITE(mac, BWN_B2062_N_TX_TUNE, bc->bc_data[3]); BWN_RF_WRITE(mac, BWN_B2062_S_LGENG_CTL1, bc->bc_data[4]); BWN_RF_WRITE(mac, BWN_B2062_N_LGENACTL5, bc->bc_data[5]); BWN_RF_WRITE(mac, BWN_B2062_N_LGENACTL6, bc->bc_data[6]); BWN_RF_WRITE(mac, BWN_B2062_N_TX_PGA, bc->bc_data[7]); BWN_RF_WRITE(mac, BWN_B2062_N_TX_PAD, bc->bc_data[8]); BWN_RF_WRITE(mac, BWN_B2062_S_RFPLLCTL33, 0xcc); BWN_RF_WRITE(mac, BWN_B2062_S_RFPLLCTL34, 0x07); bwn_phy_lp_b2062_reset_pllbias(mac); tmp[0] = freqxtal / 1000; tmp[1] = plp->plp_div * 1000; tmp[2] = tmp[1] * ieee80211_ieee2mhz(chan, 0); if (ieee80211_ieee2mhz(chan, 0) < 4000) tmp[2] *= 2; tmp[3] = 48 * tmp[0]; tmp[5] = tmp[2] / tmp[3]; tmp[6] = tmp[2] % tmp[3]; BWN_RF_WRITE(mac, BWN_B2062_S_RFPLLCTL26, tmp[5]); tmp[4] = tmp[6] * 0x100; tmp[5] = tmp[4] / tmp[3]; tmp[6] = tmp[4] % tmp[3]; BWN_RF_WRITE(mac, BWN_B2062_S_RFPLLCTL27, tmp[5]); tmp[4] = tmp[6] * 0x100; tmp[5] = tmp[4] / tmp[3]; tmp[6] = tmp[4] % tmp[3]; BWN_RF_WRITE(mac, BWN_B2062_S_RFPLLCTL28, tmp[5]); tmp[4] = tmp[6] * 0x100; tmp[5] = tmp[4] / tmp[3]; tmp[6] = tmp[4] % tmp[3]; BWN_RF_WRITE(mac, BWN_B2062_S_RFPLLCTL29, tmp[5] + ((2 * tmp[6]) / tmp[3])); tmp[7] = BWN_RF_READ(mac, BWN_B2062_S_RFPLLCTL19); tmp[8] = ((2 * tmp[2] * (tmp[7] + 1)) + (3 * tmp[0])) / (6 * tmp[0]); BWN_RF_WRITE(mac, BWN_B2062_S_RFPLLCTL23, (tmp[8] >> 8) + 16); BWN_RF_WRITE(mac, BWN_B2062_S_RFPLLCTL24, tmp[8] & 0xff); bwn_phy_lp_b2062_vco_calib(mac); if (BWN_RF_READ(mac, BWN_B2062_S_RFPLLCTL3) & 0x10) { BWN_RF_WRITE(mac, BWN_B2062_S_RFPLLCTL33, 0xfc); BWN_RF_WRITE(mac, BWN_B2062_S_RFPLLCTL34, 0); bwn_phy_lp_b2062_reset_pllbias(mac); bwn_phy_lp_b2062_vco_calib(mac); if (BWN_RF_READ(mac, BWN_B2062_S_RFPLLCTL3) & 0x10) { BWN_RF_MASK(mac, BWN_B2062_S_RFPLLCTL14, ~0x04); return (EIO); } } BWN_RF_MASK(mac, BWN_B2062_S_RFPLLCTL14, ~0x04); return (0); } static void bwn_phy_lp_set_anafilter(struct bwn_mac *mac, uint8_t channel) { struct bwn_phy_lp *plp = &mac->mac_phy.phy_lp; uint16_t tmp = (channel == 14); if (mac->mac_phy.rev < 2) { BWN_PHY_SETMASK(mac, BWN_PHY_LP_PHY_CTL, 0xfcff, tmp << 9); if ((mac->mac_phy.rev == 1) && (plp->plp_rccap)) bwn_phy_lp_set_rccap(mac); return; } BWN_RF_WRITE(mac, BWN_B2063_TX_BB_SP3, 0x3f); } static void bwn_phy_lp_set_gaintbl(struct bwn_mac *mac, uint32_t freq) { struct bwn_phy_lp *plp = &mac->mac_phy.phy_lp; struct bwn_softc *sc = mac->mac_sc; struct ieee80211com *ic = &sc->sc_ic; uint16_t iso, tmp[3]; KASSERT(mac->mac_phy.rev < 2, ("%s:%d: fail", __func__, __LINE__)); if (IEEE80211_IS_CHAN_2GHZ(ic->ic_curchan)) iso = plp->plp_txisoband_m; else if (freq <= 5320) iso = plp->plp_txisoband_l; else if (freq <= 5700) iso = plp->plp_txisoband_m; else iso = plp->plp_txisoband_h; tmp[0] = ((iso - 26) / 12) << 12; tmp[1] = tmp[0] + 0x1000; tmp[2] = tmp[0] + 0x2000; bwn_tab_write_multi(mac, BWN_TAB_2(13, 0), 3, tmp); bwn_tab_write_multi(mac, BWN_TAB_2(12, 0), 3, tmp); } static void bwn_phy_lp_digflt_save(struct bwn_mac *mac) { struct bwn_phy_lp *plp = &mac->mac_phy.phy_lp; int i; static const uint16_t addr[] = { BWN_PHY_OFDM(0xc1), BWN_PHY_OFDM(0xc2), BWN_PHY_OFDM(0xc3), BWN_PHY_OFDM(0xc4), BWN_PHY_OFDM(0xc5), BWN_PHY_OFDM(0xc6), BWN_PHY_OFDM(0xc7), BWN_PHY_OFDM(0xc8), BWN_PHY_OFDM(0xcf), }; static const uint16_t val[] = { 0xde5e, 0xe832, 0xe331, 0x4d26, 0x0026, 0x1420, 0x0020, 0xfe08, 0x0008, }; for (i = 0; i < N(addr); i++) { plp->plp_digfilt[i] = BWN_PHY_READ(mac, addr[i]); BWN_PHY_WRITE(mac, addr[i], val[i]); } } static void bwn_phy_lp_get_txpctlmode(struct 
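/*
 * The B2062 channel switch above programs the RF PLL divider as an
 * integer part (RFPLLCTL26) followed by fraction bytes (RFPLLCTL27..29):
 * after the first divide, the remainder is repeatedly multiplied by
 * 0x100 and divided again, which is long division in base 256, and the
 * last byte is rounded from the final remainder.  A standalone sketch
 * of that expansion; frac_bytes() and its parameters are hypothetical
 * names, and the rounding of the last byte is omitted for brevity.
 */
#include <stdint.h>

static void
frac_bytes(uint32_t num, uint32_t den, uint8_t *intpart, uint8_t frac[3])
{
	uint32_t q, r;
	int i;

	/* Assumes den < 2^24 so r * 0x100 cannot overflow, and that the
	 * integer part fits in one byte, as it does for the divider
	 * magnitudes used above. */
	q = num / den;
	r = num % den;
	*intpart = (uint8_t)q;
	for (i = 0; i < 3; i++) {
		r *= 0x100;		/* next base-256 digit of the fraction */
		q = r / den;
		r = r % den;
		frac[i] = (uint8_t)q;
	}
}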
bwn_mac *mac) { struct bwn_phy_lp *plp = &mac->mac_phy.phy_lp; struct bwn_softc *sc = mac->mac_sc; uint16_t ctl; ctl = BWN_PHY_READ(mac, BWN_PHY_TX_PWR_CTL_CMD); switch (ctl & BWN_PHY_TX_PWR_CTL_CMD_MODE) { case BWN_PHY_TX_PWR_CTL_CMD_MODE_OFF: plp->plp_txpctlmode = BWN_PHYLP_TXPCTL_OFF; break; case BWN_PHY_TX_PWR_CTL_CMD_MODE_SW: plp->plp_txpctlmode = BWN_PHYLP_TXPCTL_ON_SW; break; case BWN_PHY_TX_PWR_CTL_CMD_MODE_HW: plp->plp_txpctlmode = BWN_PHYLP_TXPCTL_ON_HW; break; default: plp->plp_txpctlmode = BWN_PHYLP_TXPCTL_UNKNOWN; device_printf(sc->sc_dev, "unknown command mode\n"); break; } } static void bwn_phy_lp_set_txpctlmode(struct bwn_mac *mac, uint8_t mode) { struct bwn_phy_lp *plp = &mac->mac_phy.phy_lp; uint16_t ctl; uint8_t old; bwn_phy_lp_get_txpctlmode(mac); old = plp->plp_txpctlmode; if (old == mode) return; plp->plp_txpctlmode = mode; if (old != BWN_PHYLP_TXPCTL_ON_HW && mode == BWN_PHYLP_TXPCTL_ON_HW) { BWN_PHY_SETMASK(mac, BWN_PHY_TX_PWR_CTL_CMD, 0xff80, plp->plp_tssiidx); BWN_PHY_SETMASK(mac, BWN_PHY_TX_PWR_CTL_NNUM, 0x8fff, ((uint16_t)plp->plp_tssinpt << 16)); /* disable TX GAIN override */ if (mac->mac_phy.rev < 2) BWN_PHY_MASK(mac, BWN_PHY_RF_OVERRIDE_2, 0xfeff); else { BWN_PHY_MASK(mac, BWN_PHY_RF_OVERRIDE_2, 0xff7f); BWN_PHY_MASK(mac, BWN_PHY_RF_OVERRIDE_2, 0xbfff); } BWN_PHY_MASK(mac, BWN_PHY_AFE_CTL_OVR, 0xffbf); plp->plp_txpwridx = -1; } if (mac->mac_phy.rev >= 2) { if (mode == BWN_PHYLP_TXPCTL_ON_HW) BWN_PHY_SET(mac, BWN_PHY_OFDM(0xd0), 0x2); else BWN_PHY_MASK(mac, BWN_PHY_OFDM(0xd0), 0xfffd); } /* writes TX Power Control mode */ switch (plp->plp_txpctlmode) { case BWN_PHYLP_TXPCTL_OFF: ctl = BWN_PHY_TX_PWR_CTL_CMD_MODE_OFF; break; case BWN_PHYLP_TXPCTL_ON_HW: ctl = BWN_PHY_TX_PWR_CTL_CMD_MODE_HW; break; case BWN_PHYLP_TXPCTL_ON_SW: ctl = BWN_PHY_TX_PWR_CTL_CMD_MODE_SW; break; default: ctl = 0; KASSERT(0 == 1, ("%s:%d: fail", __func__, __LINE__)); } BWN_PHY_SETMASK(mac, BWN_PHY_TX_PWR_CTL_CMD, (uint16_t)~BWN_PHY_TX_PWR_CTL_CMD_MODE, ctl); } static void bwn_phy_lp_bugfix(struct bwn_mac *mac) { struct bwn_phy_lp *plp = &mac->mac_phy.phy_lp; struct bwn_softc *sc = mac->mac_sc; const unsigned int size = 256; struct bwn_txgain tg; uint32_t rxcomp, txgain, coeff, rfpwr, *tabs; uint16_t tssinpt, tssiidx, value[2]; uint8_t mode; int8_t txpwridx; - tabs = (uint32_t *)malloc(sizeof(uint32_t) * size, M_DEVBUF, + tabs = (uint32_t *)mallocarray(size, sizeof(uint32_t), M_DEVBUF, M_NOWAIT | M_ZERO); if (tabs == NULL) { device_printf(sc->sc_dev, "failed to allocate buffer.\n"); return; } bwn_phy_lp_get_txpctlmode(mac); mode = plp->plp_txpctlmode; txpwridx = plp->plp_txpwridx; tssinpt = plp->plp_tssinpt; tssiidx = plp->plp_tssiidx; bwn_tab_read_multi(mac, (mac->mac_phy.rev < 2) ? BWN_TAB_4(10, 0x140) : BWN_TAB_4(7, 0x140), size, tabs); bwn_phy_lp_tblinit(mac); bwn_phy_lp_bbinit(mac); bwn_phy_lp_txpctl_init(mac); bwn_phy_lp_rf_onoff(mac, 1); bwn_phy_lp_set_txpctlmode(mac, BWN_PHYLP_TXPCTL_OFF); bwn_tab_write_multi(mac, (mac->mac_phy.rev < 2) ? 
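/*
 * The only change in this hunk is the switch from an open-coded
 * `sizeof(uint32_t) * size` to mallocarray(9), which takes the element
 * count and element size separately so the multiplication can be
 * checked for overflow before any allocation happens (M_NOWAIT and
 * M_ZERO behave as before).  A userland-flavoured sketch of the idiom;
 * checked_alloc_array() is a hypothetical helper and reports failure by
 * returning NULL, which is not necessarily the kernel's policy.
 */
#include <stdint.h>
#include <stdlib.h>

static void *
checked_alloc_array(size_t nmemb, size_t size)
{
	/* Refuse counts whose product cannot be represented in size_t;
	 * a plain `nmemb * size` would silently wrap instead. */
	if (size != 0 && nmemb > SIZE_MAX / size)
		return (NULL);
	return (malloc(nmemb * size));
}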
BWN_TAB_4(10, 0x140) : BWN_TAB_4(7, 0x140), size, tabs); BWN_WRITE_2(mac, BWN_CHANNEL, plp->plp_chan); plp->plp_tssinpt = tssinpt; plp->plp_tssiidx = tssiidx; bwn_phy_lp_set_anafilter(mac, plp->plp_chan); if (txpwridx != -1) { /* set TX power by index */ plp->plp_txpwridx = txpwridx; bwn_phy_lp_get_txpctlmode(mac); if (plp->plp_txpctlmode != BWN_PHYLP_TXPCTL_OFF) bwn_phy_lp_set_txpctlmode(mac, BWN_PHYLP_TXPCTL_ON_SW); if (mac->mac_phy.rev >= 2) { rxcomp = bwn_tab_read(mac, BWN_TAB_4(7, txpwridx + 320)); txgain = bwn_tab_read(mac, BWN_TAB_4(7, txpwridx + 192)); tg.tg_pad = (txgain >> 16) & 0xff; tg.tg_gm = txgain & 0xff; tg.tg_pga = (txgain >> 8) & 0xff; tg.tg_dac = (rxcomp >> 28) & 0xff; bwn_phy_lp_set_txgain(mac, &tg); } else { rxcomp = bwn_tab_read(mac, BWN_TAB_4(10, txpwridx + 320)); txgain = bwn_tab_read(mac, BWN_TAB_4(10, txpwridx + 192)); BWN_PHY_SETMASK(mac, BWN_PHY_TX_GAIN_CTL_OVERRIDE_VAL, 0xf800, (txgain >> 4) & 0x7fff); bwn_phy_lp_set_txgain_dac(mac, txgain & 0x7); bwn_phy_lp_set_txgain_pa(mac, (txgain >> 24) & 0x7f); } bwn_phy_lp_set_bbmult(mac, (rxcomp >> 20) & 0xff); /* set TX IQCC */ value[0] = (rxcomp >> 10) & 0x3ff; value[1] = rxcomp & 0x3ff; bwn_tab_write_multi(mac, BWN_TAB_2(0, 80), 2, value); coeff = bwn_tab_read(mac, (mac->mac_phy.rev >= 2) ? BWN_TAB_4(7, txpwridx + 448) : BWN_TAB_4(10, txpwridx + 448)); bwn_tab_write(mac, BWN_TAB_2(0, 85), coeff & 0xffff); if (mac->mac_phy.rev >= 2) { rfpwr = bwn_tab_read(mac, BWN_TAB_4(7, txpwridx + 576)); BWN_PHY_SETMASK(mac, BWN_PHY_RF_PWR_OVERRIDE, 0xff00, rfpwr & 0xffff); } bwn_phy_lp_set_txgain_override(mac); } if (plp->plp_rccap) bwn_phy_lp_set_rccap(mac); bwn_phy_lp_set_antenna(mac, plp->plp_antenna); bwn_phy_lp_set_txpctlmode(mac, mode); free(tabs, M_DEVBUF); } static void bwn_phy_lp_digflt_restore(struct bwn_mac *mac) { struct bwn_phy_lp *plp = &mac->mac_phy.phy_lp; int i; static const uint16_t addr[] = { BWN_PHY_OFDM(0xc1), BWN_PHY_OFDM(0xc2), BWN_PHY_OFDM(0xc3), BWN_PHY_OFDM(0xc4), BWN_PHY_OFDM(0xc5), BWN_PHY_OFDM(0xc6), BWN_PHY_OFDM(0xc7), BWN_PHY_OFDM(0xc8), BWN_PHY_OFDM(0xcf), }; for (i = 0; i < N(addr); i++) BWN_PHY_WRITE(mac, addr[i], plp->plp_digfilt[i]); } static void bwn_phy_lp_tblinit(struct bwn_mac *mac) { uint32_t freq = ieee80211_ieee2mhz(bwn_phy_lp_get_default_chan(mac), 0); if (mac->mac_phy.rev < 2) { bwn_phy_lp_tblinit_r01(mac); bwn_phy_lp_tblinit_txgain(mac); bwn_phy_lp_set_gaintbl(mac, freq); return; } bwn_phy_lp_tblinit_r2(mac); bwn_phy_lp_tblinit_txgain(mac); } struct bwn_wpair { uint16_t reg; uint16_t value; }; struct bwn_smpair { uint16_t offset; uint16_t mask; uint16_t set; }; static void bwn_phy_lp_bbinit_r2(struct bwn_mac *mac) { struct bwn_phy_lp *plp = &mac->mac_phy.phy_lp; struct bwn_softc *sc = mac->mac_sc; struct ieee80211com *ic = &sc->sc_ic; static const struct bwn_wpair v1[] = { { BWN_PHY_AFE_DAC_CTL, 0x50 }, { BWN_PHY_AFE_CTL, 0x8800 }, { BWN_PHY_AFE_CTL_OVR, 0 }, { BWN_PHY_AFE_CTL_OVRVAL, 0 }, { BWN_PHY_RF_OVERRIDE_0, 0 }, { BWN_PHY_RF_OVERRIDE_2, 0 }, { BWN_PHY_OFDM(0xf9), 0 }, { BWN_PHY_TR_LOOKUP_1, 0 } }; static const struct bwn_smpair v2[] = { { BWN_PHY_OFDMSYNCTHRESH0, 0xff00, 0xb4 }, { BWN_PHY_DCOFFSETTRANSIENT, 0xf8ff, 0x200 }, { BWN_PHY_DCOFFSETTRANSIENT, 0xff00, 0x7f }, { BWN_PHY_GAINDIRECTMISMATCH, 0xff0f, 0x40 }, { BWN_PHY_PREAMBLECONFIRMTO, 0xff00, 0x2 } }; static const struct bwn_smpair v3[] = { { BWN_PHY_OFDM(0xfe), 0xffe0, 0x1f }, { BWN_PHY_OFDM(0xff), 0xffe0, 0xc }, { BWN_PHY_OFDM(0x100), 0xff00, 0x19 }, { BWN_PHY_OFDM(0xff), 0x03ff, 0x3c00 }, { BWN_PHY_OFDM(0xfe), 0xfc1f, 
0x3e0 }, { BWN_PHY_OFDM(0xff), 0xffe0, 0xc }, { BWN_PHY_OFDM(0x100), 0x00ff, 0x1900 }, { BWN_PHY_CLIPCTRTHRESH, 0x83ff, 0x5800 }, { BWN_PHY_CLIPCTRTHRESH, 0xffe0, 0x12 }, { BWN_PHY_GAINMISMATCH, 0x0fff, 0x9000 }, }; int i; for (i = 0; i < N(v1); i++) BWN_PHY_WRITE(mac, v1[i].reg, v1[i].value); BWN_PHY_SET(mac, BWN_PHY_ADC_COMPENSATION_CTL, 0x10); for (i = 0; i < N(v2); i++) BWN_PHY_SETMASK(mac, v2[i].offset, v2[i].mask, v2[i].set); BWN_PHY_MASK(mac, BWN_PHY_CRSGAIN_CTL, ~0x4000); BWN_PHY_MASK(mac, BWN_PHY_CRSGAIN_CTL, ~0x2000); BWN_PHY_SET(mac, BWN_PHY_OFDM(0x10a), 0x1); if (siba_get_pci_revid(sc->sc_dev) >= 0x18) { bwn_tab_write(mac, BWN_TAB_4(17, 65), 0xec); BWN_PHY_SETMASK(mac, BWN_PHY_OFDM(0x10a), 0xff01, 0x14); } else { BWN_PHY_SETMASK(mac, BWN_PHY_OFDM(0x10a), 0xff01, 0x10); } BWN_PHY_SETMASK(mac, BWN_PHY_OFDM(0xdf), 0xff00, 0xf4); BWN_PHY_SETMASK(mac, BWN_PHY_OFDM(0xdf), 0x00ff, 0xf100); BWN_PHY_WRITE(mac, BWN_PHY_CLIPTHRESH, 0x48); BWN_PHY_SETMASK(mac, BWN_PHY_HIGAINDB, 0xff00, 0x46); BWN_PHY_SETMASK(mac, BWN_PHY_OFDM(0xe4), 0xff00, 0x10); BWN_PHY_SETMASK(mac, BWN_PHY_PWR_THRESH1, 0xfff0, 0x9); BWN_PHY_MASK(mac, BWN_PHY_GAINDIRECTMISMATCH, ~0xf); BWN_PHY_SETMASK(mac, BWN_PHY_VERYLOWGAINDB, 0x00ff, 0x5500); BWN_PHY_SETMASK(mac, BWN_PHY_CLIPCTRTHRESH, 0xfc1f, 0xa0); BWN_PHY_SETMASK(mac, BWN_PHY_GAINDIRECTMISMATCH, 0xe0ff, 0x300); BWN_PHY_SETMASK(mac, BWN_PHY_HIGAINDB, 0x00ff, 0x2a00); if ((siba_get_chipid(sc->sc_dev) == 0x4325) && (siba_get_chiprev(sc->sc_dev) == 0)) { BWN_PHY_SETMASK(mac, BWN_PHY_LOWGAINDB, 0x00ff, 0x2100); BWN_PHY_SETMASK(mac, BWN_PHY_VERYLOWGAINDB, 0xff00, 0xa); } else { BWN_PHY_SETMASK(mac, BWN_PHY_LOWGAINDB, 0x00ff, 0x1e00); BWN_PHY_SETMASK(mac, BWN_PHY_VERYLOWGAINDB, 0xff00, 0xd); } for (i = 0; i < N(v3); i++) BWN_PHY_SETMASK(mac, v3[i].offset, v3[i].mask, v3[i].set); if ((siba_get_chipid(sc->sc_dev) == 0x4325) && (siba_get_chiprev(sc->sc_dev) == 0)) { bwn_tab_write(mac, BWN_TAB_2(0x08, 0x14), 0); bwn_tab_write(mac, BWN_TAB_2(0x08, 0x12), 0x40); } if (IEEE80211_IS_CHAN_2GHZ(ic->ic_curchan)) { BWN_PHY_SET(mac, BWN_PHY_CRSGAIN_CTL, 0x40); BWN_PHY_SETMASK(mac, BWN_PHY_CRSGAIN_CTL, 0xf0ff, 0xb00); BWN_PHY_SETMASK(mac, BWN_PHY_SYNCPEAKCNT, 0xfff8, 0x6); BWN_PHY_SETMASK(mac, BWN_PHY_MINPWR_LEVEL, 0x00ff, 0x9d00); BWN_PHY_SETMASK(mac, BWN_PHY_MINPWR_LEVEL, 0xff00, 0xa1); BWN_PHY_MASK(mac, BWN_PHY_IDLEAFTERPKTRXTO, 0x00ff); } else BWN_PHY_MASK(mac, BWN_PHY_CRSGAIN_CTL, ~0x40); BWN_PHY_SETMASK(mac, BWN_PHY_CRS_ED_THRESH, 0xff00, 0xb3); BWN_PHY_SETMASK(mac, BWN_PHY_CRS_ED_THRESH, 0x00ff, 0xad00); BWN_PHY_SETMASK(mac, BWN_PHY_INPUT_PWRDB, 0xff00, plp->plp_rxpwroffset); BWN_PHY_SET(mac, BWN_PHY_RESET_CTL, 0x44); BWN_PHY_WRITE(mac, BWN_PHY_RESET_CTL, 0x80); BWN_PHY_WRITE(mac, BWN_PHY_AFE_RSSI_CTL_0, 0xa954); BWN_PHY_WRITE(mac, BWN_PHY_AFE_RSSI_CTL_1, 0x2000 | ((uint16_t)plp->plp_rssigs << 10) | ((uint16_t)plp->plp_rssivc << 4) | plp->plp_rssivf); if ((siba_get_chipid(sc->sc_dev) == 0x4325) && (siba_get_chiprev(sc->sc_dev) == 0)) { BWN_PHY_SET(mac, BWN_PHY_AFE_ADC_CTL_0, 0x1c); BWN_PHY_SETMASK(mac, BWN_PHY_AFE_CTL, 0x00ff, 0x8800); BWN_PHY_SETMASK(mac, BWN_PHY_AFE_ADC_CTL_1, 0xfc3c, 0x0400); } bwn_phy_lp_digflt_save(mac); } static void bwn_phy_lp_bbinit_r01(struct bwn_mac *mac) { struct bwn_phy_lp *plp = &mac->mac_phy.phy_lp; struct bwn_softc *sc = mac->mac_sc; struct ieee80211com *ic = &sc->sc_ic; static const struct bwn_smpair v1[] = { { BWN_PHY_CLIPCTRTHRESH, 0xffe0, 0x0005 }, { BWN_PHY_CLIPCTRTHRESH, 0xfc1f, 0x0180 }, { BWN_PHY_CLIPCTRTHRESH, 0x83ff, 0x3c00 }, { 
BWN_PHY_GAINDIRECTMISMATCH, 0xfff0, 0x0005 }, { BWN_PHY_GAIN_MISMATCH_LIMIT, 0xffc0, 0x001a }, { BWN_PHY_CRS_ED_THRESH, 0xff00, 0x00b3 }, { BWN_PHY_CRS_ED_THRESH, 0x00ff, 0xad00 } }; static const struct bwn_smpair v2[] = { { BWN_PHY_TR_LOOKUP_1, 0xffc0, 0x000a }, { BWN_PHY_TR_LOOKUP_1, 0x3f00, 0x0900 }, { BWN_PHY_TR_LOOKUP_2, 0xffc0, 0x000a }, { BWN_PHY_TR_LOOKUP_2, 0xc0ff, 0x0b00 }, { BWN_PHY_TR_LOOKUP_3, 0xffc0, 0x000a }, { BWN_PHY_TR_LOOKUP_3, 0xc0ff, 0x0400 }, { BWN_PHY_TR_LOOKUP_4, 0xffc0, 0x000a }, { BWN_PHY_TR_LOOKUP_4, 0xc0ff, 0x0b00 }, { BWN_PHY_TR_LOOKUP_5, 0xffc0, 0x000a }, { BWN_PHY_TR_LOOKUP_5, 0xc0ff, 0x0900 }, { BWN_PHY_TR_LOOKUP_6, 0xffc0, 0x000a }, { BWN_PHY_TR_LOOKUP_6, 0xc0ff, 0x0b00 }, { BWN_PHY_TR_LOOKUP_7, 0xffc0, 0x000a }, { BWN_PHY_TR_LOOKUP_7, 0xc0ff, 0x0900 }, { BWN_PHY_TR_LOOKUP_8, 0xffc0, 0x000a }, { BWN_PHY_TR_LOOKUP_8, 0xc0ff, 0x0b00 } }; static const struct bwn_smpair v3[] = { { BWN_PHY_TR_LOOKUP_1, 0xffc0, 0x0001 }, { BWN_PHY_TR_LOOKUP_1, 0xc0ff, 0x0400 }, { BWN_PHY_TR_LOOKUP_2, 0xffc0, 0x0001 }, { BWN_PHY_TR_LOOKUP_2, 0xc0ff, 0x0500 }, { BWN_PHY_TR_LOOKUP_3, 0xffc0, 0x0002 }, { BWN_PHY_TR_LOOKUP_3, 0xc0ff, 0x0800 }, { BWN_PHY_TR_LOOKUP_4, 0xffc0, 0x0002 }, { BWN_PHY_TR_LOOKUP_4, 0xc0ff, 0x0a00 } }; static const struct bwn_smpair v4[] = { { BWN_PHY_TR_LOOKUP_1, 0xffc0, 0x0004 }, { BWN_PHY_TR_LOOKUP_1, 0xc0ff, 0x0800 }, { BWN_PHY_TR_LOOKUP_2, 0xffc0, 0x0004 }, { BWN_PHY_TR_LOOKUP_2, 0xc0ff, 0x0c00 }, { BWN_PHY_TR_LOOKUP_3, 0xffc0, 0x0002 }, { BWN_PHY_TR_LOOKUP_3, 0xc0ff, 0x0100 }, { BWN_PHY_TR_LOOKUP_4, 0xffc0, 0x0002 }, { BWN_PHY_TR_LOOKUP_4, 0xc0ff, 0x0300 } }; static const struct bwn_smpair v5[] = { { BWN_PHY_TR_LOOKUP_1, 0xffc0, 0x000a }, { BWN_PHY_TR_LOOKUP_1, 0xc0ff, 0x0900 }, { BWN_PHY_TR_LOOKUP_2, 0xffc0, 0x000a }, { BWN_PHY_TR_LOOKUP_2, 0xc0ff, 0x0b00 }, { BWN_PHY_TR_LOOKUP_3, 0xffc0, 0x0006 }, { BWN_PHY_TR_LOOKUP_3, 0xc0ff, 0x0500 }, { BWN_PHY_TR_LOOKUP_4, 0xffc0, 0x0006 }, { BWN_PHY_TR_LOOKUP_4, 0xc0ff, 0x0700 } }; int i; uint16_t tmp, tmp2; BWN_PHY_MASK(mac, BWN_PHY_AFE_DAC_CTL, 0xf7ff); BWN_PHY_WRITE(mac, BWN_PHY_AFE_CTL, 0); BWN_PHY_WRITE(mac, BWN_PHY_AFE_CTL_OVR, 0); BWN_PHY_WRITE(mac, BWN_PHY_RF_OVERRIDE_0, 0); BWN_PHY_WRITE(mac, BWN_PHY_RF_OVERRIDE_2, 0); BWN_PHY_SET(mac, BWN_PHY_AFE_DAC_CTL, 0x0004); BWN_PHY_SETMASK(mac, BWN_PHY_OFDMSYNCTHRESH0, 0xff00, 0x0078); BWN_PHY_SETMASK(mac, BWN_PHY_CLIPCTRTHRESH, 0x83ff, 0x5800); BWN_PHY_WRITE(mac, BWN_PHY_ADC_COMPENSATION_CTL, 0x0016); BWN_PHY_SETMASK(mac, BWN_PHY_AFE_ADC_CTL_0, 0xfff8, 0x0004); BWN_PHY_SETMASK(mac, BWN_PHY_VERYLOWGAINDB, 0x00ff, 0x5400); BWN_PHY_SETMASK(mac, BWN_PHY_HIGAINDB, 0x00ff, 0x2400); BWN_PHY_SETMASK(mac, BWN_PHY_LOWGAINDB, 0x00ff, 0x2100); BWN_PHY_SETMASK(mac, BWN_PHY_VERYLOWGAINDB, 0xff00, 0x0006); BWN_PHY_MASK(mac, BWN_PHY_RX_RADIO_CTL, 0xfffe); for (i = 0; i < N(v1); i++) BWN_PHY_SETMASK(mac, v1[i].offset, v1[i].mask, v1[i].set); BWN_PHY_SETMASK(mac, BWN_PHY_INPUT_PWRDB, 0xff00, plp->plp_rxpwroffset); if ((siba_sprom_get_bf_lo(sc->sc_dev) & BWN_BFL_FEM) && ((IEEE80211_IS_CHAN_5GHZ(ic->ic_curchan)) || (siba_sprom_get_bf_hi(sc->sc_dev) & BWN_BFH_LDO_PAREF))) { siba_cc_pmu_set_ldovolt(sc->sc_dev, SIBA_LDO_PAREF, 0x28); siba_cc_pmu_set_ldoparef(sc->sc_dev, 1); if (mac->mac_phy.rev == 0) BWN_PHY_SETMASK(mac, BWN_PHY_LP_RF_SIGNAL_LUT, 0xffcf, 0x0010); bwn_tab_write(mac, BWN_TAB_2(11, 7), 60); } else { siba_cc_pmu_set_ldoparef(sc->sc_dev, 0); BWN_PHY_SETMASK(mac, BWN_PHY_LP_RF_SIGNAL_LUT, 0xffcf, 0x0020); bwn_tab_write(mac, BWN_TAB_2(11, 7), 100); } tmp = plp->plp_rssivf | 
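/*
 * The RSSI calibration values read from the SPROM (plp_rssivf,
 * plp_rssivc, plp_rssigs) are packed into a single AFE register word
 * here and in the rev >= 2 path above: the fine value occupies the low
 * nibble, the coarse value starts at bit 4, the gain select at bit 10,
 * and fixed control bits are ORed on top.  A tiny sketch of that
 * packing; pack_rssi_word() and the field names are hypothetical, and
 * the widths are only inferred from the shifts used in this file.
 */
#include <stdint.h>

static uint16_t
pack_rssi_word(uint8_t vf, uint8_t vc, uint8_t gs, uint16_t ctlbits)
{
	return (ctlbits |
	    (uint16_t)(gs << 10) |	/* gain select */
	    (uint16_t)(vc << 4) |	/* coarse value */
	    vf);			/* fine value, low nibble */
}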
plp->plp_rssivc << 4 | 0xa000; BWN_PHY_WRITE(mac, BWN_PHY_AFE_RSSI_CTL_0, tmp); if (siba_sprom_get_bf_hi(sc->sc_dev) & BWN_BFH_RSSIINV) BWN_PHY_SETMASK(mac, BWN_PHY_AFE_RSSI_CTL_1, 0xf000, 0x0aaa); else BWN_PHY_SETMASK(mac, BWN_PHY_AFE_RSSI_CTL_1, 0xf000, 0x02aa); bwn_tab_write(mac, BWN_TAB_2(11, 1), 24); BWN_PHY_SETMASK(mac, BWN_PHY_RX_RADIO_CTL, 0xfff9, (plp->plp_bxarch << 1)); if (mac->mac_phy.rev == 1 && (siba_sprom_get_bf_hi(sc->sc_dev) & BWN_BFH_FEM_BT)) { for (i = 0; i < N(v2); i++) BWN_PHY_SETMASK(mac, v2[i].offset, v2[i].mask, v2[i].set); } else if (IEEE80211_IS_CHAN_5GHZ(ic->ic_curchan) || (siba_get_pci_subdevice(sc->sc_dev) == 0x048a) || ((mac->mac_phy.rev == 0) && (siba_sprom_get_bf_lo(sc->sc_dev) & BWN_BFL_FEM))) { for (i = 0; i < N(v3); i++) BWN_PHY_SETMASK(mac, v3[i].offset, v3[i].mask, v3[i].set); } else if (mac->mac_phy.rev == 1 || (siba_sprom_get_bf_lo(sc->sc_dev) & BWN_BFL_FEM)) { for (i = 0; i < N(v4); i++) BWN_PHY_SETMASK(mac, v4[i].offset, v4[i].mask, v4[i].set); } else { for (i = 0; i < N(v5); i++) BWN_PHY_SETMASK(mac, v5[i].offset, v5[i].mask, v5[i].set); } if (mac->mac_phy.rev == 1 && (siba_sprom_get_bf_hi(sc->sc_dev) & BWN_BFH_LDO_PAREF)) { BWN_PHY_COPY(mac, BWN_PHY_TR_LOOKUP_5, BWN_PHY_TR_LOOKUP_1); BWN_PHY_COPY(mac, BWN_PHY_TR_LOOKUP_6, BWN_PHY_TR_LOOKUP_2); BWN_PHY_COPY(mac, BWN_PHY_TR_LOOKUP_7, BWN_PHY_TR_LOOKUP_3); BWN_PHY_COPY(mac, BWN_PHY_TR_LOOKUP_8, BWN_PHY_TR_LOOKUP_4); } if ((siba_sprom_get_bf_hi(sc->sc_dev) & BWN_BFH_FEM_BT) && (siba_get_chipid(sc->sc_dev) == 0x5354) && (siba_get_chippkg(sc->sc_dev) == SIBA_CHIPPACK_BCM4712S)) { BWN_PHY_SET(mac, BWN_PHY_CRSGAIN_CTL, 0x0006); BWN_PHY_WRITE(mac, BWN_PHY_GPIO_SELECT, 0x0005); BWN_PHY_WRITE(mac, BWN_PHY_GPIO_OUTEN, 0xffff); bwn_hf_write(mac, bwn_hf_read(mac) | BWN_HF_PR45960W); } if (IEEE80211_IS_CHAN_2GHZ(ic->ic_curchan)) { BWN_PHY_SET(mac, BWN_PHY_LP_PHY_CTL, 0x8000); BWN_PHY_SET(mac, BWN_PHY_CRSGAIN_CTL, 0x0040); BWN_PHY_SETMASK(mac, BWN_PHY_MINPWR_LEVEL, 0x00ff, 0xa400); BWN_PHY_SETMASK(mac, BWN_PHY_CRSGAIN_CTL, 0xf0ff, 0x0b00); BWN_PHY_SETMASK(mac, BWN_PHY_SYNCPEAKCNT, 0xfff8, 0x0007); BWN_PHY_SETMASK(mac, BWN_PHY_DSSS_CONFIRM_CNT, 0xfff8, 0x0003); BWN_PHY_SETMASK(mac, BWN_PHY_DSSS_CONFIRM_CNT, 0xffc7, 0x0020); BWN_PHY_MASK(mac, BWN_PHY_IDLEAFTERPKTRXTO, 0x00ff); } else { BWN_PHY_MASK(mac, BWN_PHY_LP_PHY_CTL, 0x7fff); BWN_PHY_MASK(mac, BWN_PHY_CRSGAIN_CTL, 0xffbf); } if (mac->mac_phy.rev == 1) { tmp = BWN_PHY_READ(mac, BWN_PHY_CLIPCTRTHRESH); tmp2 = (tmp & 0x03e0) >> 5; tmp2 |= tmp2 << 5; BWN_PHY_WRITE(mac, BWN_PHY_4C3, tmp2); tmp = BWN_PHY_READ(mac, BWN_PHY_GAINDIRECTMISMATCH); tmp2 = (tmp & 0x1f00) >> 8; tmp2 |= tmp2 << 5; BWN_PHY_WRITE(mac, BWN_PHY_4C4, tmp2); tmp = BWN_PHY_READ(mac, BWN_PHY_VERYLOWGAINDB); tmp2 = tmp & 0x00ff; tmp2 |= tmp << 8; BWN_PHY_WRITE(mac, BWN_PHY_4C5, tmp2); } } struct bwn_b2062_freq { uint16_t freq; uint8_t value[6]; }; static void bwn_phy_lp_b2062_init(struct bwn_mac *mac) { #define CALC_CTL7(freq, div) \ (((800000000 * (div) + (freq)) / (2 * (freq)) - 8) & 0xff) #define CALC_CTL18(freq, div) \ ((((100 * (freq) + 16000000 * (div)) / (32000000 * (div))) - 1) & 0xff) #define CALC_CTL19(freq, div) \ ((((2 * (freq) + 1000000 * (div)) / (2000000 * (div))) - 1) & 0xff) struct bwn_phy_lp *plp = &mac->mac_phy.phy_lp; struct bwn_softc *sc = mac->mac_sc; struct ieee80211com *ic = &sc->sc_ic; static const struct bwn_b2062_freq freqdata_tab[] = { { 12000, { 6, 6, 6, 6, 10, 6 } }, { 13000, { 4, 4, 4, 4, 11, 7 } }, { 14400, { 3, 3, 3, 3, 12, 7 } }, { 16200, { 3, 3, 3, 3, 13, 8 } }, { 
18000, { 2, 2, 2, 2, 14, 8 } }, { 19200, { 1, 1, 1, 1, 14, 9 } } }; static const struct bwn_wpair v1[] = { { BWN_B2062_N_TXCTL3, 0 }, { BWN_B2062_N_TXCTL4, 0 }, { BWN_B2062_N_TXCTL5, 0 }, { BWN_B2062_N_TXCTL6, 0 }, { BWN_B2062_N_PDNCTL0, 0x40 }, { BWN_B2062_N_PDNCTL0, 0 }, { BWN_B2062_N_CALIB_TS, 0x10 }, { BWN_B2062_N_CALIB_TS, 0 } }; const struct bwn_b2062_freq *f = NULL; uint32_t xtalfreq, ref; unsigned int i; bwn_phy_lp_b2062_tblinit(mac); for (i = 0; i < N(v1); i++) BWN_RF_WRITE(mac, v1[i].reg, v1[i].value); if (mac->mac_phy.rev > 0) BWN_RF_WRITE(mac, BWN_B2062_S_BG_CTL1, (BWN_RF_READ(mac, BWN_B2062_N_COM2) >> 1) | 0x80); if (IEEE80211_IS_CHAN_2GHZ(ic->ic_curchan)) BWN_RF_SET(mac, BWN_B2062_N_TSSI_CTL0, 0x1); else BWN_RF_MASK(mac, BWN_B2062_N_TSSI_CTL0, ~0x1); KASSERT(siba_get_cc_caps(sc->sc_dev) & SIBA_CC_CAPS_PMU, ("%s:%d: fail", __func__, __LINE__)); xtalfreq = siba_get_cc_pmufreq(sc->sc_dev) * 1000; KASSERT(xtalfreq != 0, ("%s:%d: fail", __func__, __LINE__)); if (xtalfreq <= 30000000) { plp->plp_div = 1; BWN_RF_MASK(mac, BWN_B2062_S_RFPLLCTL1, 0xfffb); } else { plp->plp_div = 2; BWN_RF_SET(mac, BWN_B2062_S_RFPLLCTL1, 0x4); } BWN_RF_WRITE(mac, BWN_B2062_S_RFPLLCTL7, CALC_CTL7(xtalfreq, plp->plp_div)); BWN_RF_WRITE(mac, BWN_B2062_S_RFPLLCTL18, CALC_CTL18(xtalfreq, plp->plp_div)); BWN_RF_WRITE(mac, BWN_B2062_S_RFPLLCTL19, CALC_CTL19(xtalfreq, plp->plp_div)); ref = (1000 * plp->plp_div + 2 * xtalfreq) / (2000 * plp->plp_div); ref &= 0xffff; for (i = 0; i < N(freqdata_tab); i++) { if (ref < freqdata_tab[i].freq) { f = &freqdata_tab[i]; break; } } if (f == NULL) f = &freqdata_tab[N(freqdata_tab) - 1]; BWN_RF_WRITE(mac, BWN_B2062_S_RFPLLCTL8, ((uint16_t)(f->value[1]) << 4) | f->value[0]); BWN_RF_WRITE(mac, BWN_B2062_S_RFPLLCTL9, ((uint16_t)(f->value[3]) << 4) | f->value[2]); BWN_RF_WRITE(mac, BWN_B2062_S_RFPLLCTL10, f->value[4]); BWN_RF_WRITE(mac, BWN_B2062_S_RFPLLCTL11, f->value[5]); #undef CALC_CTL7 #undef CALC_CTL18 #undef CALC_CTL19 } static void bwn_phy_lp_b2063_init(struct bwn_mac *mac) { bwn_phy_lp_b2063_tblinit(mac); BWN_RF_WRITE(mac, BWN_B2063_LOGEN_SP5, 0); BWN_RF_SET(mac, BWN_B2063_COM8, 0x38); BWN_RF_WRITE(mac, BWN_B2063_REG_SP1, 0x56); BWN_RF_MASK(mac, BWN_B2063_RX_BB_CTL2, ~0x2); BWN_RF_WRITE(mac, BWN_B2063_PA_SP7, 0); BWN_RF_WRITE(mac, BWN_B2063_TX_RF_SP6, 0x20); BWN_RF_WRITE(mac, BWN_B2063_TX_RF_SP9, 0x40); if (mac->mac_phy.rev == 2) { BWN_RF_WRITE(mac, BWN_B2063_PA_SP3, 0xa0); BWN_RF_WRITE(mac, BWN_B2063_PA_SP4, 0xa0); BWN_RF_WRITE(mac, BWN_B2063_PA_SP2, 0x18); } else { BWN_RF_WRITE(mac, BWN_B2063_PA_SP3, 0x20); BWN_RF_WRITE(mac, BWN_B2063_PA_SP2, 0x20); } } static void bwn_phy_lp_rxcal_r2(struct bwn_mac *mac) { struct bwn_softc *sc = mac->mac_sc; static const struct bwn_wpair v1[] = { { BWN_B2063_RX_BB_SP8, 0x0 }, { BWN_B2063_RC_CALIB_CTL1, 0x7e }, { BWN_B2063_RC_CALIB_CTL1, 0x7c }, { BWN_B2063_RC_CALIB_CTL2, 0x15 }, { BWN_B2063_RC_CALIB_CTL3, 0x70 }, { BWN_B2063_RC_CALIB_CTL4, 0x52 }, { BWN_B2063_RC_CALIB_CTL5, 0x1 }, { BWN_B2063_RC_CALIB_CTL1, 0x7d } }; static const struct bwn_wpair v2[] = { { BWN_B2063_TX_BB_SP3, 0x0 }, { BWN_B2063_RC_CALIB_CTL1, 0x7e }, { BWN_B2063_RC_CALIB_CTL1, 0x7c }, { BWN_B2063_RC_CALIB_CTL2, 0x55 }, { BWN_B2063_RC_CALIB_CTL3, 0x76 } }; uint32_t freqxtal = siba_get_cc_pmufreq(sc->sc_dev) * 1000; int i; uint8_t tmp; tmp = BWN_RF_READ(mac, BWN_B2063_RX_BB_SP8) & 0xff; for (i = 0; i < 2; i++) BWN_RF_WRITE(mac, v1[i].reg, v1[i].value); BWN_RF_MASK(mac, BWN_B2063_PLL_SP1, 0xf7); for (i = 2; i < N(v1); i++) BWN_RF_WRITE(mac, v1[i].reg, v1[i].value); 
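/*
 * Much of this PHY/RF bring-up is table driven: a const array of
 * { register, value } pairs (struct bwn_wpair) or { register, mask,
 * set } triples (struct bwn_smpair) walked by a short loop, as in the
 * writes from v1[] just above.  A stripped-down standalone sketch of
 * the pattern; write_reg(), shadow[] and the sample table are
 * hypothetical stand-ins for the BWN_RF_WRITE() path.
 */
#include <stddef.h>
#include <stdint.h>

struct wpair {
	uint16_t	reg;
	uint16_t	value;
};

static uint16_t shadow[0x100];		/* stand-in for the hardware */

static void
write_reg(uint16_t reg, uint16_t value)
{
	shadow[reg & 0xff] = value;
}

static const struct wpair init_tab[] = {
	{ 0x0010, 0x0000 },
	{ 0x0011, 0x0040 },
	{ 0x0012, 0x0010 },
};

static void
apply_init_tab(void)
{
	size_t i;

	/* One loop replaces a long run of hand-written register writes
	 * and keeps the actual values in a single, reviewable table. */
	for (i = 0; i < sizeof(init_tab) / sizeof(init_tab[0]); i++)
		write_reg(init_tab[i].reg, init_tab[i].value);
}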
for (i = 0; i < 10000; i++) { if (BWN_RF_READ(mac, BWN_B2063_RC_CALIB_CTL6) & 0x2) break; DELAY(1000); } if (!(BWN_RF_READ(mac, BWN_B2063_RC_CALIB_CTL6) & 0x2)) BWN_RF_WRITE(mac, BWN_B2063_RX_BB_SP8, tmp); tmp = BWN_RF_READ(mac, BWN_B2063_TX_BB_SP3) & 0xff; for (i = 0; i < N(v2); i++) BWN_RF_WRITE(mac, v2[i].reg, v2[i].value); if (freqxtal == 24000000) { BWN_RF_WRITE(mac, BWN_B2063_RC_CALIB_CTL4, 0xfc); BWN_RF_WRITE(mac, BWN_B2063_RC_CALIB_CTL5, 0x0); } else { BWN_RF_WRITE(mac, BWN_B2063_RC_CALIB_CTL4, 0x13); BWN_RF_WRITE(mac, BWN_B2063_RC_CALIB_CTL5, 0x1); } BWN_RF_WRITE(mac, BWN_B2063_PA_SP7, 0x7d); for (i = 0; i < 10000; i++) { if (BWN_RF_READ(mac, BWN_B2063_RC_CALIB_CTL6) & 0x2) break; DELAY(1000); } if (!(BWN_RF_READ(mac, BWN_B2063_RC_CALIB_CTL6) & 0x2)) BWN_RF_WRITE(mac, BWN_B2063_TX_BB_SP3, tmp); BWN_RF_WRITE(mac, BWN_B2063_RC_CALIB_CTL1, 0x7e); } static void bwn_phy_lp_rccal_r12(struct bwn_mac *mac) { struct bwn_phy_lp *plp = &mac->mac_phy.phy_lp; struct bwn_softc *sc = mac->mac_sc; struct bwn_phy_lp_iq_est ie; struct bwn_txgain tx_gains; static const uint32_t pwrtbl[21] = { 0x10000, 0x10557, 0x10e2d, 0x113e0, 0x10f22, 0x0ff64, 0x0eda2, 0x0e5d4, 0x0efd1, 0x0fbe8, 0x0b7b8, 0x04b35, 0x01a5e, 0x00a0b, 0x00444, 0x001fd, 0x000ff, 0x00088, 0x0004c, 0x0002c, 0x0001a, }; uint32_t npwr, ipwr, sqpwr, tmp; int loopback, i, j, sum, error; uint16_t save[7]; uint8_t txo, bbmult, txpctlmode; error = bwn_phy_lp_switch_channel(mac, 7); if (error) device_printf(sc->sc_dev, "failed to change channel to 7 (%d)\n", error); txo = (BWN_PHY_READ(mac, BWN_PHY_AFE_CTL_OVR) & 0x40) ? 1 : 0; bbmult = bwn_phy_lp_get_bbmult(mac); if (txo) tx_gains = bwn_phy_lp_get_txgain(mac); save[0] = BWN_PHY_READ(mac, BWN_PHY_RF_OVERRIDE_0); save[1] = BWN_PHY_READ(mac, BWN_PHY_RF_OVERRIDE_VAL_0); save[2] = BWN_PHY_READ(mac, BWN_PHY_AFE_CTL_OVR); save[3] = BWN_PHY_READ(mac, BWN_PHY_AFE_CTL_OVRVAL); save[4] = BWN_PHY_READ(mac, BWN_PHY_RF_OVERRIDE_2); save[5] = BWN_PHY_READ(mac, BWN_PHY_RF_OVERRIDE_2_VAL); save[6] = BWN_PHY_READ(mac, BWN_PHY_LP_PHY_CTL); bwn_phy_lp_get_txpctlmode(mac); txpctlmode = plp->plp_txpctlmode; bwn_phy_lp_set_txpctlmode(mac, BWN_PHYLP_TXPCTL_OFF); /* disable CRS */ bwn_phy_lp_set_deaf(mac, 1); bwn_phy_lp_set_trsw_over(mac, 0, 1); BWN_PHY_MASK(mac, BWN_PHY_RF_OVERRIDE_VAL_0, 0xfffb); BWN_PHY_SET(mac, BWN_PHY_RF_OVERRIDE_0, 0x4); BWN_PHY_MASK(mac, BWN_PHY_RF_OVERRIDE_VAL_0, 0xfff7); BWN_PHY_SET(mac, BWN_PHY_RF_OVERRIDE_0, 0x8); BWN_PHY_SET(mac, BWN_PHY_RF_OVERRIDE_VAL_0, 0x10); BWN_PHY_SET(mac, BWN_PHY_RF_OVERRIDE_0, 0x10); BWN_PHY_MASK(mac, BWN_PHY_RF_OVERRIDE_VAL_0, 0xffdf); BWN_PHY_SET(mac, BWN_PHY_RF_OVERRIDE_0, 0x20); BWN_PHY_MASK(mac, BWN_PHY_RF_OVERRIDE_VAL_0, 0xffbf); BWN_PHY_SET(mac, BWN_PHY_RF_OVERRIDE_0, 0x40); BWN_PHY_SET(mac, BWN_PHY_RF_OVERRIDE_2_VAL, 0x7); BWN_PHY_SET(mac, BWN_PHY_RF_OVERRIDE_2_VAL, 0x38); BWN_PHY_MASK(mac, BWN_PHY_RF_OVERRIDE_2_VAL, 0xff3f); BWN_PHY_SET(mac, BWN_PHY_RF_OVERRIDE_2_VAL, 0x100); BWN_PHY_MASK(mac, BWN_PHY_RF_OVERRIDE_2_VAL, 0xfdff); BWN_PHY_WRITE(mac, BWN_PHY_PS_CTL_OVERRIDE_VAL0, 0); BWN_PHY_WRITE(mac, BWN_PHY_PS_CTL_OVERRIDE_VAL1, 1); BWN_PHY_WRITE(mac, BWN_PHY_PS_CTL_OVERRIDE_VAL2, 0x20); BWN_PHY_MASK(mac, BWN_PHY_RF_OVERRIDE_2_VAL, 0xfbff); BWN_PHY_MASK(mac, BWN_PHY_RF_OVERRIDE_2_VAL, 0xf7ff); BWN_PHY_WRITE(mac, BWN_PHY_TX_GAIN_CTL_OVERRIDE_VAL, 0); BWN_PHY_WRITE(mac, BWN_PHY_RX_GAIN_CTL_OVERRIDE_VAL, 0x45af); BWN_PHY_WRITE(mac, BWN_PHY_RF_OVERRIDE_2, 0x3ff); loopback = bwn_phy_lp_loopback(mac); if (loopback == -1) goto done; bwn_phy_lp_set_rxgain_idx(mac, 
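/*
 * The RC calibration above waits for the "calibration done" bit with a
 * bounded busy-wait: poll the status register, delay, and fall back to
 * restoring the previous setting after a fixed number of tries rather
 * than spinning forever.  A generic sketch of that pattern;
 * read_status(), sleep_us() and poll_bit() are hypothetical stubs, the
 * driver uses BWN_RF_READ() and DELAY() directly.
 */
#include <stdbool.h>
#include <stdint.h>

static uint16_t
read_status(void)
{
	return (0x2);			/* pretend the done bit is already set */
}

static void
sleep_us(unsigned us)
{
	(void)us;			/* no-op in this sketch */
}

static bool
poll_bit(uint16_t mask, int tries, unsigned delay_us)
{
	int i;

	for (i = 0; i < tries; i++) {
		if (read_status() & mask)
			return (true);	/* bit came up in time */
		sleep_us(delay_us);
	}
	return ((read_status() & mask) != 0);	/* final check, as the driver does */
}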
loopback); BWN_PHY_SETMASK(mac, BWN_PHY_LP_PHY_CTL, 0xffbf, 0x40); BWN_PHY_SETMASK(mac, BWN_PHY_RF_OVERRIDE_2_VAL, 0xfff8, 0x1); BWN_PHY_SETMASK(mac, BWN_PHY_RF_OVERRIDE_2_VAL, 0xffc7, 0x8); BWN_PHY_SETMASK(mac, BWN_PHY_RF_OVERRIDE_2_VAL, 0xff3f, 0xc0); tmp = 0; memset(&ie, 0, sizeof(ie)); for (i = 128; i <= 159; i++) { BWN_RF_WRITE(mac, BWN_B2062_N_RXBB_CALIB2, i); sum = 0; for (j = 5; j <= 25; j++) { bwn_phy_lp_ddfs_turnon(mac, 1, 1, j, j, 0); if (!(bwn_phy_lp_rx_iq_est(mac, 1000, 32, &ie))) goto done; sqpwr = ie.ie_ipwr + ie.ie_qpwr; ipwr = ((pwrtbl[j - 5] >> 3) + 1) >> 1; npwr = bwn_phy_lp_roundup(sqpwr, (j == 5) ? sqpwr : 0, 12); sum += ((ipwr - npwr) * (ipwr - npwr)); if ((i == 128) || (sum < tmp)) { plp->plp_rccap = i; tmp = sum; } } } bwn_phy_lp_ddfs_turnoff(mac); done: /* restore CRS */ bwn_phy_lp_clear_deaf(mac, 1); BWN_PHY_MASK(mac, BWN_PHY_RF_OVERRIDE_0, 0xff80); BWN_PHY_MASK(mac, BWN_PHY_RF_OVERRIDE_2, 0xfc00); BWN_PHY_WRITE(mac, BWN_PHY_RF_OVERRIDE_VAL_0, save[1]); BWN_PHY_WRITE(mac, BWN_PHY_RF_OVERRIDE_0, save[0]); BWN_PHY_WRITE(mac, BWN_PHY_AFE_CTL_OVRVAL, save[3]); BWN_PHY_WRITE(mac, BWN_PHY_AFE_CTL_OVR, save[2]); BWN_PHY_WRITE(mac, BWN_PHY_RF_OVERRIDE_2_VAL, save[5]); BWN_PHY_WRITE(mac, BWN_PHY_RF_OVERRIDE_2, save[4]); BWN_PHY_WRITE(mac, BWN_PHY_LP_PHY_CTL, save[6]); bwn_phy_lp_set_bbmult(mac, bbmult); if (txo) bwn_phy_lp_set_txgain(mac, &tx_gains); bwn_phy_lp_set_txpctlmode(mac, txpctlmode); if (plp->plp_rccap) bwn_phy_lp_set_rccap(mac); } static void bwn_phy_lp_set_rccap(struct bwn_mac *mac) { struct bwn_phy_lp *plp = &mac->mac_phy.phy_lp; uint8_t rc_cap = (plp->plp_rccap & 0x1f) >> 1; if (mac->mac_phy.rev == 1) rc_cap = MIN(rc_cap + 5, 15); BWN_RF_WRITE(mac, BWN_B2062_N_RXBB_CALIB2, MAX(plp->plp_rccap - 4, 0x80)); BWN_RF_WRITE(mac, BWN_B2062_N_TXCTL_A, rc_cap | 0x80); BWN_RF_WRITE(mac, BWN_B2062_S_RXG_CNT16, ((plp->plp_rccap & 0x1f) >> 2) | 0x80); } static uint32_t bwn_phy_lp_roundup(uint32_t value, uint32_t div, uint8_t pre) { uint32_t i, q, r; if (div == 0) return (0); for (i = 0, q = value / div, r = value % div; i < pre; i++) { q <<= 1; if (r << 1 >= div) { q++; r = (r << 1) - div; } } if (r << 1 >= div) q++; return (q); } static void bwn_phy_lp_b2062_reset_pllbias(struct bwn_mac *mac) { struct bwn_softc *sc = mac->mac_sc; BWN_RF_WRITE(mac, BWN_B2062_S_RFPLLCTL2, 0xff); DELAY(20); if (siba_get_chipid(sc->sc_dev) == 0x5354) { BWN_RF_WRITE(mac, BWN_B2062_N_COM1, 4); BWN_RF_WRITE(mac, BWN_B2062_S_RFPLLCTL2, 4); } else { BWN_RF_WRITE(mac, BWN_B2062_S_RFPLLCTL2, 0); } DELAY(5); } static void bwn_phy_lp_b2062_vco_calib(struct bwn_mac *mac) { BWN_RF_WRITE(mac, BWN_B2062_S_RFPLLCTL21, 0x42); BWN_RF_WRITE(mac, BWN_B2062_S_RFPLLCTL21, 0x62); DELAY(200); } static void bwn_phy_lp_b2062_tblinit(struct bwn_mac *mac) { #define FLAG_A 0x01 #define FLAG_G 0x02 struct bwn_softc *sc = mac->mac_sc; struct ieee80211com *ic = &sc->sc_ic; static const struct bwn_b206x_rfinit_entry bwn_b2062_init_tab[] = { { BWN_B2062_N_COM4, 0x1, 0x0, FLAG_A | FLAG_G, }, { BWN_B2062_N_PDNCTL1, 0x0, 0xca, FLAG_G, }, { BWN_B2062_N_PDNCTL3, 0x0, 0x0, FLAG_A | FLAG_G, }, { BWN_B2062_N_PDNCTL4, 0x15, 0x2a, FLAG_A | FLAG_G, }, { BWN_B2062_N_LGENC, 0xDB, 0xff, FLAG_A, }, { BWN_B2062_N_LGENATUNE0, 0xdd, 0x0, FLAG_A | FLAG_G, }, { BWN_B2062_N_LGENATUNE2, 0xdd, 0x0, FLAG_A | FLAG_G, }, { BWN_B2062_N_LGENATUNE3, 0x77, 0xB5, FLAG_A | FLAG_G, }, { BWN_B2062_N_LGENACTL3, 0x0, 0xff, FLAG_A | FLAG_G, }, { BWN_B2062_N_LGENACTL7, 0x33, 0x33, FLAG_A | FLAG_G, }, { BWN_B2062_N_RXA_CTL1, 0x0, 0x0, FLAG_G, }, { 
BWN_B2062_N_RXBB_CTL0, 0x82, 0x80, FLAG_A | FLAG_G, }, { BWN_B2062_N_RXBB_GAIN1, 0x4, 0x4, FLAG_A | FLAG_G, }, { BWN_B2062_N_RXBB_GAIN2, 0x0, 0x0, FLAG_A | FLAG_G, }, { BWN_B2062_N_TXCTL4, 0x3, 0x3, FLAG_A | FLAG_G, }, { BWN_B2062_N_TXCTL5, 0x2, 0x2, FLAG_A | FLAG_G, }, { BWN_B2062_N_TX_TUNE, 0x88, 0x1b, FLAG_A | FLAG_G, }, { BWN_B2062_S_COM4, 0x1, 0x0, FLAG_A | FLAG_G, }, { BWN_B2062_S_PDS_CTL0, 0xff, 0xff, FLAG_A | FLAG_G, }, { BWN_B2062_S_LGENG_CTL0, 0xf8, 0xd8, FLAG_A | FLAG_G, }, { BWN_B2062_S_LGENG_CTL1, 0x3c, 0x24, FLAG_A | FLAG_G, }, { BWN_B2062_S_LGENG_CTL8, 0x88, 0x80, FLAG_A | FLAG_G, }, { BWN_B2062_S_LGENG_CTL10, 0x88, 0x80, FLAG_A | FLAG_G, }, { BWN_B2062_S_RFPLLCTL0, 0x98, 0x98, FLAG_A | FLAG_G, }, { BWN_B2062_S_RFPLLCTL1, 0x10, 0x10, FLAG_A | FLAG_G, }, { BWN_B2062_S_RFPLLCTL5, 0x43, 0x43, FLAG_A | FLAG_G, }, { BWN_B2062_S_RFPLLCTL6, 0x47, 0x47, FLAG_A | FLAG_G, }, { BWN_B2062_S_RFPLLCTL7, 0xc, 0xc, FLAG_A | FLAG_G, }, { BWN_B2062_S_RFPLLCTL8, 0x11, 0x11, FLAG_A | FLAG_G, }, { BWN_B2062_S_RFPLLCTL9, 0x11, 0x11, FLAG_A | FLAG_G, }, { BWN_B2062_S_RFPLLCTL10, 0xe, 0xe, FLAG_A | FLAG_G, }, { BWN_B2062_S_RFPLLCTL11, 0x8, 0x8, FLAG_A | FLAG_G, }, { BWN_B2062_S_RFPLLCTL12, 0x33, 0x33, FLAG_A | FLAG_G, }, { BWN_B2062_S_RFPLLCTL13, 0xa, 0xa, FLAG_A | FLAG_G, }, { BWN_B2062_S_RFPLLCTL14, 0x6, 0x6, FLAG_A | FLAG_G, }, { BWN_B2062_S_RFPLLCTL18, 0x3e, 0x3e, FLAG_A | FLAG_G, }, { BWN_B2062_S_RFPLLCTL19, 0x13, 0x13, FLAG_A | FLAG_G, }, { BWN_B2062_S_RFPLLCTL21, 0x62, 0x62, FLAG_A | FLAG_G, }, { BWN_B2062_S_RFPLLCTL22, 0x7, 0x7, FLAG_A | FLAG_G, }, { BWN_B2062_S_RFPLLCTL23, 0x16, 0x16, FLAG_A | FLAG_G, }, { BWN_B2062_S_RFPLLCTL24, 0x5c, 0x5c, FLAG_A | FLAG_G, }, { BWN_B2062_S_RFPLLCTL25, 0x95, 0x95, FLAG_A | FLAG_G, }, { BWN_B2062_S_RFPLLCTL30, 0xa0, 0xa0, FLAG_A | FLAG_G, }, { BWN_B2062_S_RFPLLCTL31, 0x4, 0x4, FLAG_A | FLAG_G, }, { BWN_B2062_S_RFPLLCTL33, 0xcc, 0xcc, FLAG_A | FLAG_G, }, { BWN_B2062_S_RFPLLCTL34, 0x7, 0x7, FLAG_A | FLAG_G, }, { BWN_B2062_S_RXG_CNT8, 0xf, 0xf, FLAG_A, }, }; const struct bwn_b206x_rfinit_entry *br; unsigned int i; for (i = 0; i < N(bwn_b2062_init_tab); i++) { br = &bwn_b2062_init_tab[i]; if (IEEE80211_IS_CHAN_2GHZ(ic->ic_curchan)) { if (br->br_flags & FLAG_G) BWN_RF_WRITE(mac, br->br_offset, br->br_valueg); } else { if (br->br_flags & FLAG_A) BWN_RF_WRITE(mac, br->br_offset, br->br_valuea); } } #undef FLAG_A #undef FLAG_B } static void bwn_phy_lp_b2063_tblinit(struct bwn_mac *mac) { #define FLAG_A 0x01 #define FLAG_G 0x02 struct bwn_softc *sc = mac->mac_sc; struct ieee80211com *ic = &sc->sc_ic; static const struct bwn_b206x_rfinit_entry bwn_b2063_init_tab[] = { { BWN_B2063_COM1, 0x0, 0x0, FLAG_G, }, { BWN_B2063_COM10, 0x1, 0x0, FLAG_A, }, { BWN_B2063_COM16, 0x0, 0x0, FLAG_G, }, { BWN_B2063_COM17, 0x0, 0x0, FLAG_G, }, { BWN_B2063_COM18, 0x0, 0x0, FLAG_G, }, { BWN_B2063_COM19, 0x0, 0x0, FLAG_G, }, { BWN_B2063_COM20, 0x0, 0x0, FLAG_G, }, { BWN_B2063_COM21, 0x0, 0x0, FLAG_G, }, { BWN_B2063_COM22, 0x0, 0x0, FLAG_G, }, { BWN_B2063_COM23, 0x0, 0x0, FLAG_G, }, { BWN_B2063_COM24, 0x0, 0x0, FLAG_G, }, { BWN_B2063_LOGEN_SP1, 0xe8, 0xd4, FLAG_A | FLAG_G, }, { BWN_B2063_LOGEN_SP2, 0xa7, 0x53, FLAG_A | FLAG_G, }, { BWN_B2063_LOGEN_SP4, 0xf0, 0xf, FLAG_A | FLAG_G, }, { BWN_B2063_G_RX_SP1, 0x1f, 0x5e, FLAG_G, }, { BWN_B2063_G_RX_SP2, 0x7f, 0x7e, FLAG_G, }, { BWN_B2063_G_RX_SP3, 0x30, 0xf0, FLAG_G, }, { BWN_B2063_G_RX_SP7, 0x7f, 0x7f, FLAG_A | FLAG_G, }, { BWN_B2063_G_RX_SP10, 0xc, 0xc, FLAG_A | FLAG_G, }, { BWN_B2063_A_RX_SP1, 0x3c, 0x3f, FLAG_A, }, { BWN_B2063_A_RX_SP2, 
0xfc, 0xfe, FLAG_A, }, { BWN_B2063_A_RX_SP7, 0x8, 0x8, FLAG_A | FLAG_G, }, { BWN_B2063_RX_BB_SP4, 0x60, 0x60, FLAG_A | FLAG_G, }, { BWN_B2063_RX_BB_SP8, 0x30, 0x30, FLAG_A | FLAG_G, }, { BWN_B2063_TX_RF_SP3, 0xc, 0xb, FLAG_A | FLAG_G, }, { BWN_B2063_TX_RF_SP4, 0x10, 0xf, FLAG_A | FLAG_G, }, { BWN_B2063_PA_SP1, 0x3d, 0xfd, FLAG_A | FLAG_G, }, { BWN_B2063_TX_BB_SP1, 0x2, 0x2, FLAG_A | FLAG_G, }, { BWN_B2063_BANDGAP_CTL1, 0x56, 0x56, FLAG_A | FLAG_G, }, { BWN_B2063_JTAG_VCO2, 0xF7, 0xF7, FLAG_A | FLAG_G, }, { BWN_B2063_G_RX_MIX3, 0x71, 0x71, FLAG_A | FLAG_G, }, { BWN_B2063_G_RX_MIX4, 0x71, 0x71, FLAG_A | FLAG_G, }, { BWN_B2063_A_RX_1ST2, 0xf0, 0x30, FLAG_A, }, { BWN_B2063_A_RX_PS6, 0x77, 0x77, FLAG_A | FLAG_G, }, { BWN_B2063_A_RX_MIX4, 0x3, 0x3, FLAG_A | FLAG_G, }, { BWN_B2063_A_RX_MIX5, 0xf, 0xf, FLAG_A | FLAG_G, }, { BWN_B2063_A_RX_MIX6, 0xf, 0xf, FLAG_A | FLAG_G, }, { BWN_B2063_RX_TIA_CTL1, 0x77, 0x77, FLAG_A | FLAG_G, }, { BWN_B2063_RX_TIA_CTL3, 0x77, 0x77, FLAG_A | FLAG_G, }, { BWN_B2063_RX_BB_CTL2, 0x4, 0x4, FLAG_A | FLAG_G, }, { BWN_B2063_PA_CTL1, 0x0, 0x4, FLAG_A, }, { BWN_B2063_VREG_CTL1, 0x3, 0x3, FLAG_A | FLAG_G, }, }; const struct bwn_b206x_rfinit_entry *br; unsigned int i; for (i = 0; i < N(bwn_b2063_init_tab); i++) { br = &bwn_b2063_init_tab[i]; if (IEEE80211_IS_CHAN_2GHZ(ic->ic_curchan)) { if (br->br_flags & FLAG_G) BWN_RF_WRITE(mac, br->br_offset, br->br_valueg); } else { if (br->br_flags & FLAG_A) BWN_RF_WRITE(mac, br->br_offset, br->br_valuea); } } #undef FLAG_A #undef FLAG_B } static void bwn_tab_read_multi(struct bwn_mac *mac, uint32_t typenoffset, int count, void *_data) { unsigned int i; uint32_t offset, type; uint8_t *data = _data; type = BWN_TAB_GETTYPE(typenoffset); offset = BWN_TAB_GETOFFSET(typenoffset); KASSERT(offset <= 0xffff, ("%s:%d: fail", __func__, __LINE__)); BWN_PHY_WRITE(mac, BWN_PHY_TABLE_ADDR, offset); for (i = 0; i < count; i++) { switch (type) { case BWN_TAB_8BIT: *data = BWN_PHY_READ(mac, BWN_PHY_TABLEDATALO) & 0xff; data++; break; case BWN_TAB_16BIT: *((uint16_t *)data) = BWN_PHY_READ(mac, BWN_PHY_TABLEDATALO); data += 2; break; case BWN_TAB_32BIT: *((uint32_t *)data) = BWN_PHY_READ(mac, BWN_PHY_TABLEDATAHI); *((uint32_t *)data) <<= 16; *((uint32_t *)data) |= BWN_PHY_READ(mac, BWN_PHY_TABLEDATALO); data += 4; break; default: KASSERT(0 == 1, ("%s:%d: fail", __func__, __LINE__)); } } } static void bwn_tab_write_multi(struct bwn_mac *mac, uint32_t typenoffset, int count, const void *_data) { uint32_t offset, type, value; const uint8_t *data = _data; unsigned int i; type = BWN_TAB_GETTYPE(typenoffset); offset = BWN_TAB_GETOFFSET(typenoffset); KASSERT(offset <= 0xffff, ("%s:%d: fail", __func__, __LINE__)); BWN_PHY_WRITE(mac, BWN_PHY_TABLE_ADDR, offset); for (i = 0; i < count; i++) { switch (type) { case BWN_TAB_8BIT: value = *data; data++; KASSERT(!(value & ~0xff), ("%s:%d: fail", __func__, __LINE__)); BWN_PHY_WRITE(mac, BWN_PHY_TABLEDATALO, value); break; case BWN_TAB_16BIT: value = *((const uint16_t *)data); data += 2; KASSERT(!(value & ~0xffff), ("%s:%d: fail", __func__, __LINE__)); BWN_PHY_WRITE(mac, BWN_PHY_TABLEDATALO, value); break; case BWN_TAB_32BIT: value = *((const uint32_t *)data); data += 4; BWN_PHY_WRITE(mac, BWN_PHY_TABLEDATAHI, value >> 16); BWN_PHY_WRITE(mac, BWN_PHY_TABLEDATALO, value); break; default: KASSERT(0 == 1, ("%s:%d: fail", __func__, __LINE__)); } } } static struct bwn_txgain bwn_phy_lp_get_txgain(struct bwn_mac *mac) { struct bwn_txgain tg; uint16_t tmp; tg.tg_dac = (BWN_PHY_READ(mac, BWN_PHY_AFE_DAC_CTL) & 0x380) >> 7; if 
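/* PHY rev < 2 packs the gm/pga/pad gains into TX_GAIN_CTL_OVERRIDE_VAL; rev >= 2 keeps pad in OFDM register 0xfb and gm/pga in the override register. */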
(mac->mac_phy.rev < 2) { tmp = BWN_PHY_READ(mac, BWN_PHY_TX_GAIN_CTL_OVERRIDE_VAL) & 0x7ff; tg.tg_gm = tmp & 0x0007; tg.tg_pga = (tmp & 0x0078) >> 3; tg.tg_pad = (tmp & 0x780) >> 7; return (tg); } tmp = BWN_PHY_READ(mac, BWN_PHY_TX_GAIN_CTL_OVERRIDE_VAL); tg.tg_pad = BWN_PHY_READ(mac, BWN_PHY_OFDM(0xfb)) & 0xff; tg.tg_gm = tmp & 0xff; tg.tg_pga = (tmp >> 8) & 0xff; return (tg); } static uint8_t bwn_phy_lp_get_bbmult(struct bwn_mac *mac) { return (bwn_tab_read(mac, BWN_TAB_2(0, 87)) & 0xff00) >> 8; } static void bwn_phy_lp_set_txgain(struct bwn_mac *mac, struct bwn_txgain *tg) { uint16_t pa; if (mac->mac_phy.rev < 2) { BWN_PHY_SETMASK(mac, BWN_PHY_TX_GAIN_CTL_OVERRIDE_VAL, 0xf800, (tg->tg_pad << 7) | (tg->tg_pga << 3) | tg->tg_gm); bwn_phy_lp_set_txgain_dac(mac, tg->tg_dac); bwn_phy_lp_set_txgain_override(mac); return; } pa = bwn_phy_lp_get_pa_gain(mac); BWN_PHY_WRITE(mac, BWN_PHY_TX_GAIN_CTL_OVERRIDE_VAL, (tg->tg_pga << 8) | tg->tg_gm); BWN_PHY_SETMASK(mac, BWN_PHY_OFDM(0xfb), 0x8000, tg->tg_pad | (pa << 6)); BWN_PHY_WRITE(mac, BWN_PHY_OFDM(0xfc), (tg->tg_pga << 8) | tg->tg_gm); BWN_PHY_SETMASK(mac, BWN_PHY_OFDM(0xfd), 0x8000, tg->tg_pad | (pa << 8)); bwn_phy_lp_set_txgain_dac(mac, tg->tg_dac); bwn_phy_lp_set_txgain_override(mac); } static void bwn_phy_lp_set_bbmult(struct bwn_mac *mac, uint8_t bbmult) { bwn_tab_write(mac, BWN_TAB_2(0, 87), (uint16_t)bbmult << 8); } static void bwn_phy_lp_set_trsw_over(struct bwn_mac *mac, uint8_t tx, uint8_t rx) { uint16_t trsw = (tx << 1) | rx; BWN_PHY_SETMASK(mac, BWN_PHY_RF_OVERRIDE_VAL_0, 0xfffc, trsw); BWN_PHY_SET(mac, BWN_PHY_RF_OVERRIDE_0, 0x3); } static void bwn_phy_lp_set_rxgain(struct bwn_mac *mac, uint32_t gain) { struct bwn_softc *sc = mac->mac_sc; struct ieee80211com *ic = &sc->sc_ic; uint16_t ext_lna, high_gain, lna, low_gain, trsw, tmp; if (mac->mac_phy.rev < 2) { trsw = gain & 0x1; lna = (gain & 0xfffc) | ((gain & 0xc) >> 2); ext_lna = (gain & 2) >> 1; BWN_PHY_SETMASK(mac, BWN_PHY_RF_OVERRIDE_VAL_0, 0xfffe, trsw); BWN_PHY_SETMASK(mac, BWN_PHY_RF_OVERRIDE_2_VAL, 0xfbff, ext_lna << 10); BWN_PHY_SETMASK(mac, BWN_PHY_RF_OVERRIDE_2_VAL, 0xf7ff, ext_lna << 11); BWN_PHY_WRITE(mac, BWN_PHY_RX_GAIN_CTL_OVERRIDE_VAL, lna); } else { low_gain = gain & 0xffff; high_gain = (gain >> 16) & 0xf; ext_lna = (gain >> 21) & 0x1; trsw = ~(gain >> 20) & 0x1; BWN_PHY_SETMASK(mac, BWN_PHY_RF_OVERRIDE_VAL_0, 0xfffe, trsw); BWN_PHY_SETMASK(mac, BWN_PHY_RF_OVERRIDE_2_VAL, 0xfdff, ext_lna << 9); BWN_PHY_SETMASK(mac, BWN_PHY_RF_OVERRIDE_2_VAL, 0xfbff, ext_lna << 10); BWN_PHY_WRITE(mac, BWN_PHY_RX_GAIN_CTL_OVERRIDE_VAL, low_gain); BWN_PHY_SETMASK(mac, BWN_PHY_AFE_DDFS, 0xfff0, high_gain); if (IEEE80211_IS_CHAN_2GHZ(ic->ic_curchan)) { tmp = (gain >> 2) & 0x3; BWN_PHY_SETMASK(mac, BWN_PHY_RF_OVERRIDE_2_VAL, 0xe7ff, tmp<<11); BWN_PHY_SETMASK(mac, BWN_PHY_OFDM(0xe6), 0xffe7, tmp << 3); } } BWN_PHY_SET(mac, BWN_PHY_RF_OVERRIDE_0, 0x1); BWN_PHY_SET(mac, BWN_PHY_RF_OVERRIDE_0, 0x10); BWN_PHY_SET(mac, BWN_PHY_RF_OVERRIDE_0, 0x40); if (mac->mac_phy.rev >= 2) { BWN_PHY_SET(mac, BWN_PHY_RF_OVERRIDE_2, 0x100); if (IEEE80211_IS_CHAN_2GHZ(ic->ic_curchan)) { BWN_PHY_SET(mac, BWN_PHY_RF_OVERRIDE_2, 0x400); BWN_PHY_SET(mac, BWN_PHY_OFDM(0xe5), 0x8); } return; } BWN_PHY_SET(mac, BWN_PHY_RF_OVERRIDE_2, 0x200); } static void bwn_phy_lp_set_deaf(struct bwn_mac *mac, uint8_t user) { struct bwn_phy_lp *plp = &mac->mac_phy.phy_lp; if (user) plp->plp_crsusr_off = 1; else plp->plp_crssys_off = 1; BWN_PHY_SETMASK(mac, BWN_PHY_CRSGAIN_CTL, 0xff1f, 0x80); } static void bwn_phy_lp_clear_deaf(struct 
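/* Clear the "deaf" (CRS off) state for the given requester; CRS is re-enabled only once both the user and system holds are released, using the 2 GHz or 5 GHz CRSGAIN setting for the current channel. */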
bwn_mac *mac, uint8_t user) { struct bwn_phy_lp *plp = &mac->mac_phy.phy_lp; struct bwn_softc *sc = mac->mac_sc; struct ieee80211com *ic = &sc->sc_ic; if (user) plp->plp_crsusr_off = 0; else plp->plp_crssys_off = 0; if (plp->plp_crsusr_off || plp->plp_crssys_off) return; if (IEEE80211_IS_CHAN_2GHZ(ic->ic_curchan)) BWN_PHY_SETMASK(mac, BWN_PHY_CRSGAIN_CTL, 0xff1f, 0x60); else BWN_PHY_SETMASK(mac, BWN_PHY_CRSGAIN_CTL, 0xff1f, 0x20); } static int bwn_phy_lp_calc_rx_iq_comp(struct bwn_mac *mac, uint16_t sample) { #define CALC_COEFF(_v, _x, _y, _z) do { \ int _t; \ _t = _x - 20; \ if (_t >= 0) { \ _v = ((_y << (30 - _x)) + (_z >> (1 + _t))) / (_z >> _t); \ } else { \ _v = ((_y << (30 - _x)) + (_z << (-1 - _t))) / (_z << -_t); \ } \ } while (0) #define CALC_COEFF2(_v, _x, _y, _z) do { \ int _t; \ _t = _x - 11; \ if (_t >= 0) \ _v = (_y << (31 - _x)) / (_z >> _t); \ else \ _v = (_y << (31 - _x)) / (_z << -_t); \ } while (0) struct bwn_phy_lp_iq_est ie; uint16_t v0, v1; int tmp[2], ret; v1 = BWN_PHY_READ(mac, BWN_PHY_RX_COMP_COEFF_S); v0 = v1 >> 8; v1 |= 0xff; BWN_PHY_SETMASK(mac, BWN_PHY_RX_COMP_COEFF_S, 0xff00, 0x00c0); BWN_PHY_MASK(mac, BWN_PHY_RX_COMP_COEFF_S, 0x00ff); ret = bwn_phy_lp_rx_iq_est(mac, sample, 32, &ie); if (ret == 0) goto done; if (ie.ie_ipwr + ie.ie_qpwr < 2) { ret = 0; goto done; } CALC_COEFF(tmp[0], bwn_nbits(ie.ie_iqprod), ie.ie_iqprod, ie.ie_ipwr); CALC_COEFF2(tmp[1], bwn_nbits(ie.ie_qpwr), ie.ie_qpwr, ie.ie_ipwr); tmp[1] = -bwn_sqrt(mac, tmp[1] - (tmp[0] * tmp[0])); v0 = tmp[0] >> 3; v1 = tmp[1] >> 4; done: BWN_PHY_SETMASK(mac, BWN_PHY_RX_COMP_COEFF_S, 0xff00, v1); BWN_PHY_SETMASK(mac, BWN_PHY_RX_COMP_COEFF_S, 0x00ff, v0 << 8); return ret; #undef CALC_COEFF #undef CALC_COEFF2 } static void bwn_phy_lp_tblinit_r01(struct bwn_mac *mac) { static const uint16_t noisescale[] = { 0xa4a4, 0xa4a4, 0xa4a4, 0xa4a4, 0xa4a4, 0xa4a4, 0xa4a4, 0xa4a4, 0xa4a4, 0xa4a4, 0xa4a4, 0xa4a4, 0xa4a4, 0xa400, 0xa4a4, 0xa4a4, 0xa4a4, 0xa4a4, 0xa4a4, 0xa4a4, 0xa4a4, 0xa4a4, 0xa4a4, 0xa4a4, 0xa4a4, 0xa4a4, 0x00a4, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x4c00, 0x2d36, 0x0000, 0x0000, 0x4c00, 0x2d36, }; static const uint16_t crsgainnft[] = { 0x0366, 0x036a, 0x036f, 0x0364, 0x0367, 0x036d, 0x0374, 0x037f, 0x036f, 0x037b, 0x038a, 0x0378, 0x0367, 0x036d, 0x0375, 0x0381, 0x0374, 0x0381, 0x0392, 0x03a9, 0x03c4, 0x03e1, 0x0001, 0x001f, 0x0040, 0x005e, 0x007f, 0x009e, 0x00bd, 0x00dd, 0x00fd, 0x011d, 0x013d, }; static const uint16_t filterctl[] = { 0xa0fc, 0x10fc, 0x10db, 0x20b7, 0xff93, 0x10bf, 0x109b, 0x2077, 0xff53, 0x0127, }; static const uint32_t psctl[] = { 0x00010000, 0x000000a0, 0x00040000, 0x00000048, 0x08080101, 0x00000080, 0x08080101, 0x00000040, 0x08080101, 0x000000c0, 0x08a81501, 0x000000c0, 0x0fe8fd01, 0x000000c0, 0x08300105, 0x000000c0, 0x08080201, 0x000000c0, 0x08280205, 0x000000c0, 0xe80802fe, 0x000000c7, 0x28080206, 0x000000c0, 0x08080202, 0x000000c0, 0x0ba87602, 0x000000c0, 0x1068013d, 0x000000c0, 0x10280105, 0x000000c0, 0x08880102, 0x000000c0, 0x08280106, 0x000000c0, 0xe80801fd, 0x000000c7, 0xa8080115, 0x000000c0, }; static const uint16_t ofdmcckgain_r0[] = { 0x0001, 0x0001, 0x0001, 0x0001, 0x1001, 0x2001, 0x3001, 0x4001, 0x5001, 0x6001, 0x7001, 0x7011, 0x7021, 0x2035, 0x2045, 0x2055, 0x2065, 0x2075, 0x006d, 0x007d, 0x014d, 0x015d, 0x115d, 0x035d, 0x135d, 0x055d, 0x155d, 0x0d5d, 0x1d5d, 0x2d5d, 0x555d, 0x655d, 0x755d, }; static const uint16_t ofdmcckgain_r1[] = { 0x5000, 0x6000, 0x7000, 0x0001, 0x1001, 0x2001, 0x3001, 0x4001, 0x5001, 0x6001, 0x7001, 0x7011, 0x7021, 0x2035, 
0x2045, 0x2055, 0x2065, 0x2075, 0x006d, 0x007d, 0x014d, 0x015d, 0x115d, 0x035d, 0x135d, 0x055d, 0x155d, 0x0d5d, 0x1d5d, 0x2d5d, 0x555d, 0x655d, 0x755d, }; static const uint16_t gaindelta[] = { 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, }; static const uint32_t txpwrctl[] = { 0x00000050, 0x0000004f, 0x0000004e, 0x0000004d, 0x0000004c, 0x0000004b, 0x0000004a, 0x00000049, 0x00000048, 0x00000047, 0x00000046, 0x00000045, 0x00000044, 0x00000043, 0x00000042, 0x00000041, 0x00000040, 0x0000003f, 0x0000003e, 0x0000003d, 0x0000003c, 0x0000003b, 0x0000003a, 0x00000039, 0x00000038, 0x00000037, 0x00000036, 0x00000035, 0x00000034, 0x00000033, 0x00000032, 0x00000031, 0x00000030, 0x0000002f, 0x0000002e, 0x0000002d, 0x0000002c, 0x0000002b, 0x0000002a, 0x00000029, 0x00000028, 0x00000027, 0x00000026, 0x00000025, 0x00000024, 0x00000023, 0x00000022, 0x00000021, 0x00000020, 0x0000001f, 0x0000001e, 0x0000001d, 0x0000001c, 0x0000001b, 0x0000001a, 0x00000019, 0x00000018, 0x00000017, 0x00000016, 0x00000015, 0x00000014, 0x00000013, 0x00000012, 0x00000011, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x000075a0, 0x000075a0, 0x000075a1, 0x000075a1, 0x000075a2, 0x000075a2, 0x000075a3, 0x000075a3, 0x000074b0, 0x000074b0, 0x000074b1, 0x000074b1, 0x000074b2, 0x000074b2, 0x000074b3, 0x000074b3, 0x00006d20, 0x00006d20, 0x00006d21, 0x00006d21, 0x00006d22, 0x00006d22, 0x00006d23, 0x00006d23, 0x00004660, 0x00004660, 0x00004661, 0x00004661, 0x00004662, 0x00004662, 0x00004663, 0x00004663, 0x00003e60, 0x00003e60, 0x00003e61, 0x00003e61, 0x00003e62, 0x00003e62, 0x00003e63, 0x00003e63, 0x00003660, 0x00003660, 0x00003661, 0x00003661, 0x00003662, 0x00003662, 0x00003663, 0x00003663, 0x00002e60, 0x00002e60, 0x00002e61, 0x00002e61, 0x00002e62, 0x00002e62, 0x00002e63, 0x00002e63, 0x00002660, 0x00002660, 0x00002661, 0x00002661, 0x00002662, 0x00002662, 0x00002663, 0x00002663, 0x000025e0, 0x000025e0, 0x000025e1, 0x000025e1, 0x000025e2, 0x000025e2, 0x000025e3, 0x000025e3, 0x00001de0, 0x00001de0, 0x00001de1, 0x00001de1, 0x00001de2, 0x00001de2, 
0x00001de3, 0x00001de3, 0x00001d60, 0x00001d60, 0x00001d61, 0x00001d61, 0x00001d62, 0x00001d62, 0x00001d63, 0x00001d63, 0x00001560, 0x00001560, 0x00001561, 0x00001561, 0x00001562, 0x00001562, 0x00001563, 0x00001563, 0x00000d60, 0x00000d60, 0x00000d61, 0x00000d61, 0x00000d62, 0x00000d62, 0x00000d63, 0x00000d63, 0x00000ce0, 0x00000ce0, 0x00000ce1, 0x00000ce1, 0x00000ce2, 0x00000ce2, 0x00000ce3, 0x00000ce3, 0x00000e10, 0x00000e10, 0x00000e11, 0x00000e11, 0x00000e12, 0x00000e12, 0x00000e13, 0x00000e13, 0x00000bf0, 0x00000bf0, 0x00000bf1, 0x00000bf1, 0x00000bf2, 0x00000bf2, 0x00000bf3, 0x00000bf3, 0x04200000, 0x04000000, 0x04200000, 0x04000000, 0x04200000, 0x04000000, 0x04200000, 0x04000000, 0x04200000, 0x04000000, 0x04200000, 0x04000000, 0x04200000, 0x04000000, 0x04200000, 0x04000000, 0x04200000, 0x04000000, 0x04200000, 0x04000000, 0x04200000, 0x04000000, 0x04200000, 0x04000000, 0x04200000, 0x04000000, 0x04200000, 0x04000000, 0x04200000, 0x04000000, 0x04200000, 0x04000000, 0x04200000, 0x04000000, 0x04200000, 0x04000000, 0x04200000, 0x04000000, 0x04200000, 0x04000000, 0x04200000, 0x04000000, 0x04200000, 0x04000000, 0x04200000, 0x04000000, 0x04200000, 0x04000000, 0x04200000, 0x04000000, 0x04200000, 0x04000000, 0x04200000, 0x04000000, 0x04200000, 0x04000000, 0x04200000, 0x04000000, 0x04200000, 0x04000000, 0x04200000, 0x04000000, 0x04200000, 0x04000000, 0x04200000, 0x04000000, 0x04200000, 0x04000000, 0x04200000, 0x04000000, 0x04200000, 0x04000000, 0x04200000, 0x04000000, 0x04200000, 0x04000000, 0x04200000, 0x04000000, 0x04200000, 0x04000000, 0x04200000, 0x04000000, 0x04200000, 0x04000000, 0x04200000, 0x04000000, 0x04200000, 0x04000000, 0x04200000, 0x04000000, 0x04200000, 0x04000000, 0x04200000, 0x04000000, 0x04200000, 0x04000000, 0x04200000, 0x04000000, 0x04200000, 0x04000000, 0x04200000, 0x04000000, 0x04200000, 0x04000000, 0x04200000, 0x04000000, 0x04200000, 0x04000000, 0x04200000, 0x04000000, 0x04200000, 0x04000000, 0x04200000, 0x04000000, 0x04200000, 0x04000000, 0x04200000, 0x04000000, 0x04200000, 0x04000000, 0x04200000, 0x04000000, 0x04200000, 0x04000000, 0x04200000, 0x04000000, 0x04200000, 0x04000000, 0x000000ff, 0x000002fc, 0x0000fa08, 0x00000305, 0x00000206, 0x00000304, 0x0000fb04, 0x0000fcff, 0x000005fb, 0x0000fd01, 0x00000401, 0x00000006, 0x0000ff03, 0x000007fc, 0x0000fc08, 0x00000203, 0x0000fffb, 0x00000600, 0x0000fa01, 0x0000fc03, 0x0000fe06, 0x0000fe00, 0x00000102, 0x000007fd, 0x000004fb, 0x000006ff, 0x000004fd, 0x0000fdfa, 0x000007fb, 0x0000fdfa, 0x0000fa06, 0x00000500, 0x0000f902, 0x000007fa, 0x0000fafa, 0x00000500, 0x000007fa, 0x00000700, 0x00000305, 0x000004ff, 0x00000801, 0x00000503, 0x000005f9, 0x00000404, 0x0000fb08, 0x000005fd, 0x00000501, 0x00000405, 0x0000fb03, 0x000007fc, 0x00000403, 0x00000303, 0x00000402, 0x0000faff, 0x0000fe05, 0x000005fd, 0x0000fe01, 0x000007fa, 0x00000202, 0x00000504, 0x00000102, 0x000008fe, 0x0000fa04, 0x0000fafc, 0x0000fe08, 0x000000f9, 0x000002fa, 0x000003fe, 0x00000304, 0x000004f9, 0x00000100, 0x0000fd06, 0x000008fc, 0x00000701, 0x00000504, 0x0000fdfe, 0x0000fdfc, 0x000003fe, 0x00000704, 0x000002fc, 0x000004f9, 0x0000fdfd, 0x0000fa07, 0x00000205, 0x000003fd, 0x000005fb, 0x000004f9, 0x00000804, 0x0000fc06, 0x0000fcf9, 0x00000100, 0x0000fe05, 0x00000408, 0x0000fb02, 0x00000304, 0x000006fe, 0x000004fa, 0x00000305, 0x000008fc, 0x00000102, 0x000001fd, 0x000004fc, 0x0000fe03, 0x00000701, 0x000001fb, 0x000001f9, 0x00000206, 0x000006fd, 0x00000508, 0x00000700, 0x00000304, 0x000005fe, 0x000005ff, 0x0000fa04, 0x00000303, 0x0000fefb, 0x000007f9, 0x0000fefc, 
0x000004fd, 0x000005fc, 0x0000fffd, 0x0000fc08, 0x0000fbf9, 0x0000fd07, 0x000008fb, 0x0000fe02, 0x000006fb, 0x00000702, }; KASSERT(mac->mac_phy.rev < 2, ("%s:%d: fail", __func__, __LINE__)); bwn_tab_write_multi(mac, BWN_TAB_1(2, 0), N(bwn_tab_sigsq_tbl), bwn_tab_sigsq_tbl); bwn_tab_write_multi(mac, BWN_TAB_2(1, 0), N(noisescale), noisescale); bwn_tab_write_multi(mac, BWN_TAB_2(14, 0), N(crsgainnft), crsgainnft); bwn_tab_write_multi(mac, BWN_TAB_2(8, 0), N(filterctl), filterctl); bwn_tab_write_multi(mac, BWN_TAB_4(9, 0), N(psctl), psctl); bwn_tab_write_multi(mac, BWN_TAB_1(6, 0), N(bwn_tab_pllfrac_tbl), bwn_tab_pllfrac_tbl); bwn_tab_write_multi(mac, BWN_TAB_2(0, 0), N(bwn_tabl_iqlocal_tbl), bwn_tabl_iqlocal_tbl); if (mac->mac_phy.rev == 0) { bwn_tab_write_multi(mac, BWN_TAB_2(13, 0), N(ofdmcckgain_r0), ofdmcckgain_r0); bwn_tab_write_multi(mac, BWN_TAB_2(12, 0), N(ofdmcckgain_r0), ofdmcckgain_r0); } else { bwn_tab_write_multi(mac, BWN_TAB_2(13, 0), N(ofdmcckgain_r1), ofdmcckgain_r1); bwn_tab_write_multi(mac, BWN_TAB_2(12, 0), N(ofdmcckgain_r1), ofdmcckgain_r1); } bwn_tab_write_multi(mac, BWN_TAB_2(15, 0), N(gaindelta), gaindelta); bwn_tab_write_multi(mac, BWN_TAB_4(10, 0), N(txpwrctl), txpwrctl); } static void bwn_phy_lp_tblinit_r2(struct bwn_mac *mac) { struct bwn_softc *sc = mac->mac_sc; int i; static const uint16_t noisescale[] = { 0x00a4, 0x00a4, 0x00a4, 0x00a4, 0x00a4, 0x00a4, 0x00a4, 0x00a4, 0x00a4, 0x00a4, 0x00a4, 0x00a4, 0x00a4, 0x00a4, 0x00a4, 0x00a4, 0x00a4, 0x00a4, 0x00a4, 0x00a4, 0x00a4, 0x00a4, 0x00a4, 0x00a4, 0x00a4, 0x00a4, 0x0000, 0x00a4, 0x00a4, 0x00a4, 0x00a4, 0x00a4, 0x00a4, 0x00a4, 0x00a4, 0x00a4, 0x00a4, 0x00a4, 0x00a4, 0x00a4, 0x00a4, 0x00a4, 0x00a4, 0x00a4, 0x00a4, 0x00a4, 0x00a4, 0x00a4, 0x00a4, 0x00a4, 0x00a4, 0x00a4, 0x00a4 }; static const uint32_t filterctl[] = { 0x000141fc, 0x000021fc, 0x000021b7, 0x0000416f, 0x0001ff27, 0x0000217f, 0x00002137, 0x000040ef, 0x0001fea7, 0x0000024f }; static const uint32_t psctl[] = { 0x00e38e08, 0x00e08e38, 0x00000000, 0x00000000, 0x00000000, 0x00002080, 0x00006180, 0x00003002, 0x00000040, 0x00002042, 0x00180047, 0x00080043, 0x00000041, 0x000020c1, 0x00046006, 0x00042002, 0x00040000, 0x00002003, 0x00180006, 0x00080002 }; static const uint32_t gainidx[] = { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x10000001, 0x00000000, 0x20000082, 0x00000000, 0x40000104, 0x00000000, 0x60004207, 0x00000001, 0x7000838a, 0x00000001, 0xd021050d, 0x00000001, 0xe041c683, 0x00000001, 0x50828805, 0x00000000, 0x80e34288, 0x00000000, 0xb144040b, 0x00000000, 0xe1a6058e, 0x00000000, 0x12064711, 0x00000001, 0xb0a18612, 0x00000010, 0xe1024794, 0x00000010, 0x11630915, 0x00000011, 0x31c3ca1b, 0x00000011, 0xc1848a9c, 0x00000018, 0xf1e50da0, 0x00000018, 0x22468e21, 0x00000019, 0x4286d023, 0x00000019, 0xa347d0a4, 0x00000019, 0xb36811a6, 0x00000019, 0xf3e89227, 0x00000019, 0x0408d329, 0x0000001a, 0x244953aa, 0x0000001a, 0x346994ab, 0x0000001a, 0x54aa152c, 0x0000001a, 0x64ca55ad, 0x0000001a, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x10000001, 0x00000000, 0x20000082, 0x00000000, 0x40000104, 0x00000000, 0x60004207, 0x00000001, 0x7000838a, 0x00000001, 0xd021050d, 0x00000001, 0xe041c683, 0x00000001, 0x50828805, 0x00000000, 
0x80e34288, 0x00000000, 0xb144040b, 0x00000000, 0xe1a6058e, 0x00000000, 0x12064711, 0x00000001, 0xb0a18612, 0x00000010, 0xe1024794, 0x00000010, 0x11630915, 0x00000011, 0x31c3ca1b, 0x00000011, 0xc1848a9c, 0x00000018, 0xf1e50da0, 0x00000018, 0x22468e21, 0x00000019, 0x4286d023, 0x00000019, 0xa347d0a4, 0x00000019, 0xb36811a6, 0x00000019, 0xf3e89227, 0x00000019, 0x0408d329, 0x0000001a, 0x244953aa, 0x0000001a, 0x346994ab, 0x0000001a, 0x54aa152c, 0x0000001a, 0x64ca55ad, 0x0000001a }; static const uint16_t auxgainidx[] = { 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0001, 0x0002, 0x0004, 0x0016, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0001, 0x0002, 0x0004, 0x0016 }; static const uint16_t swctl[] = { 0x0128, 0x0128, 0x0009, 0x0009, 0x0028, 0x0028, 0x0028, 0x0028, 0x0128, 0x0128, 0x0009, 0x0009, 0x0028, 0x0028, 0x0028, 0x0028, 0x0009, 0x0009, 0x0009, 0x0009, 0x0009, 0x0009, 0x0009, 0x0009, 0x0018, 0x0018, 0x0018, 0x0018, 0x0018, 0x0018, 0x0018, 0x0018, 0x0128, 0x0128, 0x0009, 0x0009, 0x0028, 0x0028, 0x0028, 0x0028, 0x0128, 0x0128, 0x0009, 0x0009, 0x0028, 0x0028, 0x0028, 0x0028, 0x0009, 0x0009, 0x0009, 0x0009, 0x0009, 0x0009, 0x0009, 0x0009, 0x0018, 0x0018, 0x0018, 0x0018, 0x0018, 0x0018, 0x0018, 0x0018 }; static const uint8_t hf[] = { 0x4b, 0x36, 0x24, 0x18, 0x49, 0x34, 0x23, 0x17, 0x48, 0x33, 0x23, 0x17, 0x48, 0x33, 0x23, 0x17 }; static const uint32_t gainval[] = { 0x00000008, 0x0000000e, 0x00000014, 0x0000001a, 0x000000fb, 0x00000004, 0x00000008, 0x0000000d, 0x00000001, 0x00000004, 0x00000007, 0x0000000a, 0x0000000d, 0x00000010, 0x00000012, 0x00000015, 0x00000000, 0x00000006, 0x0000000c, 0x00000000, 0x00000000, 0x00000000, 0x00000012, 0x00000000, 0x00000000, 0x00000000, 0x00000018, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x0000001e, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000003, 0x00000006, 0x00000009, 0x0000000c, 0x0000000f, 0x00000012, 0x00000015, 0x00000018, 0x0000001b, 0x0000001e, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000009, 0x000000f1, 0x00000000, 0x00000000 }; static const uint16_t gain[] = { 0x0000, 0x0400, 0x0800, 0x0802, 0x0804, 0x0806, 0x0807, 0x0808, 0x080a, 0x080b, 0x080c, 0x080e, 0x080f, 0x0810, 0x0812, 0x0813, 0x0814, 0x0816, 0x0817, 0x081a, 0x081b, 0x081f, 0x0820, 0x0824, 0x0830, 0x0834, 0x0837, 0x083b, 0x083f, 0x0840, 0x0844, 0x0857, 0x085b, 0x085f, 0x08d7, 0x08db, 0x08df, 0x0957, 0x095b, 0x095f, 0x0b57, 0x0b5b, 0x0b5f, 0x0f5f, 0x135f, 0x175f, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000 }; static const uint32_t papdeps[] = { 0x00000000, 0x00013ffc, 0x0001dff3, 0x0001bff0, 0x00023fe9, 0x00021fdf, 0x00028fdf, 0x00033fd2, 0x00039fcb, 0x00043fc7, 0x0004efc2, 0x00055fb5, 0x0005cfb0, 0x00063fa8, 0x00068fa3, 0x00071f98, 0x0007ef92, 0x00084f8b, 0x0008df82, 0x00097f77, 0x0009df69, 0x000a3f62, 0x000adf57, 0x000b6f4c, 0x000bff41, 0x000c9f39, 0x000cff30, 0x000dbf27, 0x000e4f1e, 0x000edf16, 0x000f7f13, 0x00102f11, 0x00110f10, 0x0011df11, 0x0012ef15, 0x00143f1c, 0x00158f27, 
0x00172f35, 0x00193f47, 0x001baf5f, 0x001e6f7e, 0x0021cfa4, 0x0025bfd2, 0x002a2008, 0x002fb047, 0x00360090, 0x003d40e0, 0x0045c135, 0x004fb189, 0x005ae1d7, 0x0067221d, 0x0075025a, 0x007ff291, 0x007ff2bf, 0x007ff2e3, 0x007ff2ff, 0x007ff315, 0x007ff329, 0x007ff33f, 0x007ff356, 0x007ff36e, 0x007ff39c, 0x007ff441, 0x007ff506 }; static const uint32_t papdmult[] = { 0x001111e0, 0x00652051, 0x00606055, 0x005b005a, 0x00555060, 0x00511065, 0x004c806b, 0x0047d072, 0x00444078, 0x00400080, 0x003ca087, 0x0039408f, 0x0035e098, 0x0032e0a1, 0x003030aa, 0x002d80b4, 0x002ae0bf, 0x002880ca, 0x002640d6, 0x002410e3, 0x002220f0, 0x002020ff, 0x001e510e, 0x001ca11e, 0x001b012f, 0x00199140, 0x00182153, 0x0016c168, 0x0015817d, 0x00145193, 0x001321ab, 0x001211c5, 0x001111e0, 0x001021fc, 0x000f321a, 0x000e523a, 0x000d925c, 0x000cd27f, 0x000c12a5, 0x000b62cd, 0x000ac2f8, 0x000a2325, 0x00099355, 0x00091387, 0x000883bd, 0x000813f5, 0x0007a432, 0x00073471, 0x0006c4b5, 0x000664fc, 0x00061547, 0x0005b598, 0x000565ec, 0x00051646, 0x0004d6a5, 0x0004870a, 0x00044775, 0x000407e6, 0x0003d85e, 0x000398dd, 0x00036963, 0x000339f2, 0x00030a89, 0x0002db28 }; static const uint32_t gainidx_a0[] = { 0x001111e0, 0x00652051, 0x00606055, 0x005b005a, 0x00555060, 0x00511065, 0x004c806b, 0x0047d072, 0x00444078, 0x00400080, 0x003ca087, 0x0039408f, 0x0035e098, 0x0032e0a1, 0x003030aa, 0x002d80b4, 0x002ae0bf, 0x002880ca, 0x002640d6, 0x002410e3, 0x002220f0, 0x002020ff, 0x001e510e, 0x001ca11e, 0x001b012f, 0x00199140, 0x00182153, 0x0016c168, 0x0015817d, 0x00145193, 0x001321ab, 0x001211c5, 0x001111e0, 0x001021fc, 0x000f321a, 0x000e523a, 0x000d925c, 0x000cd27f, 0x000c12a5, 0x000b62cd, 0x000ac2f8, 0x000a2325, 0x00099355, 0x00091387, 0x000883bd, 0x000813f5, 0x0007a432, 0x00073471, 0x0006c4b5, 0x000664fc, 0x00061547, 0x0005b598, 0x000565ec, 0x00051646, 0x0004d6a5, 0x0004870a, 0x00044775, 0x000407e6, 0x0003d85e, 0x000398dd, 0x00036963, 0x000339f2, 0x00030a89, 0x0002db28 }; static const uint16_t auxgainidx_a0[] = { 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0002, 0x0014, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0002, 0x0014 }; static const uint32_t gainval_a0[] = { 0x00000008, 0x0000000e, 0x00000014, 0x0000001a, 0x000000fb, 0x00000004, 0x00000008, 0x0000000d, 0x00000001, 0x00000004, 0x00000007, 0x0000000a, 0x0000000d, 0x00000010, 0x00000012, 0x00000015, 0x00000000, 0x00000006, 0x0000000c, 0x00000000, 0x00000000, 0x00000000, 0x00000012, 0x00000000, 0x00000000, 0x00000000, 0x00000018, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x0000001e, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000003, 0x00000006, 0x00000009, 0x0000000c, 0x0000000f, 0x00000012, 0x00000015, 0x00000018, 0x0000001b, 0x0000001e, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x0000000f, 0x000000f7, 0x00000000, 0x00000000 }; static const uint16_t gain_a0[] = { 0x0000, 0x0002, 0x0004, 0x0006, 0x0007, 0x0008, 0x000a, 0x000b, 0x000c, 0x000e, 0x000f, 0x0010, 0x0012, 0x0013, 0x0014, 0x0016, 0x0017, 0x001a, 0x001b, 0x001f, 0x0020, 0x0024, 0x0030, 0x0034, 0x0037, 0x003b, 0x003f, 0x0040, 0x0044, 0x0057, 0x005b, 0x005f, 0x00d7, 0x00db, 0x00df, 0x0157, 0x015b, 0x015f, 0x0357, 0x035b, 0x035f, 0x075f, 0x0b5f, 0x0f5f, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 
0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000 }; KASSERT(mac->mac_phy.rev < 2, ("%s:%d: fail", __func__, __LINE__)); for (i = 0; i < 704; i++) bwn_tab_write(mac, BWN_TAB_4(7, i), 0); bwn_tab_write_multi(mac, BWN_TAB_1(2, 0), N(bwn_tab_sigsq_tbl), bwn_tab_sigsq_tbl); bwn_tab_write_multi(mac, BWN_TAB_2(1, 0), N(noisescale), noisescale); bwn_tab_write_multi(mac, BWN_TAB_4(11, 0), N(filterctl), filterctl); bwn_tab_write_multi(mac, BWN_TAB_4(12, 0), N(psctl), psctl); bwn_tab_write_multi(mac, BWN_TAB_4(13, 0), N(gainidx), gainidx); bwn_tab_write_multi(mac, BWN_TAB_2(14, 0), N(auxgainidx), auxgainidx); bwn_tab_write_multi(mac, BWN_TAB_2(15, 0), N(swctl), swctl); bwn_tab_write_multi(mac, BWN_TAB_1(16, 0), N(hf), hf); bwn_tab_write_multi(mac, BWN_TAB_4(17, 0), N(gainval), gainval); bwn_tab_write_multi(mac, BWN_TAB_2(18, 0), N(gain), gain); bwn_tab_write_multi(mac, BWN_TAB_1(6, 0), N(bwn_tab_pllfrac_tbl), bwn_tab_pllfrac_tbl); bwn_tab_write_multi(mac, BWN_TAB_2(0, 0), N(bwn_tabl_iqlocal_tbl), bwn_tabl_iqlocal_tbl); bwn_tab_write_multi(mac, BWN_TAB_4(9, 0), N(papdeps), papdeps); bwn_tab_write_multi(mac, BWN_TAB_4(10, 0), N(papdmult), papdmult); if ((siba_get_chipid(sc->sc_dev) == 0x4325) && (siba_get_chiprev(sc->sc_dev) == 0)) { bwn_tab_write_multi(mac, BWN_TAB_4(13, 0), N(gainidx_a0), gainidx_a0); bwn_tab_write_multi(mac, BWN_TAB_2(14, 0), N(auxgainidx_a0), auxgainidx_a0); bwn_tab_write_multi(mac, BWN_TAB_4(17, 0), N(gainval_a0), gainval_a0); bwn_tab_write_multi(mac, BWN_TAB_2(18, 0), N(gain_a0), gain_a0); } } static void bwn_phy_lp_tblinit_txgain(struct bwn_mac *mac) { struct bwn_softc *sc = mac->mac_sc; struct ieee80211com *ic = &sc->sc_ic; static struct bwn_txgain_entry txgain_r2[] = { { 255, 255, 203, 0, 152 }, { 255, 255, 203, 0, 147 }, { 255, 255, 203, 0, 143 }, { 255, 255, 203, 0, 139 }, { 255, 255, 203, 0, 135 }, { 255, 255, 203, 0, 131 }, { 255, 255, 203, 0, 128 }, { 255, 255, 203, 0, 124 }, { 255, 255, 203, 0, 121 }, { 255, 255, 203, 0, 117 }, { 255, 255, 203, 0, 114 }, { 255, 255, 203, 0, 111 }, { 255, 255, 203, 0, 107 }, { 255, 255, 203, 0, 104 }, { 255, 255, 203, 0, 101 }, { 255, 255, 203, 0, 99 }, { 255, 255, 203, 0, 96 }, { 255, 255, 203, 0, 93 }, { 255, 255, 203, 0, 90 }, { 255, 255, 203, 0, 88 }, { 255, 255, 203, 0, 85 }, { 255, 255, 203, 0, 83 }, { 255, 255, 203, 0, 81 }, { 255, 255, 203, 0, 78 }, { 255, 255, 203, 0, 76 }, { 255, 255, 203, 0, 74 }, { 255, 255, 203, 0, 72 }, { 255, 255, 203, 0, 70 }, { 255, 255, 203, 0, 68 }, { 255, 255, 203, 0, 66 }, { 255, 255, 203, 0, 64 }, { 255, 255, 197, 0, 64 }, { 255, 255, 192, 0, 64 }, { 255, 255, 186, 0, 64 }, { 255, 255, 181, 0, 64 }, { 255, 255, 176, 0, 64 }, { 255, 255, 171, 0, 64 }, { 255, 255, 166, 0, 64 }, { 255, 255, 161, 0, 64 }, { 255, 255, 157, 0, 64 }, { 255, 255, 152, 0, 64 }, { 255, 255, 148, 0, 64 }, { 255, 255, 144, 0, 64 }, { 255, 255, 140, 0, 64 }, { 255, 255, 136, 0, 64 }, { 255, 255, 132, 0, 64 }, { 255, 255, 128, 0, 64 }, { 255, 255, 124, 0, 64 }, { 255, 255, 121, 0, 64 }, { 255, 255, 117, 0, 64 }, { 255, 255, 114, 0, 64 }, { 255, 255, 111, 0, 64 }, { 255, 255, 108, 0, 64 }, { 255, 255, 105, 0, 64 }, { 255, 255, 102, 0, 64 }, { 255, 255, 99, 0, 64 }, { 255, 255, 96, 0, 64 }, { 255, 255, 93, 0, 64 }, { 255, 255, 91, 0, 64 }, { 255, 255, 88, 0, 64 }, { 255, 255, 86, 0, 
64 }, { 255, 255, 83, 0, 64 }, { 255, 255, 81, 0, 64 }, { 255, 255, 79, 0, 64 }, { 255, 255, 76, 0, 64 }, { 255, 255, 74, 0, 64 }, { 255, 255, 72, 0, 64 }, { 255, 255, 70, 0, 64 }, { 255, 255, 68, 0, 64 }, { 255, 255, 66, 0, 64 }, { 255, 255, 64, 0, 64 }, { 255, 248, 64, 0, 64 }, { 255, 248, 62, 0, 64 }, { 255, 241, 62, 0, 64 }, { 255, 241, 60, 0, 64 }, { 255, 234, 60, 0, 64 }, { 255, 234, 59, 0, 64 }, { 255, 227, 59, 0, 64 }, { 255, 227, 57, 0, 64 }, { 255, 221, 57, 0, 64 }, { 255, 221, 55, 0, 64 }, { 255, 215, 55, 0, 64 }, { 255, 215, 54, 0, 64 }, { 255, 208, 54, 0, 64 }, { 255, 208, 52, 0, 64 }, { 255, 203, 52, 0, 64 }, { 255, 203, 51, 0, 64 }, { 255, 197, 51, 0, 64 }, { 255, 197, 49, 0, 64 }, { 255, 191, 49, 0, 64 }, { 255, 191, 48, 0, 64 }, { 255, 186, 48, 0, 64 }, { 255, 186, 47, 0, 64 }, { 255, 181, 47, 0, 64 }, { 255, 181, 45, 0, 64 }, { 255, 175, 45, 0, 64 }, { 255, 175, 44, 0, 64 }, { 255, 170, 44, 0, 64 }, { 255, 170, 43, 0, 64 }, { 255, 166, 43, 0, 64 }, { 255, 166, 42, 0, 64 }, { 255, 161, 42, 0, 64 }, { 255, 161, 40, 0, 64 }, { 255, 156, 40, 0, 64 }, { 255, 156, 39, 0, 64 }, { 255, 152, 39, 0, 64 }, { 255, 152, 38, 0, 64 }, { 255, 148, 38, 0, 64 }, { 255, 148, 37, 0, 64 }, { 255, 143, 37, 0, 64 }, { 255, 143, 36, 0, 64 }, { 255, 139, 36, 0, 64 }, { 255, 139, 35, 0, 64 }, { 255, 135, 35, 0, 64 }, { 255, 135, 34, 0, 64 }, { 255, 132, 34, 0, 64 }, { 255, 132, 33, 0, 64 }, { 255, 128, 33, 0, 64 }, { 255, 128, 32, 0, 64 }, { 255, 124, 32, 0, 64 }, { 255, 124, 31, 0, 64 }, { 255, 121, 31, 0, 64 }, { 255, 121, 30, 0, 64 }, { 255, 117, 30, 0, 64 }, { 255, 117, 29, 0, 64 }, { 255, 114, 29, 0, 64 }, { 255, 114, 29, 0, 64 }, { 255, 111, 29, 0, 64 }, }; static struct bwn_txgain_entry txgain_2ghz_r2[] = { { 7, 99, 255, 0, 64 }, { 7, 96, 255, 0, 64 }, { 7, 93, 255, 0, 64 }, { 7, 90, 255, 0, 64 }, { 7, 88, 255, 0, 64 }, { 7, 85, 255, 0, 64 }, { 7, 83, 255, 0, 64 }, { 7, 81, 255, 0, 64 }, { 7, 78, 255, 0, 64 }, { 7, 76, 255, 0, 64 }, { 7, 74, 255, 0, 64 }, { 7, 72, 255, 0, 64 }, { 7, 70, 255, 0, 64 }, { 7, 68, 255, 0, 64 }, { 7, 66, 255, 0, 64 }, { 7, 64, 255, 0, 64 }, { 7, 64, 255, 0, 64 }, { 7, 62, 255, 0, 64 }, { 7, 62, 248, 0, 64 }, { 7, 60, 248, 0, 64 }, { 7, 60, 241, 0, 64 }, { 7, 59, 241, 0, 64 }, { 7, 59, 234, 0, 64 }, { 7, 57, 234, 0, 64 }, { 7, 57, 227, 0, 64 }, { 7, 55, 227, 0, 64 }, { 7, 55, 221, 0, 64 }, { 7, 54, 221, 0, 64 }, { 7, 54, 215, 0, 64 }, { 7, 52, 215, 0, 64 }, { 7, 52, 208, 0, 64 }, { 7, 51, 208, 0, 64 }, { 7, 51, 203, 0, 64 }, { 7, 49, 203, 0, 64 }, { 7, 49, 197, 0, 64 }, { 7, 48, 197, 0, 64 }, { 7, 48, 191, 0, 64 }, { 7, 47, 191, 0, 64 }, { 7, 47, 186, 0, 64 }, { 7, 45, 186, 0, 64 }, { 7, 45, 181, 0, 64 }, { 7, 44, 181, 0, 64 }, { 7, 44, 175, 0, 64 }, { 7, 43, 175, 0, 64 }, { 7, 43, 170, 0, 64 }, { 7, 42, 170, 0, 64 }, { 7, 42, 166, 0, 64 }, { 7, 40, 166, 0, 64 }, { 7, 40, 161, 0, 64 }, { 7, 39, 161, 0, 64 }, { 7, 39, 156, 0, 64 }, { 7, 38, 156, 0, 64 }, { 7, 38, 152, 0, 64 }, { 7, 37, 152, 0, 64 }, { 7, 37, 148, 0, 64 }, { 7, 36, 148, 0, 64 }, { 7, 36, 143, 0, 64 }, { 7, 35, 143, 0, 64 }, { 7, 35, 139, 0, 64 }, { 7, 34, 139, 0, 64 }, { 7, 34, 135, 0, 64 }, { 7, 33, 135, 0, 64 }, { 7, 33, 132, 0, 64 }, { 7, 32, 132, 0, 64 }, { 7, 32, 128, 0, 64 }, { 7, 31, 128, 0, 64 }, { 7, 31, 124, 0, 64 }, { 7, 30, 124, 0, 64 }, { 7, 30, 121, 0, 64 }, { 7, 29, 121, 0, 64 }, { 7, 29, 117, 0, 64 }, { 7, 29, 117, 0, 64 }, { 7, 29, 114, 0, 64 }, { 7, 28, 114, 0, 64 }, { 7, 28, 111, 0, 64 }, { 7, 27, 111, 0, 64 }, { 7, 27, 108, 0, 64 }, { 7, 26, 108, 0, 64 }, { 7, 26, 104, 0, 64 }, { 
7, 25, 104, 0, 64 }, { 7, 25, 102, 0, 64 }, { 7, 25, 102, 0, 64 }, { 7, 25, 99, 0, 64 }, { 7, 24, 99, 0, 64 }, { 7, 24, 96, 0, 64 }, { 7, 23, 96, 0, 64 }, { 7, 23, 93, 0, 64 }, { 7, 23, 93, 0, 64 }, { 7, 23, 90, 0, 64 }, { 7, 22, 90, 0, 64 }, { 7, 22, 88, 0, 64 }, { 7, 21, 88, 0, 64 }, { 7, 21, 85, 0, 64 }, { 7, 21, 85, 0, 64 }, { 7, 21, 83, 0, 64 }, { 7, 20, 83, 0, 64 }, { 7, 20, 81, 0, 64 }, { 7, 20, 81, 0, 64 }, { 7, 20, 78, 0, 64 }, { 7, 19, 78, 0, 64 }, { 7, 19, 76, 0, 64 }, { 7, 19, 76, 0, 64 }, { 7, 19, 74, 0, 64 }, { 7, 18, 74, 0, 64 }, { 7, 18, 72, 0, 64 }, { 7, 18, 72, 0, 64 }, { 7, 18, 70, 0, 64 }, { 7, 17, 70, 0, 64 }, { 7, 17, 68, 0, 64 }, { 7, 17, 68, 0, 64 }, { 7, 17, 66, 0, 64 }, { 7, 16, 66, 0, 64 }, { 7, 16, 64, 0, 64 }, { 7, 16, 64, 0, 64 }, { 7, 16, 62, 0, 64 }, { 7, 15, 62, 0, 64 }, { 7, 15, 60, 0, 64 }, { 7, 15, 60, 0, 64 }, { 7, 15, 59, 0, 64 }, { 7, 14, 59, 0, 64 }, { 7, 14, 57, 0, 64 }, { 7, 14, 57, 0, 64 }, { 7, 14, 55, 0, 64 }, { 7, 14, 55, 0, 64 }, { 7, 14, 54, 0, 64 }, { 7, 13, 54, 0, 64 }, { 7, 13, 52, 0, 64 }, { 7, 13, 52, 0, 64 }, }; static struct bwn_txgain_entry txgain_5ghz_r2[] = { { 255, 255, 255, 0, 152 }, { 255, 255, 255, 0, 147 }, { 255, 255, 255, 0, 143 }, { 255, 255, 255, 0, 139 }, { 255, 255, 255, 0, 135 }, { 255, 255, 255, 0, 131 }, { 255, 255, 255, 0, 128 }, { 255, 255, 255, 0, 124 }, { 255, 255, 255, 0, 121 }, { 255, 255, 255, 0, 117 }, { 255, 255, 255, 0, 114 }, { 255, 255, 255, 0, 111 }, { 255, 255, 255, 0, 107 }, { 255, 255, 255, 0, 104 }, { 255, 255, 255, 0, 101 }, { 255, 255, 255, 0, 99 }, { 255, 255, 255, 0, 96 }, { 255, 255, 255, 0, 93 }, { 255, 255, 255, 0, 90 }, { 255, 255, 255, 0, 88 }, { 255, 255, 255, 0, 85 }, { 255, 255, 255, 0, 83 }, { 255, 255, 255, 0, 81 }, { 255, 255, 255, 0, 78 }, { 255, 255, 255, 0, 76 }, { 255, 255, 255, 0, 74 }, { 255, 255, 255, 0, 72 }, { 255, 255, 255, 0, 70 }, { 255, 255, 255, 0, 68 }, { 255, 255, 255, 0, 66 }, { 255, 255, 255, 0, 64 }, { 255, 255, 248, 0, 64 }, { 255, 255, 241, 0, 64 }, { 255, 255, 234, 0, 64 }, { 255, 255, 227, 0, 64 }, { 255, 255, 221, 0, 64 }, { 255, 255, 215, 0, 64 }, { 255, 255, 208, 0, 64 }, { 255, 255, 203, 0, 64 }, { 255, 255, 197, 0, 64 }, { 255, 255, 191, 0, 64 }, { 255, 255, 186, 0, 64 }, { 255, 255, 181, 0, 64 }, { 255, 255, 175, 0, 64 }, { 255, 255, 170, 0, 64 }, { 255, 255, 166, 0, 64 }, { 255, 255, 161, 0, 64 }, { 255, 255, 156, 0, 64 }, { 255, 255, 152, 0, 64 }, { 255, 255, 148, 0, 64 }, { 255, 255, 143, 0, 64 }, { 255, 255, 139, 0, 64 }, { 255, 255, 135, 0, 64 }, { 255, 255, 132, 0, 64 }, { 255, 255, 128, 0, 64 }, { 255, 255, 124, 0, 64 }, { 255, 255, 121, 0, 64 }, { 255, 255, 117, 0, 64 }, { 255, 255, 114, 0, 64 }, { 255, 255, 111, 0, 64 }, { 255, 255, 108, 0, 64 }, { 255, 255, 104, 0, 64 }, { 255, 255, 102, 0, 64 }, { 255, 255, 99, 0, 64 }, { 255, 255, 96, 0, 64 }, { 255, 255, 93, 0, 64 }, { 255, 255, 90, 0, 64 }, { 255, 255, 88, 0, 64 }, { 255, 255, 85, 0, 64 }, { 255, 255, 83, 0, 64 }, { 255, 255, 81, 0, 64 }, { 255, 255, 78, 0, 64 }, { 255, 255, 76, 0, 64 }, { 255, 255, 74, 0, 64 }, { 255, 255, 72, 0, 64 }, { 255, 255, 70, 0, 64 }, { 255, 255, 68, 0, 64 }, { 255, 255, 66, 0, 64 }, { 255, 255, 64, 0, 64 }, { 255, 255, 64, 0, 64 }, { 255, 255, 62, 0, 64 }, { 255, 248, 62, 0, 64 }, { 255, 248, 60, 0, 64 }, { 255, 241, 60, 0, 64 }, { 255, 241, 59, 0, 64 }, { 255, 234, 59, 0, 64 }, { 255, 234, 57, 0, 64 }, { 255, 227, 57, 0, 64 }, { 255, 227, 55, 0, 64 }, { 255, 221, 55, 0, 64 }, { 255, 221, 54, 0, 64 }, { 255, 215, 54, 0, 64 }, { 255, 215, 52, 0, 64 }, { 255, 208, 52, 
0, 64 }, { 255, 208, 51, 0, 64 }, { 255, 203, 51, 0, 64 }, { 255, 203, 49, 0, 64 }, { 255, 197, 49, 0, 64 }, { 255, 197, 48, 0, 64 }, { 255, 191, 48, 0, 64 }, { 255, 191, 47, 0, 64 }, { 255, 186, 47, 0, 64 }, { 255, 186, 45, 0, 64 }, { 255, 181, 45, 0, 64 }, { 255, 181, 44, 0, 64 }, { 255, 175, 44, 0, 64 }, { 255, 175, 43, 0, 64 }, { 255, 170, 43, 0, 64 }, { 255, 170, 42, 0, 64 }, { 255, 166, 42, 0, 64 }, { 255, 166, 40, 0, 64 }, { 255, 161, 40, 0, 64 }, { 255, 161, 39, 0, 64 }, { 255, 156, 39, 0, 64 }, { 255, 156, 38, 0, 64 }, { 255, 152, 38, 0, 64 }, { 255, 152, 37, 0, 64 }, { 255, 148, 37, 0, 64 }, { 255, 148, 36, 0, 64 }, { 255, 143, 36, 0, 64 }, { 255, 143, 35, 0, 64 }, { 255, 139, 35, 0, 64 }, { 255, 139, 34, 0, 64 }, { 255, 135, 34, 0, 64 }, { 255, 135, 33, 0, 64 }, { 255, 132, 33, 0, 64 }, { 255, 132, 32, 0, 64 }, { 255, 128, 32, 0, 64 } }; static struct bwn_txgain_entry txgain_r0[] = { { 7, 15, 14, 0, 152 }, { 7, 15, 14, 0, 147 }, { 7, 15, 14, 0, 143 }, { 7, 15, 14, 0, 139 }, { 7, 15, 14, 0, 135 }, { 7, 15, 14, 0, 131 }, { 7, 15, 14, 0, 128 }, { 7, 15, 14, 0, 124 }, { 7, 15, 14, 0, 121 }, { 7, 15, 14, 0, 117 }, { 7, 15, 14, 0, 114 }, { 7, 15, 14, 0, 111 }, { 7, 15, 14, 0, 107 }, { 7, 15, 14, 0, 104 }, { 7, 15, 14, 0, 101 }, { 7, 15, 14, 0, 99 }, { 7, 15, 14, 0, 96 }, { 7, 15, 14, 0, 93 }, { 7, 15, 14, 0, 90 }, { 7, 15, 14, 0, 88 }, { 7, 15, 14, 0, 85 }, { 7, 15, 14, 0, 83 }, { 7, 15, 14, 0, 81 }, { 7, 15, 14, 0, 78 }, { 7, 15, 14, 0, 76 }, { 7, 15, 14, 0, 74 }, { 7, 15, 14, 0, 72 }, { 7, 15, 14, 0, 70 }, { 7, 15, 14, 0, 68 }, { 7, 15, 14, 0, 66 }, { 7, 15, 14, 0, 64 }, { 7, 15, 14, 0, 62 }, { 7, 15, 14, 0, 60 }, { 7, 15, 14, 0, 59 }, { 7, 15, 14, 0, 57 }, { 7, 15, 13, 0, 72 }, { 7, 15, 13, 0, 70 }, { 7, 15, 13, 0, 68 }, { 7, 15, 13, 0, 66 }, { 7, 15, 13, 0, 64 }, { 7, 15, 13, 0, 62 }, { 7, 15, 13, 0, 60 }, { 7, 15, 13, 0, 59 }, { 7, 15, 13, 0, 57 }, { 7, 15, 12, 0, 71 }, { 7, 15, 12, 0, 69 }, { 7, 15, 12, 0, 67 }, { 7, 15, 12, 0, 65 }, { 7, 15, 12, 0, 63 }, { 7, 15, 12, 0, 62 }, { 7, 15, 12, 0, 60 }, { 7, 15, 12, 0, 58 }, { 7, 15, 12, 0, 57 }, { 7, 15, 11, 0, 70 }, { 7, 15, 11, 0, 68 }, { 7, 15, 11, 0, 66 }, { 7, 15, 11, 0, 65 }, { 7, 15, 11, 0, 63 }, { 7, 15, 11, 0, 61 }, { 7, 15, 11, 0, 59 }, { 7, 15, 11, 0, 58 }, { 7, 15, 10, 0, 71 }, { 7, 15, 10, 0, 69 }, { 7, 15, 10, 0, 67 }, { 7, 15, 10, 0, 65 }, { 7, 15, 10, 0, 63 }, { 7, 15, 10, 0, 61 }, { 7, 15, 10, 0, 60 }, { 7, 15, 10, 0, 58 }, { 7, 15, 10, 0, 56 }, { 7, 15, 9, 0, 70 }, { 7, 15, 9, 0, 68 }, { 7, 15, 9, 0, 66 }, { 7, 15, 9, 0, 64 }, { 7, 15, 9, 0, 62 }, { 7, 15, 9, 0, 60 }, { 7, 15, 9, 0, 59 }, { 7, 14, 9, 0, 72 }, { 7, 14, 9, 0, 70 }, { 7, 14, 9, 0, 68 }, { 7, 14, 9, 0, 66 }, { 7, 14, 9, 0, 64 }, { 7, 14, 9, 0, 62 }, { 7, 14, 9, 0, 60 }, { 7, 14, 9, 0, 59 }, { 7, 13, 9, 0, 72 }, { 7, 13, 9, 0, 70 }, { 7, 13, 9, 0, 68 }, { 7, 13, 9, 0, 66 }, { 7, 13, 9, 0, 64 }, { 7, 13, 9, 0, 63 }, { 7, 13, 9, 0, 61 }, { 7, 13, 9, 0, 59 }, { 7, 13, 9, 0, 57 }, { 7, 13, 8, 0, 72 }, { 7, 13, 8, 0, 70 }, { 7, 13, 8, 0, 68 }, { 7, 13, 8, 0, 66 }, { 7, 13, 8, 0, 64 }, { 7, 13, 8, 0, 62 }, { 7, 13, 8, 0, 60 }, { 7, 13, 8, 0, 59 }, { 7, 12, 8, 0, 72 }, { 7, 12, 8, 0, 70 }, { 7, 12, 8, 0, 68 }, { 7, 12, 8, 0, 66 }, { 7, 12, 8, 0, 64 }, { 7, 12, 8, 0, 62 }, { 7, 12, 8, 0, 61 }, { 7, 12, 8, 0, 59 }, { 7, 12, 7, 0, 73 }, { 7, 12, 7, 0, 71 }, { 7, 12, 7, 0, 69 }, { 7, 12, 7, 0, 67 }, { 7, 12, 7, 0, 65 }, { 7, 12, 7, 0, 63 }, { 7, 12, 7, 0, 61 }, { 7, 12, 7, 0, 59 }, { 7, 11, 7, 0, 72 }, { 7, 11, 7, 0, 70 }, { 7, 11, 7, 0, 68 }, { 7, 11, 7, 0, 66 }, 
{ 7, 11, 7, 0, 65 }, { 7, 11, 7, 0, 63 }, { 7, 11, 7, 0, 61 }, { 7, 11, 7, 0, 59 }, { 7, 11, 6, 0, 73 }, { 7, 11, 6, 0, 71 } }; static struct bwn_txgain_entry txgain_2ghz_r0[] = { { 4, 15, 9, 0, 64 }, { 4, 15, 9, 0, 62 }, { 4, 15, 9, 0, 60 }, { 4, 15, 9, 0, 59 }, { 4, 14, 9, 0, 72 }, { 4, 14, 9, 0, 70 }, { 4, 14, 9, 0, 68 }, { 4, 14, 9, 0, 66 }, { 4, 14, 9, 0, 64 }, { 4, 14, 9, 0, 62 }, { 4, 14, 9, 0, 60 }, { 4, 14, 9, 0, 59 }, { 4, 13, 9, 0, 72 }, { 4, 13, 9, 0, 70 }, { 4, 13, 9, 0, 68 }, { 4, 13, 9, 0, 66 }, { 4, 13, 9, 0, 64 }, { 4, 13, 9, 0, 63 }, { 4, 13, 9, 0, 61 }, { 4, 13, 9, 0, 59 }, { 4, 13, 9, 0, 57 }, { 4, 13, 8, 0, 72 }, { 4, 13, 8, 0, 70 }, { 4, 13, 8, 0, 68 }, { 4, 13, 8, 0, 66 }, { 4, 13, 8, 0, 64 }, { 4, 13, 8, 0, 62 }, { 4, 13, 8, 0, 60 }, { 4, 13, 8, 0, 59 }, { 4, 12, 8, 0, 72 }, { 4, 12, 8, 0, 70 }, { 4, 12, 8, 0, 68 }, { 4, 12, 8, 0, 66 }, { 4, 12, 8, 0, 64 }, { 4, 12, 8, 0, 62 }, { 4, 12, 8, 0, 61 }, { 4, 12, 8, 0, 59 }, { 4, 12, 7, 0, 73 }, { 4, 12, 7, 0, 71 }, { 4, 12, 7, 0, 69 }, { 4, 12, 7, 0, 67 }, { 4, 12, 7, 0, 65 }, { 4, 12, 7, 0, 63 }, { 4, 12, 7, 0, 61 }, { 4, 12, 7, 0, 59 }, { 4, 11, 7, 0, 72 }, { 4, 11, 7, 0, 70 }, { 4, 11, 7, 0, 68 }, { 4, 11, 7, 0, 66 }, { 4, 11, 7, 0, 65 }, { 4, 11, 7, 0, 63 }, { 4, 11, 7, 0, 61 }, { 4, 11, 7, 0, 59 }, { 4, 11, 6, 0, 73 }, { 4, 11, 6, 0, 71 }, { 4, 11, 6, 0, 69 }, { 4, 11, 6, 0, 67 }, { 4, 11, 6, 0, 65 }, { 4, 11, 6, 0, 63 }, { 4, 11, 6, 0, 61 }, { 4, 11, 6, 0, 60 }, { 4, 10, 6, 0, 72 }, { 4, 10, 6, 0, 70 }, { 4, 10, 6, 0, 68 }, { 4, 10, 6, 0, 66 }, { 4, 10, 6, 0, 64 }, { 4, 10, 6, 0, 62 }, { 4, 10, 6, 0, 60 }, { 4, 10, 6, 0, 59 }, { 4, 10, 5, 0, 72 }, { 4, 10, 5, 0, 70 }, { 4, 10, 5, 0, 68 }, { 4, 10, 5, 0, 66 }, { 4, 10, 5, 0, 64 }, { 4, 10, 5, 0, 62 }, { 4, 10, 5, 0, 60 }, { 4, 10, 5, 0, 59 }, { 4, 9, 5, 0, 70 }, { 4, 9, 5, 0, 68 }, { 4, 9, 5, 0, 66 }, { 4, 9, 5, 0, 64 }, { 4, 9, 5, 0, 63 }, { 4, 9, 5, 0, 61 }, { 4, 9, 5, 0, 59 }, { 4, 9, 4, 0, 71 }, { 4, 9, 4, 0, 69 }, { 4, 9, 4, 0, 67 }, { 4, 9, 4, 0, 65 }, { 4, 9, 4, 0, 63 }, { 4, 9, 4, 0, 62 }, { 4, 9, 4, 0, 60 }, { 4, 9, 4, 0, 58 }, { 4, 8, 4, 0, 70 }, { 4, 8, 4, 0, 68 }, { 4, 8, 4, 0, 66 }, { 4, 8, 4, 0, 65 }, { 4, 8, 4, 0, 63 }, { 4, 8, 4, 0, 61 }, { 4, 8, 4, 0, 59 }, { 4, 7, 4, 0, 68 }, { 4, 7, 4, 0, 66 }, { 4, 7, 4, 0, 64 }, { 4, 7, 4, 0, 62 }, { 4, 7, 4, 0, 61 }, { 4, 7, 4, 0, 59 }, { 4, 7, 3, 0, 67 }, { 4, 7, 3, 0, 65 }, { 4, 7, 3, 0, 63 }, { 4, 7, 3, 0, 62 }, { 4, 7, 3, 0, 60 }, { 4, 6, 3, 0, 65 }, { 4, 6, 3, 0, 63 }, { 4, 6, 3, 0, 61 }, { 4, 6, 3, 0, 60 }, { 4, 6, 3, 0, 58 }, { 4, 5, 3, 0, 68 }, { 4, 5, 3, 0, 66 }, { 4, 5, 3, 0, 64 }, { 4, 5, 3, 0, 62 }, { 4, 5, 3, 0, 60 }, { 4, 5, 3, 0, 59 }, { 4, 5, 3, 0, 57 }, { 4, 4, 2, 0, 83 }, { 4, 4, 2, 0, 81 }, { 4, 4, 2, 0, 78 }, { 4, 4, 2, 0, 76 }, { 4, 4, 2, 0, 74 }, { 4, 4, 2, 0, 72 } }; static struct bwn_txgain_entry txgain_5ghz_r0[] = { { 7, 15, 15, 0, 99 }, { 7, 15, 15, 0, 96 }, { 7, 15, 15, 0, 93 }, { 7, 15, 15, 0, 90 }, { 7, 15, 15, 0, 88 }, { 7, 15, 15, 0, 85 }, { 7, 15, 15, 0, 83 }, { 7, 15, 15, 0, 81 }, { 7, 15, 15, 0, 78 }, { 7, 15, 15, 0, 76 }, { 7, 15, 15, 0, 74 }, { 7, 15, 15, 0, 72 }, { 7, 15, 15, 0, 70 }, { 7, 15, 15, 0, 68 }, { 7, 15, 15, 0, 66 }, { 7, 15, 15, 0, 64 }, { 7, 15, 15, 0, 62 }, { 7, 15, 15, 0, 60 }, { 7, 15, 15, 0, 59 }, { 7, 15, 15, 0, 57 }, { 7, 15, 15, 0, 55 }, { 7, 15, 14, 0, 72 }, { 7, 15, 14, 0, 70 }, { 7, 15, 14, 0, 68 }, { 7, 15, 14, 0, 66 }, { 7, 15, 14, 0, 64 }, { 7, 15, 14, 0, 62 }, { 7, 15, 14, 0, 60 }, { 7, 15, 14, 0, 58 }, { 7, 15, 14, 0, 56 }, { 7, 15, 14, 0, 55 }, { 
7, 15, 13, 0, 71 }, { 7, 15, 13, 0, 69 }, { 7, 15, 13, 0, 67 }, { 7, 15, 13, 0, 65 }, { 7, 15, 13, 0, 63 }, { 7, 15, 13, 0, 62 }, { 7, 15, 13, 0, 60 }, { 7, 15, 13, 0, 58 }, { 7, 15, 13, 0, 56 }, { 7, 15, 12, 0, 72 }, { 7, 15, 12, 0, 70 }, { 7, 15, 12, 0, 68 }, { 7, 15, 12, 0, 66 }, { 7, 15, 12, 0, 64 }, { 7, 15, 12, 0, 62 }, { 7, 15, 12, 0, 60 }, { 7, 15, 12, 0, 59 }, { 7, 15, 12, 0, 57 }, { 7, 15, 11, 0, 73 }, { 7, 15, 11, 0, 71 }, { 7, 15, 11, 0, 69 }, { 7, 15, 11, 0, 67 }, { 7, 15, 11, 0, 65 }, { 7, 15, 11, 0, 63 }, { 7, 15, 11, 0, 61 }, { 7, 15, 11, 0, 60 }, { 7, 15, 11, 0, 58 }, { 7, 15, 10, 0, 71 }, { 7, 15, 10, 0, 69 }, { 7, 15, 10, 0, 67 }, { 7, 15, 10, 0, 65 }, { 7, 15, 10, 0, 63 }, { 7, 15, 10, 0, 61 }, { 7, 15, 10, 0, 60 }, { 7, 15, 10, 0, 58 }, { 7, 15, 9, 0, 70 }, { 7, 15, 9, 0, 68 }, { 7, 15, 9, 0, 66 }, { 7, 15, 9, 0, 64 }, { 7, 15, 9, 0, 62 }, { 7, 15, 9, 0, 61 }, { 7, 15, 9, 0, 59 }, { 7, 15, 9, 0, 57 }, { 7, 15, 9, 0, 56 }, { 7, 14, 9, 0, 68 }, { 7, 14, 9, 0, 66 }, { 7, 14, 9, 0, 65 }, { 7, 14, 9, 0, 63 }, { 7, 14, 9, 0, 61 }, { 7, 14, 9, 0, 59 }, { 7, 14, 9, 0, 58 }, { 7, 13, 9, 0, 70 }, { 7, 13, 9, 0, 68 }, { 7, 13, 9, 0, 66 }, { 7, 13, 9, 0, 64 }, { 7, 13, 9, 0, 63 }, { 7, 13, 9, 0, 61 }, { 7, 13, 9, 0, 59 }, { 7, 13, 9, 0, 57 }, { 7, 13, 8, 0, 70 }, { 7, 13, 8, 0, 68 }, { 7, 13, 8, 0, 66 }, { 7, 13, 8, 0, 64 }, { 7, 13, 8, 0, 62 }, { 7, 13, 8, 0, 60 }, { 7, 13, 8, 0, 59 }, { 7, 13, 8, 0, 57 }, { 7, 12, 8, 0, 70 }, { 7, 12, 8, 0, 68 }, { 7, 12, 8, 0, 66 }, { 7, 12, 8, 0, 64 }, { 7, 12, 8, 0, 62 }, { 7, 12, 8, 0, 61 }, { 7, 12, 8, 0, 59 }, { 7, 12, 8, 0, 57 }, { 7, 12, 7, 0, 70 }, { 7, 12, 7, 0, 68 }, { 7, 12, 7, 0, 66 }, { 7, 12, 7, 0, 64 }, { 7, 12, 7, 0, 62 }, { 7, 12, 7, 0, 61 }, { 7, 12, 7, 0, 59 }, { 7, 12, 7, 0, 57 }, { 7, 11, 7, 0, 70 }, { 7, 11, 7, 0, 68 }, { 7, 11, 7, 0, 66 }, { 7, 11, 7, 0, 64 }, { 7, 11, 7, 0, 62 }, { 7, 11, 7, 0, 61 }, { 7, 11, 7, 0, 59 }, { 7, 11, 7, 0, 57 }, { 7, 11, 6, 0, 69 }, { 7, 11, 6, 0, 67 }, { 7, 11, 6, 0, 65 }, { 7, 11, 6, 0, 63 }, { 7, 11, 6, 0, 62 }, { 7, 11, 6, 0, 60 } }; static struct bwn_txgain_entry txgain_r1[] = { { 7, 15, 14, 0, 152 }, { 7, 15, 14, 0, 147 }, { 7, 15, 14, 0, 143 }, { 7, 15, 14, 0, 139 }, { 7, 15, 14, 0, 135 }, { 7, 15, 14, 0, 131 }, { 7, 15, 14, 0, 128 }, { 7, 15, 14, 0, 124 }, { 7, 15, 14, 0, 121 }, { 7, 15, 14, 0, 117 }, { 7, 15, 14, 0, 114 }, { 7, 15, 14, 0, 111 }, { 7, 15, 14, 0, 107 }, { 7, 15, 14, 0, 104 }, { 7, 15, 14, 0, 101 }, { 7, 15, 14, 0, 99 }, { 7, 15, 14, 0, 96 }, { 7, 15, 14, 0, 93 }, { 7, 15, 14, 0, 90 }, { 7, 15, 14, 0, 88 }, { 7, 15, 14, 0, 85 }, { 7, 15, 14, 0, 83 }, { 7, 15, 14, 0, 81 }, { 7, 15, 14, 0, 78 }, { 7, 15, 14, 0, 76 }, { 7, 15, 14, 0, 74 }, { 7, 15, 14, 0, 72 }, { 7, 15, 14, 0, 70 }, { 7, 15, 14, 0, 68 }, { 7, 15, 14, 0, 66 }, { 7, 15, 14, 0, 64 }, { 7, 15, 14, 0, 62 }, { 7, 15, 14, 0, 60 }, { 7, 15, 14, 0, 59 }, { 7, 15, 14, 0, 57 }, { 7, 15, 13, 0, 72 }, { 7, 15, 13, 0, 70 }, { 7, 15, 14, 0, 68 }, { 7, 15, 14, 0, 66 }, { 7, 15, 14, 0, 64 }, { 7, 15, 14, 0, 62 }, { 7, 15, 14, 0, 60 }, { 7, 15, 14, 0, 59 }, { 7, 15, 14, 0, 57 }, { 7, 15, 13, 0, 72 }, { 7, 15, 13, 0, 70 }, { 7, 15, 13, 0, 68 }, { 7, 15, 13, 0, 66 }, { 7, 15, 13, 0, 64 }, { 7, 15, 13, 0, 62 }, { 7, 15, 13, 0, 60 }, { 7, 15, 13, 0, 59 }, { 7, 15, 13, 0, 57 }, { 7, 15, 12, 0, 71 }, { 7, 15, 12, 0, 69 }, { 7, 15, 12, 0, 67 }, { 7, 15, 12, 0, 65 }, { 7, 15, 12, 0, 63 }, { 7, 15, 12, 0, 62 }, { 7, 15, 12, 0, 60 }, { 7, 15, 12, 0, 58 }, { 7, 15, 12, 0, 57 }, { 7, 15, 11, 0, 70 }, { 7, 15, 11, 0, 68 }, { 7, 15, 11, 
0, 66 }, { 7, 15, 11, 0, 65 }, { 7, 15, 11, 0, 63 }, { 7, 15, 11, 0, 61 }, { 7, 15, 11, 0, 59 }, { 7, 15, 11, 0, 58 }, { 7, 15, 10, 0, 71 }, { 7, 15, 10, 0, 69 }, { 7, 15, 10, 0, 67 }, { 7, 15, 10, 0, 65 }, { 7, 15, 10, 0, 63 }, { 7, 15, 10, 0, 61 }, { 7, 15, 10, 0, 60 }, { 7, 15, 10, 0, 58 }, { 7, 15, 10, 0, 56 }, { 7, 15, 9, 0, 70 }, { 7, 15, 9, 0, 68 }, { 7, 15, 9, 0, 66 }, { 7, 15, 9, 0, 64 }, { 7, 15, 9, 0, 62 }, { 7, 15, 9, 0, 60 }, { 7, 15, 9, 0, 59 }, { 7, 14, 9, 0, 72 }, { 7, 14, 9, 0, 70 }, { 7, 14, 9, 0, 68 }, { 7, 14, 9, 0, 66 }, { 7, 14, 9, 0, 64 }, { 7, 14, 9, 0, 62 }, { 7, 14, 9, 0, 60 }, { 7, 14, 9, 0, 59 }, { 7, 13, 9, 0, 72 }, { 7, 13, 9, 0, 70 }, { 7, 13, 9, 0, 68 }, { 7, 13, 9, 0, 66 }, { 7, 13, 9, 0, 64 }, { 7, 13, 9, 0, 63 }, { 7, 13, 9, 0, 61 }, { 7, 13, 9, 0, 59 }, { 7, 13, 9, 0, 57 }, { 7, 13, 8, 0, 72 }, { 7, 13, 8, 0, 70 }, { 7, 13, 8, 0, 68 }, { 7, 13, 8, 0, 66 }, { 7, 13, 8, 0, 64 }, { 7, 13, 8, 0, 62 }, { 7, 13, 8, 0, 60 }, { 7, 13, 8, 0, 59 }, { 7, 12, 8, 0, 72 }, { 7, 12, 8, 0, 70 }, { 7, 12, 8, 0, 68 }, { 7, 12, 8, 0, 66 }, { 7, 12, 8, 0, 64 }, { 7, 12, 8, 0, 62 }, { 7, 12, 8, 0, 61 }, { 7, 12, 8, 0, 59 }, { 7, 12, 7, 0, 73 }, { 7, 12, 7, 0, 71 }, { 7, 12, 7, 0, 69 }, { 7, 12, 7, 0, 67 }, { 7, 12, 7, 0, 65 }, { 7, 12, 7, 0, 63 }, { 7, 12, 7, 0, 61 }, { 7, 12, 7, 0, 59 }, { 7, 11, 7, 0, 72 }, { 7, 11, 7, 0, 70 }, { 7, 11, 7, 0, 68 }, { 7, 11, 7, 0, 66 }, { 7, 11, 7, 0, 65 }, { 7, 11, 7, 0, 63 }, { 7, 11, 7, 0, 61 }, { 7, 11, 7, 0, 59 }, { 7, 11, 6, 0, 73 }, { 7, 11, 6, 0, 71 } }; static struct bwn_txgain_entry txgain_2ghz_r1[] = { { 4, 15, 15, 0, 90 }, { 4, 15, 15, 0, 88 }, { 4, 15, 15, 0, 85 }, { 4, 15, 15, 0, 83 }, { 4, 15, 15, 0, 81 }, { 4, 15, 15, 0, 78 }, { 4, 15, 15, 0, 76 }, { 4, 15, 15, 0, 74 }, { 4, 15, 15, 0, 72 }, { 4, 15, 15, 0, 70 }, { 4, 15, 15, 0, 68 }, { 4, 15, 15, 0, 66 }, { 4, 15, 15, 0, 64 }, { 4, 15, 15, 0, 62 }, { 4, 15, 15, 0, 60 }, { 4, 15, 15, 0, 59 }, { 4, 15, 14, 0, 72 }, { 4, 15, 14, 0, 70 }, { 4, 15, 14, 0, 68 }, { 4, 15, 14, 0, 66 }, { 4, 15, 14, 0, 64 }, { 4, 15, 14, 0, 62 }, { 4, 15, 14, 0, 60 }, { 4, 15, 14, 0, 59 }, { 4, 15, 13, 0, 72 }, { 4, 15, 13, 0, 70 }, { 4, 15, 13, 0, 68 }, { 4, 15, 13, 0, 66 }, { 4, 15, 13, 0, 64 }, { 4, 15, 13, 0, 62 }, { 4, 15, 13, 0, 60 }, { 4, 15, 13, 0, 59 }, { 4, 15, 12, 0, 72 }, { 4, 15, 12, 0, 70 }, { 4, 15, 12, 0, 68 }, { 4, 15, 12, 0, 66 }, { 4, 15, 12, 0, 64 }, { 4, 15, 12, 0, 62 }, { 4, 15, 12, 0, 60 }, { 4, 15, 12, 0, 59 }, { 4, 15, 11, 0, 72 }, { 4, 15, 11, 0, 70 }, { 4, 15, 11, 0, 68 }, { 4, 15, 11, 0, 66 }, { 4, 15, 11, 0, 64 }, { 4, 15, 11, 0, 62 }, { 4, 15, 11, 0, 60 }, { 4, 15, 11, 0, 59 }, { 4, 15, 10, 0, 72 }, { 4, 15, 10, 0, 70 }, { 4, 15, 10, 0, 68 }, { 4, 15, 10, 0, 66 }, { 4, 15, 10, 0, 64 }, { 4, 15, 10, 0, 62 }, { 4, 15, 10, 0, 60 }, { 4, 15, 10, 0, 59 }, { 4, 15, 9, 0, 72 }, { 4, 15, 9, 0, 70 }, { 4, 15, 9, 0, 68 }, { 4, 15, 9, 0, 66 }, { 4, 15, 9, 0, 64 }, { 4, 15, 9, 0, 62 }, { 4, 15, 9, 0, 60 }, { 4, 15, 9, 0, 59 }, { 4, 14, 9, 0, 72 }, { 4, 14, 9, 0, 70 }, { 4, 14, 9, 0, 68 }, { 4, 14, 9, 0, 66 }, { 4, 14, 9, 0, 64 }, { 4, 14, 9, 0, 62 }, { 4, 14, 9, 0, 60 }, { 4, 14, 9, 0, 59 }, { 4, 13, 9, 0, 72 }, { 4, 13, 9, 0, 70 }, { 4, 13, 9, 0, 68 }, { 4, 13, 9, 0, 66 }, { 4, 13, 9, 0, 64 }, { 4, 13, 9, 0, 63 }, { 4, 13, 9, 0, 61 }, { 4, 13, 9, 0, 59 }, { 4, 13, 9, 0, 57 }, { 4, 13, 8, 0, 72 }, { 4, 13, 8, 0, 70 }, { 4, 13, 8, 0, 68 }, { 4, 13, 8, 0, 66 }, { 4, 13, 8, 0, 64 }, { 4, 13, 8, 0, 62 }, { 4, 13, 8, 0, 60 }, { 4, 13, 8, 0, 59 }, { 4, 12, 8, 0, 72 }, { 4, 12, 8, 0, 70 
}, { 4, 12, 8, 0, 68 }, { 4, 12, 8, 0, 66 }, { 4, 12, 8, 0, 64 }, { 4, 12, 8, 0, 62 }, { 4, 12, 8, 0, 61 }, { 4, 12, 8, 0, 59 }, { 4, 12, 7, 0, 73 }, { 4, 12, 7, 0, 71 }, { 4, 12, 7, 0, 69 }, { 4, 12, 7, 0, 67 }, { 4, 12, 7, 0, 65 }, { 4, 12, 7, 0, 63 }, { 4, 12, 7, 0, 61 }, { 4, 12, 7, 0, 59 }, { 4, 11, 7, 0, 72 }, { 4, 11, 7, 0, 70 }, { 4, 11, 7, 0, 68 }, { 4, 11, 7, 0, 66 }, { 4, 11, 7, 0, 65 }, { 4, 11, 7, 0, 63 }, { 4, 11, 7, 0, 61 }, { 4, 11, 7, 0, 59 }, { 4, 11, 6, 0, 73 }, { 4, 11, 6, 0, 71 }, { 4, 11, 6, 0, 69 }, { 4, 11, 6, 0, 67 }, { 4, 11, 6, 0, 65 }, { 4, 11, 6, 0, 63 }, { 4, 11, 6, 0, 61 }, { 4, 11, 6, 0, 60 }, { 4, 10, 6, 0, 72 }, { 4, 10, 6, 0, 70 }, { 4, 10, 6, 0, 68 }, { 4, 10, 6, 0, 66 }, { 4, 10, 6, 0, 64 }, { 4, 10, 6, 0, 62 }, { 4, 10, 6, 0, 60 } }; static struct bwn_txgain_entry txgain_5ghz_r1[] = { { 7, 15, 15, 0, 99 }, { 7, 15, 15, 0, 96 }, { 7, 15, 15, 0, 93 }, { 7, 15, 15, 0, 90 }, { 7, 15, 15, 0, 88 }, { 7, 15, 15, 0, 85 }, { 7, 15, 15, 0, 83 }, { 7, 15, 15, 0, 81 }, { 7, 15, 15, 0, 78 }, { 7, 15, 15, 0, 76 }, { 7, 15, 15, 0, 74 }, { 7, 15, 15, 0, 72 }, { 7, 15, 15, 0, 70 }, { 7, 15, 15, 0, 68 }, { 7, 15, 15, 0, 66 }, { 7, 15, 15, 0, 64 }, { 7, 15, 15, 0, 62 }, { 7, 15, 15, 0, 60 }, { 7, 15, 15, 0, 59 }, { 7, 15, 15, 0, 57 }, { 7, 15, 15, 0, 55 }, { 7, 15, 14, 0, 72 }, { 7, 15, 14, 0, 70 }, { 7, 15, 14, 0, 68 }, { 7, 15, 14, 0, 66 }, { 7, 15, 14, 0, 64 }, { 7, 15, 14, 0, 62 }, { 7, 15, 14, 0, 60 }, { 7, 15, 14, 0, 58 }, { 7, 15, 14, 0, 56 }, { 7, 15, 14, 0, 55 }, { 7, 15, 13, 0, 71 }, { 7, 15, 13, 0, 69 }, { 7, 15, 13, 0, 67 }, { 7, 15, 13, 0, 65 }, { 7, 15, 13, 0, 63 }, { 7, 15, 13, 0, 62 }, { 7, 15, 13, 0, 60 }, { 7, 15, 13, 0, 58 }, { 7, 15, 13, 0, 56 }, { 7, 15, 12, 0, 72 }, { 7, 15, 12, 0, 70 }, { 7, 15, 12, 0, 68 }, { 7, 15, 12, 0, 66 }, { 7, 15, 12, 0, 64 }, { 7, 15, 12, 0, 62 }, { 7, 15, 12, 0, 60 }, { 7, 15, 12, 0, 59 }, { 7, 15, 12, 0, 57 }, { 7, 15, 11, 0, 73 }, { 7, 15, 11, 0, 71 }, { 7, 15, 11, 0, 69 }, { 7, 15, 11, 0, 67 }, { 7, 15, 11, 0, 65 }, { 7, 15, 11, 0, 63 }, { 7, 15, 11, 0, 61 }, { 7, 15, 11, 0, 60 }, { 7, 15, 11, 0, 58 }, { 7, 15, 10, 0, 71 }, { 7, 15, 10, 0, 69 }, { 7, 15, 10, 0, 67 }, { 7, 15, 10, 0, 65 }, { 7, 15, 10, 0, 63 }, { 7, 15, 10, 0, 61 }, { 7, 15, 10, 0, 60 }, { 7, 15, 10, 0, 58 }, { 7, 15, 9, 0, 70 }, { 7, 15, 9, 0, 68 }, { 7, 15, 9, 0, 66 }, { 7, 15, 9, 0, 64 }, { 7, 15, 9, 0, 62 }, { 7, 15, 9, 0, 61 }, { 7, 15, 9, 0, 59 }, { 7, 15, 9, 0, 57 }, { 7, 15, 9, 0, 56 }, { 7, 14, 9, 0, 68 }, { 7, 14, 9, 0, 66 }, { 7, 14, 9, 0, 65 }, { 7, 14, 9, 0, 63 }, { 7, 14, 9, 0, 61 }, { 7, 14, 9, 0, 59 }, { 7, 14, 9, 0, 58 }, { 7, 13, 9, 0, 70 }, { 7, 13, 9, 0, 68 }, { 7, 13, 9, 0, 66 }, { 7, 13, 9, 0, 64 }, { 7, 13, 9, 0, 63 }, { 7, 13, 9, 0, 61 }, { 7, 13, 9, 0, 59 }, { 7, 13, 9, 0, 57 }, { 7, 13, 8, 0, 70 }, { 7, 13, 8, 0, 68 }, { 7, 13, 8, 0, 66 }, { 7, 13, 8, 0, 64 }, { 7, 13, 8, 0, 62 }, { 7, 13, 8, 0, 60 }, { 7, 13, 8, 0, 59 }, { 7, 13, 8, 0, 57 }, { 7, 12, 8, 0, 70 }, { 7, 12, 8, 0, 68 }, { 7, 12, 8, 0, 66 }, { 7, 12, 8, 0, 64 }, { 7, 12, 8, 0, 62 }, { 7, 12, 8, 0, 61 }, { 7, 12, 8, 0, 59 }, { 7, 12, 8, 0, 57 }, { 7, 12, 7, 0, 70 }, { 7, 12, 7, 0, 68 }, { 7, 12, 7, 0, 66 }, { 7, 12, 7, 0, 64 }, { 7, 12, 7, 0, 62 }, { 7, 12, 7, 0, 61 }, { 7, 12, 7, 0, 59 }, { 7, 12, 7, 0, 57 }, { 7, 11, 7, 0, 70 }, { 7, 11, 7, 0, 68 }, { 7, 11, 7, 0, 66 }, { 7, 11, 7, 0, 64 }, { 7, 11, 7, 0, 62 }, { 7, 11, 7, 0, 61 }, { 7, 11, 7, 0, 59 }, { 7, 11, 7, 0, 57 }, { 7, 11, 6, 0, 69 }, { 7, 11, 6, 0, 67 }, { 7, 11, 6, 0, 65 }, { 7, 11, 6, 0, 63 }, { 7, 11, 
6, 0, 62 }, { 7, 11, 6, 0, 60 } }; if (mac->mac_phy.rev != 0 && mac->mac_phy.rev != 1) { if (siba_sprom_get_bf_hi(sc->sc_dev) & BWN_BFH_NOPA) bwn_phy_lp_gaintbl_write_multi(mac, 0, 128, txgain_r2); else if (IEEE80211_IS_CHAN_2GHZ(ic->ic_curchan)) bwn_phy_lp_gaintbl_write_multi(mac, 0, 128, txgain_2ghz_r2); else bwn_phy_lp_gaintbl_write_multi(mac, 0, 128, txgain_5ghz_r2); return; } if (mac->mac_phy.rev == 0) { if ((siba_sprom_get_bf_hi(sc->sc_dev) & BWN_BFH_NOPA) || (siba_sprom_get_bf_lo(sc->sc_dev) & BWN_BFL_HGPA)) bwn_phy_lp_gaintbl_write_multi(mac, 0, 128, txgain_r0); else if (IEEE80211_IS_CHAN_2GHZ(ic->ic_curchan)) bwn_phy_lp_gaintbl_write_multi(mac, 0, 128, txgain_2ghz_r0); else bwn_phy_lp_gaintbl_write_multi(mac, 0, 128, txgain_5ghz_r0); return; } if ((siba_sprom_get_bf_hi(sc->sc_dev) & BWN_BFH_NOPA) || (siba_sprom_get_bf_lo(sc->sc_dev) & BWN_BFL_HGPA)) bwn_phy_lp_gaintbl_write_multi(mac, 0, 128, txgain_r1); else if (IEEE80211_IS_CHAN_2GHZ(ic->ic_curchan)) bwn_phy_lp_gaintbl_write_multi(mac, 0, 128, txgain_2ghz_r1); else bwn_phy_lp_gaintbl_write_multi(mac, 0, 128, txgain_5ghz_r1); } static void bwn_tab_write(struct bwn_mac *mac, uint32_t typeoffset, uint32_t value) { uint32_t offset, type; type = BWN_TAB_GETTYPE(typeoffset); offset = BWN_TAB_GETOFFSET(typeoffset); KASSERT(offset <= 0xffff, ("%s:%d: fail", __func__, __LINE__)); switch (type) { case BWN_TAB_8BIT: KASSERT(!(value & ~0xff), ("%s:%d: fail", __func__, __LINE__)); BWN_PHY_WRITE(mac, BWN_PHY_TABLE_ADDR, offset); BWN_PHY_WRITE(mac, BWN_PHY_TABLEDATALO, value); break; case BWN_TAB_16BIT: KASSERT(!(value & ~0xffff), ("%s:%d: fail", __func__, __LINE__)); BWN_PHY_WRITE(mac, BWN_PHY_TABLE_ADDR, offset); BWN_PHY_WRITE(mac, BWN_PHY_TABLEDATALO, value); break; case BWN_TAB_32BIT: BWN_PHY_WRITE(mac, BWN_PHY_TABLE_ADDR, offset); BWN_PHY_WRITE(mac, BWN_PHY_TABLEDATAHI, value >> 16); BWN_PHY_WRITE(mac, BWN_PHY_TABLEDATALO, value); break; default: KASSERT(0 == 1, ("%s:%d: fail", __func__, __LINE__)); } } static int bwn_phy_lp_loopback(struct bwn_mac *mac) { struct bwn_phy_lp_iq_est ie; int i, index = -1; uint32_t tmp; memset(&ie, 0, sizeof(ie)); bwn_phy_lp_set_trsw_over(mac, 1, 1); BWN_PHY_SET(mac, BWN_PHY_AFE_CTL_OVR, 1); BWN_PHY_MASK(mac, BWN_PHY_AFE_CTL_OVRVAL, 0xfffe); BWN_PHY_SET(mac, BWN_PHY_RF_OVERRIDE_0, 0x800); BWN_PHY_SET(mac, BWN_PHY_RF_OVERRIDE_VAL_0, 0x800); BWN_PHY_SET(mac, BWN_PHY_RF_OVERRIDE_0, 0x8); BWN_PHY_SET(mac, BWN_PHY_RF_OVERRIDE_VAL_0, 0x8); BWN_RF_WRITE(mac, BWN_B2062_N_TXCTL_A, 0x80); BWN_PHY_SET(mac, BWN_PHY_RF_OVERRIDE_0, 0x80); BWN_PHY_SET(mac, BWN_PHY_RF_OVERRIDE_VAL_0, 0x80); for (i = 0; i < 32; i++) { bwn_phy_lp_set_rxgain_idx(mac, i); bwn_phy_lp_ddfs_turnon(mac, 1, 1, 5, 5, 0); if (!(bwn_phy_lp_rx_iq_est(mac, 1000, 32, &ie))) continue; tmp = (ie.ie_ipwr + ie.ie_qpwr) / 1000; if ((tmp > 4000) && (tmp < 10000)) { index = i; break; } } bwn_phy_lp_ddfs_turnoff(mac); return (index); } static void bwn_phy_lp_set_rxgain_idx(struct bwn_mac *mac, uint16_t idx) { bwn_phy_lp_set_rxgain(mac, bwn_tab_read(mac, BWN_TAB_2(12, idx))); } static void bwn_phy_lp_ddfs_turnon(struct bwn_mac *mac, int i_on, int q_on, int incr1, int incr2, int scale_idx) { bwn_phy_lp_ddfs_turnoff(mac); BWN_PHY_MASK(mac, BWN_PHY_AFE_DDFS_POINTER_INIT, 0xff80); BWN_PHY_MASK(mac, BWN_PHY_AFE_DDFS_POINTER_INIT, 0x80ff); BWN_PHY_SETMASK(mac, BWN_PHY_AFE_DDFS_INCR_INIT, 0xff80, incr1); BWN_PHY_SETMASK(mac, BWN_PHY_AFE_DDFS_INCR_INIT, 0x80ff, incr2 << 8); BWN_PHY_SETMASK(mac, BWN_PHY_AFE_DDFS, 0xfff7, i_on << 3); BWN_PHY_SETMASK(mac, BWN_PHY_AFE_DDFS, 
0xffef, q_on << 4); BWN_PHY_SETMASK(mac, BWN_PHY_AFE_DDFS, 0xff9f, scale_idx << 5); BWN_PHY_MASK(mac, BWN_PHY_AFE_DDFS, 0xfffb); BWN_PHY_SET(mac, BWN_PHY_AFE_DDFS, 0x2); BWN_PHY_SET(mac, BWN_PHY_LP_PHY_CTL, 0x20); } static uint8_t bwn_phy_lp_rx_iq_est(struct bwn_mac *mac, uint16_t sample, uint8_t time, struct bwn_phy_lp_iq_est *ie) { int i; BWN_PHY_MASK(mac, BWN_PHY_CRSGAIN_CTL, 0xfff7); BWN_PHY_WRITE(mac, BWN_PHY_IQ_NUM_SMPLS_ADDR, sample); BWN_PHY_SETMASK(mac, BWN_PHY_IQ_ENABLE_WAIT_TIME_ADDR, 0xff00, time); BWN_PHY_MASK(mac, BWN_PHY_IQ_ENABLE_WAIT_TIME_ADDR, 0xfeff); BWN_PHY_SET(mac, BWN_PHY_IQ_ENABLE_WAIT_TIME_ADDR, 0x200); for (i = 0; i < 500; i++) { if (!(BWN_PHY_READ(mac, BWN_PHY_IQ_ENABLE_WAIT_TIME_ADDR) & 0x200)) break; DELAY(1000); } if ((BWN_PHY_READ(mac, BWN_PHY_IQ_ENABLE_WAIT_TIME_ADDR) & 0x200)) { BWN_PHY_SET(mac, BWN_PHY_CRSGAIN_CTL, 0x8); return 0; } ie->ie_iqprod = BWN_PHY_READ(mac, BWN_PHY_IQ_ACC_HI_ADDR); ie->ie_iqprod <<= 16; ie->ie_iqprod |= BWN_PHY_READ(mac, BWN_PHY_IQ_ACC_LO_ADDR); ie->ie_ipwr = BWN_PHY_READ(mac, BWN_PHY_IQ_I_PWR_ACC_HI_ADDR); ie->ie_ipwr <<= 16; ie->ie_ipwr |= BWN_PHY_READ(mac, BWN_PHY_IQ_I_PWR_ACC_LO_ADDR); ie->ie_qpwr = BWN_PHY_READ(mac, BWN_PHY_IQ_Q_PWR_ACC_HI_ADDR); ie->ie_qpwr <<= 16; ie->ie_qpwr |= BWN_PHY_READ(mac, BWN_PHY_IQ_Q_PWR_ACC_LO_ADDR); BWN_PHY_SET(mac, BWN_PHY_CRSGAIN_CTL, 0x8); return 1; } static uint32_t bwn_tab_read(struct bwn_mac *mac, uint32_t typeoffset) { uint32_t offset, type, value; type = BWN_TAB_GETTYPE(typeoffset); offset = BWN_TAB_GETOFFSET(typeoffset); KASSERT(offset <= 0xffff, ("%s:%d: fail", __func__, __LINE__)); switch (type) { case BWN_TAB_8BIT: BWN_PHY_WRITE(mac, BWN_PHY_TABLE_ADDR, offset); value = BWN_PHY_READ(mac, BWN_PHY_TABLEDATALO) & 0xff; break; case BWN_TAB_16BIT: BWN_PHY_WRITE(mac, BWN_PHY_TABLE_ADDR, offset); value = BWN_PHY_READ(mac, BWN_PHY_TABLEDATALO); break; case BWN_TAB_32BIT: BWN_PHY_WRITE(mac, BWN_PHY_TABLE_ADDR, offset); value = BWN_PHY_READ(mac, BWN_PHY_TABLEDATAHI); value <<= 16; value |= BWN_PHY_READ(mac, BWN_PHY_TABLEDATALO); break; default: KASSERT(0 == 1, ("%s:%d: fail", __func__, __LINE__)); value = 0; } return (value); } static void bwn_phy_lp_ddfs_turnoff(struct bwn_mac *mac) { BWN_PHY_MASK(mac, BWN_PHY_AFE_DDFS, 0xfffd); BWN_PHY_MASK(mac, BWN_PHY_LP_PHY_CTL, 0xffdf); } static void bwn_phy_lp_set_txgain_dac(struct bwn_mac *mac, uint16_t dac) { uint16_t ctl; ctl = BWN_PHY_READ(mac, BWN_PHY_AFE_DAC_CTL) & 0xc7f; ctl |= dac << 7; BWN_PHY_SETMASK(mac, BWN_PHY_AFE_DAC_CTL, 0xf000, ctl); } static void bwn_phy_lp_set_txgain_pa(struct bwn_mac *mac, uint16_t gain) { BWN_PHY_SETMASK(mac, BWN_PHY_OFDM(0xfb), 0xe03f, gain << 6); BWN_PHY_SETMASK(mac, BWN_PHY_OFDM(0xfd), 0x80ff, gain << 8); } static void bwn_phy_lp_set_txgain_override(struct bwn_mac *mac) { if (mac->mac_phy.rev < 2) BWN_PHY_SET(mac, BWN_PHY_RF_OVERRIDE_2, 0x100); else { BWN_PHY_SET(mac, BWN_PHY_RF_OVERRIDE_2, 0x80); BWN_PHY_SET(mac, BWN_PHY_RF_OVERRIDE_2, 0x4000); } BWN_PHY_SET(mac, BWN_PHY_AFE_CTL_OVR, 0x40); } static uint16_t bwn_phy_lp_get_pa_gain(struct bwn_mac *mac) { return BWN_PHY_READ(mac, BWN_PHY_OFDM(0xfb)) & 0x7f; } static uint8_t bwn_nbits(int32_t val) { uint32_t tmp; uint8_t nbits = 0; for (tmp = abs(val); tmp != 0; tmp >>= 1) nbits++; return (nbits); } static void bwn_phy_lp_gaintbl_write_multi(struct bwn_mac *mac, int offset, int count, struct bwn_txgain_entry *table) { int i; for (i = offset; i < count; i++) bwn_phy_lp_gaintbl_write(mac, i, table[i]); } static void bwn_phy_lp_gaintbl_write(struct bwn_mac *mac, int 
offset, struct bwn_txgain_entry data) { if (mac->mac_phy.rev >= 2) bwn_phy_lp_gaintbl_write_r2(mac, offset, data); else bwn_phy_lp_gaintbl_write_r01(mac, offset, data); } static void bwn_phy_lp_gaintbl_write_r2(struct bwn_mac *mac, int offset, struct bwn_txgain_entry te) { struct bwn_softc *sc = mac->mac_sc; struct ieee80211com *ic = &sc->sc_ic; uint32_t tmp; KASSERT(mac->mac_phy.rev >= 2, ("%s:%d: fail", __func__, __LINE__)); tmp = (te.te_pad << 16) | (te.te_pga << 8) | te.te_gm; if (mac->mac_phy.rev >= 3) { tmp |= ((IEEE80211_IS_CHAN_5GHZ(ic->ic_curchan)) ? (0x10 << 24) : (0x70 << 24)); } else { tmp |= ((IEEE80211_IS_CHAN_5GHZ(ic->ic_curchan)) ? (0x14 << 24) : (0x7f << 24)); } bwn_tab_write(mac, BWN_TAB_4(7, 0xc0 + offset), tmp); bwn_tab_write(mac, BWN_TAB_4(7, 0x140 + offset), te.te_bbmult << 20 | te.te_dac << 28); } static void bwn_phy_lp_gaintbl_write_r01(struct bwn_mac *mac, int offset, struct bwn_txgain_entry te) { KASSERT(mac->mac_phy.rev < 2, ("%s:%d: fail", __func__, __LINE__)); bwn_tab_write(mac, BWN_TAB_4(10, 0xc0 + offset), (te.te_pad << 11) | (te.te_pga << 7) | (te.te_gm << 4) | te.te_dac); bwn_tab_write(mac, BWN_TAB_4(10, 0x140 + offset), te.te_bbmult << 20); } Index: head/sys/dev/ciss/ciss.c =================================================================== --- head/sys/dev/ciss/ciss.c (revision 327948) +++ head/sys/dev/ciss/ciss.c (revision 327949) @@ -1,4733 +1,4733 @@ /*- * SPDX-License-Identifier: BSD-2-Clause-FreeBSD * * Copyright (c) 2001 Michael Smith * Copyright (c) 2004 Paul Saab * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * $FreeBSD$ */ /* * Common Interface for SCSI-3 Support driver. * * CISS claims to provide a common interface between a generic SCSI * transport and an intelligent host adapter. * * This driver supports CISS as defined in the document "CISS Command * Interface for SCSI-3 Support Open Specification", Version 1.04, * Valence Number 1, dated 20001127, produced by Compaq Computer * Corporation. This document appears to be a hastily and somewhat * arbitrarlily cut-down version of a larger (and probably even more * chaotic and inconsistent) Compaq internal document. Various * details were also gleaned from Compaq's "cciss" driver for Linux. 
* * We provide a shim layer between the CISS interface and CAM, * offloading most of the queueing and being-a-disk chores onto CAM. * Entry to the driver is via the PCI bus attachment (ciss_probe, * ciss_attach, etc) and via the CAM interface (ciss_cam_action, * ciss_cam_poll). The Compaq CISS adapters are, however, poor SCSI * citizens and we have to fake up some responses to get reasonable * behaviour out of them. In addition, the CISS command set is by no * means adequate to support the functionality of a RAID controller, * and thus the supported Compaq adapters utilise portions of the * control protocol from earlier Compaq adapter families. * * Note that we only support the "simple" transport layer over PCI. * This interface (ab)uses the I2O register set (specifically the post * queues) to exchange commands with the adapter. Other interfaces * are available, but we aren't supposed to know about them, and it is * dubious whether they would provide major performance improvements * except under extreme load. * * Currently the only supported CISS adapters are the Compaq Smart * Array 5* series (5300, 5i, 532). Even with only three adapters, * Compaq still manage to have interface variations. * * * Thanks must go to Fred Harris and Darryl DeVinney at Compaq, as * well as Paul Saab at Yahoo! for their assistance in making this * driver happen. * * More thanks must go to John Cagle at HP for the countless hours * spent making this driver "work" with the MSA* series storage * enclosures. Without his help (and nagging), this driver could not * be used with these enclosures. */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include static MALLOC_DEFINE(CISS_MALLOC_CLASS, "ciss_data", "ciss internal data buffers"); /* pci interface */ static int ciss_lookup(device_t dev); static int ciss_probe(device_t dev); static int ciss_attach(device_t dev); static int ciss_detach(device_t dev); static int ciss_shutdown(device_t dev); /* (de)initialisation functions, control wrappers */ static int ciss_init_pci(struct ciss_softc *sc); static int ciss_setup_msix(struct ciss_softc *sc); static int ciss_init_perf(struct ciss_softc *sc); static int ciss_wait_adapter(struct ciss_softc *sc); static int ciss_flush_adapter(struct ciss_softc *sc); static int ciss_init_requests(struct ciss_softc *sc); static void ciss_command_map_helper(void *arg, bus_dma_segment_t *segs, int nseg, int error); static int ciss_identify_adapter(struct ciss_softc *sc); static int ciss_init_logical(struct ciss_softc *sc); static int ciss_init_physical(struct ciss_softc *sc); static int ciss_filter_physical(struct ciss_softc *sc, struct ciss_lun_report *cll); static int ciss_identify_logical(struct ciss_softc *sc, struct ciss_ldrive *ld); static int ciss_get_ldrive_status(struct ciss_softc *sc, struct ciss_ldrive *ld); static int ciss_update_config(struct ciss_softc *sc); static int ciss_accept_media(struct ciss_softc *sc, struct ciss_ldrive *ld); static void ciss_init_sysctl(struct ciss_softc *sc); static void ciss_soft_reset(struct ciss_softc *sc); static void ciss_free(struct ciss_softc *sc); static void ciss_spawn_notify_thread(struct ciss_softc *sc); static void ciss_kill_notify_thread(struct ciss_softc *sc); /* request submission/completion */ static int ciss_start(struct ciss_request *cr); static void ciss_done(struct ciss_softc *sc, cr_qhead_t *qh); static 
void ciss_perf_done(struct ciss_softc *sc, cr_qhead_t *qh); static void ciss_intr(void *arg); static void ciss_perf_intr(void *arg); static void ciss_perf_msi_intr(void *arg); static void ciss_complete(struct ciss_softc *sc, cr_qhead_t *qh); static int _ciss_report_request(struct ciss_request *cr, int *command_status, int *scsi_status, const char *func); static int ciss_synch_request(struct ciss_request *cr, int timeout); static int ciss_poll_request(struct ciss_request *cr, int timeout); static int ciss_wait_request(struct ciss_request *cr, int timeout); #if 0 static int ciss_abort_request(struct ciss_request *cr); #endif /* request queueing */ static int ciss_get_request(struct ciss_softc *sc, struct ciss_request **crp); static void ciss_preen_command(struct ciss_request *cr); static void ciss_release_request(struct ciss_request *cr); /* request helpers */ static int ciss_get_bmic_request(struct ciss_softc *sc, struct ciss_request **crp, int opcode, void **bufp, size_t bufsize); static int ciss_user_command(struct ciss_softc *sc, IOCTL_Command_struct *ioc); /* DMA map/unmap */ static int ciss_map_request(struct ciss_request *cr); static void ciss_request_map_helper(void *arg, bus_dma_segment_t *segs, int nseg, int error); static void ciss_unmap_request(struct ciss_request *cr); /* CAM interface */ static int ciss_cam_init(struct ciss_softc *sc); static void ciss_cam_rescan_target(struct ciss_softc *sc, int bus, int target); static void ciss_cam_action(struct cam_sim *sim, union ccb *ccb); static int ciss_cam_action_io(struct cam_sim *sim, struct ccb_scsiio *csio); static int ciss_cam_emulate(struct ciss_softc *sc, struct ccb_scsiio *csio); static void ciss_cam_poll(struct cam_sim *sim); static void ciss_cam_complete(struct ciss_request *cr); static void ciss_cam_complete_fixup(struct ciss_softc *sc, struct ccb_scsiio *csio); static int ciss_name_device(struct ciss_softc *sc, int bus, int target); /* periodic status monitoring */ static void ciss_periodic(void *arg); static void ciss_nop_complete(struct ciss_request *cr); static void ciss_disable_adapter(struct ciss_softc *sc); static void ciss_notify_event(struct ciss_softc *sc); static void ciss_notify_complete(struct ciss_request *cr); static int ciss_notify_abort(struct ciss_softc *sc); static int ciss_notify_abort_bmic(struct ciss_softc *sc); static void ciss_notify_hotplug(struct ciss_softc *sc, struct ciss_notify *cn); static void ciss_notify_logical(struct ciss_softc *sc, struct ciss_notify *cn); static void ciss_notify_physical(struct ciss_softc *sc, struct ciss_notify *cn); /* debugging output */ static void ciss_print_request(struct ciss_request *cr); static void ciss_print_ldrive(struct ciss_softc *sc, struct ciss_ldrive *ld); static const char *ciss_name_ldrive_status(int status); static int ciss_decode_ldrive_status(int status); static const char *ciss_name_ldrive_org(int org); static const char *ciss_name_command_status(int status); /* * PCI bus interface. */ static device_method_t ciss_methods[] = { /* Device interface */ DEVMETHOD(device_probe, ciss_probe), DEVMETHOD(device_attach, ciss_attach), DEVMETHOD(device_detach, ciss_detach), DEVMETHOD(device_shutdown, ciss_shutdown), { 0, 0 } }; static driver_t ciss_pci_driver = { "ciss", ciss_methods, sizeof(struct ciss_softc) }; static devclass_t ciss_devclass; DRIVER_MODULE(ciss, pci, ciss_pci_driver, ciss_devclass, 0, 0); MODULE_DEPEND(ciss, cam, 1, 1, 1); MODULE_DEPEND(ciss, pci, 1, 1, 1); /* * Control device interface. 
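 *
 * The /dev/ciss%d node created in ciss_attach() routes ioctls through
 * ciss_ioctl() to ciss_user_command(); judging from the use of
 * IOCTL_Command_struct above, this is meant to stay compatible with the
 * Linux cciss pass-through interface used by array management tools.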
*/ static d_open_t ciss_open; static d_close_t ciss_close; static d_ioctl_t ciss_ioctl; static struct cdevsw ciss_cdevsw = { .d_version = D_VERSION, .d_flags = 0, .d_open = ciss_open, .d_close = ciss_close, .d_ioctl = ciss_ioctl, .d_name = "ciss", }; /* * This tunable can be set at boot time and controls whether physical devices * that are marked hidden by the firmware should be exposed anyways. */ static unsigned int ciss_expose_hidden_physical = 0; TUNABLE_INT("hw.ciss.expose_hidden_physical", &ciss_expose_hidden_physical); static unsigned int ciss_nop_message_heartbeat = 0; TUNABLE_INT("hw.ciss.nop_message_heartbeat", &ciss_nop_message_heartbeat); /* * This tunable can force a particular transport to be used: * <= 0 : use default * 1 : force simple * 2 : force performant */ static int ciss_force_transport = 0; TUNABLE_INT("hw.ciss.force_transport", &ciss_force_transport); /* * This tunable can force a particular interrupt delivery method to be used: * <= 0 : use default * 1 : force INTx * 2 : force MSIX */ static int ciss_force_interrupt = 0; TUNABLE_INT("hw.ciss.force_interrupt", &ciss_force_interrupt); /************************************************************************ * CISS adapters amazingly don't have a defined programming interface * value. (One could say some very despairing things about PCI and * people just not getting the general idea.) So we are forced to * stick with matching against subvendor/subdevice, and thus have to * be updated for every new CISS adapter that appears. */ #define CISS_BOARD_UNKNWON 0 #define CISS_BOARD_SA5 1 #define CISS_BOARD_SA5B 2 #define CISS_BOARD_NOMSI (1<<4) #define CISS_BOARD_SIMPLE (1<<5) static struct { u_int16_t subvendor; u_int16_t subdevice; int flags; char *desc; } ciss_vendor_data[] = { { 0x0e11, 0x4070, CISS_BOARD_SA5|CISS_BOARD_NOMSI|CISS_BOARD_SIMPLE, "Compaq Smart Array 5300" }, { 0x0e11, 0x4080, CISS_BOARD_SA5B|CISS_BOARD_NOMSI, "Compaq Smart Array 5i" }, { 0x0e11, 0x4082, CISS_BOARD_SA5B|CISS_BOARD_NOMSI, "Compaq Smart Array 532" }, { 0x0e11, 0x4083, CISS_BOARD_SA5B|CISS_BOARD_NOMSI, "HP Smart Array 5312" }, { 0x0e11, 0x4091, CISS_BOARD_SA5, "HP Smart Array 6i" }, { 0x0e11, 0x409A, CISS_BOARD_SA5, "HP Smart Array 641" }, { 0x0e11, 0x409B, CISS_BOARD_SA5, "HP Smart Array 642" }, { 0x0e11, 0x409C, CISS_BOARD_SA5, "HP Smart Array 6400" }, { 0x0e11, 0x409D, CISS_BOARD_SA5, "HP Smart Array 6400 EM" }, { 0x103C, 0x3211, CISS_BOARD_SA5, "HP Smart Array E200i" }, { 0x103C, 0x3212, CISS_BOARD_SA5, "HP Smart Array E200" }, { 0x103C, 0x3213, CISS_BOARD_SA5, "HP Smart Array E200i" }, { 0x103C, 0x3214, CISS_BOARD_SA5, "HP Smart Array E200i" }, { 0x103C, 0x3215, CISS_BOARD_SA5, "HP Smart Array E200i" }, { 0x103C, 0x3220, CISS_BOARD_SA5, "HP Smart Array" }, { 0x103C, 0x3222, CISS_BOARD_SA5, "HP Smart Array" }, { 0x103C, 0x3223, CISS_BOARD_SA5, "HP Smart Array P800" }, { 0x103C, 0x3225, CISS_BOARD_SA5, "HP Smart Array P600" }, { 0x103C, 0x3230, CISS_BOARD_SA5, "HP Smart Array" }, { 0x103C, 0x3231, CISS_BOARD_SA5, "HP Smart Array" }, { 0x103C, 0x3232, CISS_BOARD_SA5, "HP Smart Array" }, { 0x103C, 0x3233, CISS_BOARD_SA5, "HP Smart Array" }, { 0x103C, 0x3234, CISS_BOARD_SA5, "HP Smart Array P400" }, { 0x103C, 0x3235, CISS_BOARD_SA5, "HP Smart Array P400i" }, { 0x103C, 0x3236, CISS_BOARD_SA5, "HP Smart Array" }, { 0x103C, 0x3237, CISS_BOARD_SA5, "HP Smart Array E500" }, { 0x103C, 0x3238, CISS_BOARD_SA5, "HP Smart Array" }, { 0x103C, 0x3239, CISS_BOARD_SA5, "HP Smart Array" }, { 0x103C, 0x323A, CISS_BOARD_SA5, "HP Smart Array" }, { 0x103C, 
0x323B, CISS_BOARD_SA5, "HP Smart Array" }, { 0x103C, 0x323C, CISS_BOARD_SA5, "HP Smart Array" }, { 0x103C, 0x323D, CISS_BOARD_SA5, "HP Smart Array P700m" }, { 0x103C, 0x3241, CISS_BOARD_SA5, "HP Smart Array P212" }, { 0x103C, 0x3243, CISS_BOARD_SA5, "HP Smart Array P410" }, { 0x103C, 0x3245, CISS_BOARD_SA5, "HP Smart Array P410i" }, { 0x103C, 0x3247, CISS_BOARD_SA5, "HP Smart Array P411" }, { 0x103C, 0x3249, CISS_BOARD_SA5, "HP Smart Array P812" }, { 0x103C, 0x324A, CISS_BOARD_SA5, "HP Smart Array P712m" }, { 0x103C, 0x324B, CISS_BOARD_SA5, "HP Smart Array" }, { 0x103C, 0x3350, CISS_BOARD_SA5, "HP Smart Array P222" }, { 0x103C, 0x3351, CISS_BOARD_SA5, "HP Smart Array P420" }, { 0x103C, 0x3352, CISS_BOARD_SA5, "HP Smart Array P421" }, { 0x103C, 0x3353, CISS_BOARD_SA5, "HP Smart Array P822" }, { 0x103C, 0x3354, CISS_BOARD_SA5, "HP Smart Array P420i" }, { 0x103C, 0x3355, CISS_BOARD_SA5, "HP Smart Array P220i" }, { 0x103C, 0x3356, CISS_BOARD_SA5, "HP Smart Array P721m" }, { 0x103C, 0x1920, CISS_BOARD_SA5, "HP Smart Array P430i" }, { 0x103C, 0x1921, CISS_BOARD_SA5, "HP Smart Array P830i" }, { 0x103C, 0x1922, CISS_BOARD_SA5, "HP Smart Array P430" }, { 0x103C, 0x1923, CISS_BOARD_SA5, "HP Smart Array P431" }, { 0x103C, 0x1924, CISS_BOARD_SA5, "HP Smart Array P830" }, { 0x103C, 0x1926, CISS_BOARD_SA5, "HP Smart Array P731m" }, { 0x103C, 0x1928, CISS_BOARD_SA5, "HP Smart Array P230i" }, { 0x103C, 0x1929, CISS_BOARD_SA5, "HP Smart Array P530" }, { 0x103C, 0x192A, CISS_BOARD_SA5, "HP Smart Array P531" }, { 0x103C, 0x21BD, CISS_BOARD_SA5, "HP Smart Array P244br" }, { 0x103C, 0x21BE, CISS_BOARD_SA5, "HP Smart Array P741m" }, { 0x103C, 0x21BF, CISS_BOARD_SA5, "HP Smart Array H240ar" }, { 0x103C, 0x21C0, CISS_BOARD_SA5, "HP Smart Array P440ar" }, { 0x103C, 0x21C1, CISS_BOARD_SA5, "HP Smart Array P840ar" }, { 0x103C, 0x21C2, CISS_BOARD_SA5, "HP Smart Array P440" }, { 0x103C, 0x21C3, CISS_BOARD_SA5, "HP Smart Array P441" }, { 0x103C, 0x21C5, CISS_BOARD_SA5, "HP Smart Array P841" }, { 0x103C, 0x21C6, CISS_BOARD_SA5, "HP Smart Array H244br" }, { 0x103C, 0x21C7, CISS_BOARD_SA5, "HP Smart Array H240" }, { 0x103C, 0x21C8, CISS_BOARD_SA5, "HP Smart Array H241" }, { 0x103C, 0x21CA, CISS_BOARD_SA5, "HP Smart Array P246br" }, { 0x103C, 0x21CB, CISS_BOARD_SA5, "HP Smart Array P840" }, { 0x103C, 0x21CC, CISS_BOARD_SA5, "HP Smart Array TBD" }, { 0x103C, 0x21CD, CISS_BOARD_SA5, "HP Smart Array P240nr" }, { 0x103C, 0x21CE, CISS_BOARD_SA5, "HP Smart Array H240nr" }, { 0, 0, 0, NULL } }; /************************************************************************ * Find a match for the device in our list of known adapters. */ static int ciss_lookup(device_t dev) { int i; for (i = 0; ciss_vendor_data[i].desc != NULL; i++) if ((pci_get_subvendor(dev) == ciss_vendor_data[i].subvendor) && (pci_get_subdevice(dev) == ciss_vendor_data[i].subdevice)) { return(i); } return(-1); } /************************************************************************ * Match a known CISS adapter. */ static int ciss_probe(device_t dev) { int i; i = ciss_lookup(dev); if (i != -1) { device_set_desc(dev, ciss_vendor_data[i].desc); return(BUS_PROBE_DEFAULT); } return(ENOENT); } /************************************************************************ * Attach the driver to this adapter. 
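 *
 * Roughly in order: PCI/transport setup (ciss_init_pci), the free and
 * notify queues, sysctls, the command/request pool, adapter identification,
 * the physical and logical device scans, interrupt enable, CAM attachment,
 * the periodic heartbeat, the /dev/ciss%d control node and the notify
 * thread.  Any failure unwinds through ciss_free() with the mutex held.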
*/ static int ciss_attach(device_t dev) { struct ciss_softc *sc; int error; debug_called(1); #ifdef CISS_DEBUG /* print structure/union sizes */ debug_struct(ciss_command); debug_struct(ciss_header); debug_union(ciss_device_address); debug_struct(ciss_cdb); debug_struct(ciss_report_cdb); debug_struct(ciss_notify_cdb); debug_struct(ciss_notify); debug_struct(ciss_message_cdb); debug_struct(ciss_error_info_pointer); debug_struct(ciss_error_info); debug_struct(ciss_sg_entry); debug_struct(ciss_config_table); debug_struct(ciss_bmic_cdb); debug_struct(ciss_bmic_id_ldrive); debug_struct(ciss_bmic_id_lstatus); debug_struct(ciss_bmic_id_table); debug_struct(ciss_bmic_id_pdrive); debug_struct(ciss_bmic_blink_pdrive); debug_struct(ciss_bmic_flush_cache); debug_const(CISS_MAX_REQUESTS); debug_const(CISS_MAX_LOGICAL); debug_const(CISS_INTERRUPT_COALESCE_DELAY); debug_const(CISS_INTERRUPT_COALESCE_COUNT); debug_const(CISS_COMMAND_ALLOC_SIZE); debug_const(CISS_COMMAND_SG_LENGTH); debug_type(cciss_pci_info_struct); debug_type(cciss_coalint_struct); debug_type(cciss_coalint_struct); debug_type(NodeName_type); debug_type(NodeName_type); debug_type(Heartbeat_type); debug_type(BusTypes_type); debug_type(FirmwareVer_type); debug_type(DriverVer_type); debug_type(IOCTL_Command_struct); #endif sc = device_get_softc(dev); sc->ciss_dev = dev; mtx_init(&sc->ciss_mtx, "cissmtx", NULL, MTX_DEF); callout_init_mtx(&sc->ciss_periodic, &sc->ciss_mtx, 0); /* * Do PCI-specific init. */ if ((error = ciss_init_pci(sc)) != 0) goto out; /* * Initialise driver queues. */ ciss_initq_free(sc); ciss_initq_notify(sc); /* * Initialize device sysctls. */ ciss_init_sysctl(sc); /* * Initialise command/request pool. */ if ((error = ciss_init_requests(sc)) != 0) goto out; /* * Get adapter information. */ if ((error = ciss_identify_adapter(sc)) != 0) goto out; /* * Find all the physical devices. */ if ((error = ciss_init_physical(sc)) != 0) goto out; /* * Build our private table of logical devices. */ if ((error = ciss_init_logical(sc)) != 0) goto out; /* * Enable interrupts so that the CAM scan can complete. */ CISS_TL_SIMPLE_ENABLE_INTERRUPTS(sc); /* * Initialise the CAM interface. */ if ((error = ciss_cam_init(sc)) != 0) goto out; /* * Start the heartbeat routine and event chain. */ ciss_periodic(sc); /* * Create the control device. */ sc->ciss_dev_t = make_dev(&ciss_cdevsw, device_get_unit(sc->ciss_dev), UID_ROOT, GID_OPERATOR, S_IRUSR | S_IWUSR, "ciss%d", device_get_unit(sc->ciss_dev)); sc->ciss_dev_t->si_drv1 = sc; /* * The adapter is running; synchronous commands can now sleep * waiting for an interrupt to signal completion. */ sc->ciss_flags |= CISS_FLAG_RUNNING; ciss_spawn_notify_thread(sc); error = 0; out: if (error != 0) { /* ciss_free() expects the mutex to be held */ mtx_lock(&sc->ciss_mtx); ciss_free(sc); } return(error); } /************************************************************************ * Detach the driver from this adapter. */ static int ciss_detach(device_t dev) { struct ciss_softc *sc = device_get_softc(dev); debug_called(1); mtx_lock(&sc->ciss_mtx); if (sc->ciss_flags & CISS_FLAG_CONTROL_OPEN) { mtx_unlock(&sc->ciss_mtx); return (EBUSY); } /* flush adapter cache */ ciss_flush_adapter(sc); /* release all resources. The mutex is released and freed here too. */ ciss_free(sc); return(0); } /************************************************************************ * Prepare adapter for system shutdown. 
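 *
 * The adapter cache is flushed unconditionally; a soft reset of any proxy
 * controllers is only attempted when the 'soft_reset' sysctl has been set.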
*/ static int ciss_shutdown(device_t dev) { struct ciss_softc *sc = device_get_softc(dev); debug_called(1); mtx_lock(&sc->ciss_mtx); /* flush adapter cache */ ciss_flush_adapter(sc); if (sc->ciss_soft_reset) ciss_soft_reset(sc); mtx_unlock(&sc->ciss_mtx); return(0); } static void ciss_init_sysctl(struct ciss_softc *sc) { SYSCTL_ADD_INT(device_get_sysctl_ctx(sc->ciss_dev), SYSCTL_CHILDREN(device_get_sysctl_tree(sc->ciss_dev)), OID_AUTO, "soft_reset", CTLFLAG_RW, &sc->ciss_soft_reset, 0, ""); } /************************************************************************ * Perform PCI-specific attachment actions. */ static int ciss_init_pci(struct ciss_softc *sc) { uintptr_t cbase, csize, cofs; uint32_t method, supported_methods; int error, sqmask, i; void *intr; debug_called(1); /* * Work out adapter type. */ i = ciss_lookup(sc->ciss_dev); if (i < 0) { ciss_printf(sc, "unknown adapter type\n"); return (ENXIO); } if (ciss_vendor_data[i].flags & CISS_BOARD_SA5) { sqmask = CISS_TL_SIMPLE_INTR_OPQ_SA5; } else if (ciss_vendor_data[i].flags & CISS_BOARD_SA5B) { sqmask = CISS_TL_SIMPLE_INTR_OPQ_SA5B; } else { /* * XXX Big hammer, masks/unmasks all possible interrupts. This should * work on all hardware variants. Need to add code to handle the * "controller crashed" interrupt bit that this unmasks. */ sqmask = ~0; } /* * Allocate register window first (we need this to find the config * struct). */ error = ENXIO; sc->ciss_regs_rid = CISS_TL_SIMPLE_BAR_REGS; if ((sc->ciss_regs_resource = bus_alloc_resource_any(sc->ciss_dev, SYS_RES_MEMORY, &sc->ciss_regs_rid, RF_ACTIVE)) == NULL) { ciss_printf(sc, "can't allocate register window\n"); return(ENXIO); } sc->ciss_regs_bhandle = rman_get_bushandle(sc->ciss_regs_resource); sc->ciss_regs_btag = rman_get_bustag(sc->ciss_regs_resource); /* * Find the BAR holding the config structure. If it's not the one * we already mapped for registers, map it too. */ sc->ciss_cfg_rid = CISS_TL_SIMPLE_READ(sc, CISS_TL_SIMPLE_CFG_BAR) & 0xffff; if (sc->ciss_cfg_rid != sc->ciss_regs_rid) { if ((sc->ciss_cfg_resource = bus_alloc_resource_any(sc->ciss_dev, SYS_RES_MEMORY, &sc->ciss_cfg_rid, RF_ACTIVE)) == NULL) { ciss_printf(sc, "can't allocate config window\n"); return(ENXIO); } cbase = (uintptr_t)rman_get_virtual(sc->ciss_cfg_resource); csize = rman_get_end(sc->ciss_cfg_resource) - rman_get_start(sc->ciss_cfg_resource) + 1; } else { cbase = (uintptr_t)rman_get_virtual(sc->ciss_regs_resource); csize = rman_get_end(sc->ciss_regs_resource) - rman_get_start(sc->ciss_regs_resource) + 1; } cofs = CISS_TL_SIMPLE_READ(sc, CISS_TL_SIMPLE_CFG_OFF); /* * Use the base/size/offset values we just calculated to * sanity-check the config structure. If it's OK, point to it. */ if ((cofs + sizeof(struct ciss_config_table)) > csize) { ciss_printf(sc, "config table outside window\n"); return(ENXIO); } sc->ciss_cfg = (struct ciss_config_table *)(cbase + cofs); debug(1, "config struct at %p", sc->ciss_cfg); /* * Calculate the number of request structures/commands we are * going to provide for this adapter. */ sc->ciss_max_requests = min(CISS_MAX_REQUESTS, sc->ciss_cfg->max_outstanding_commands); /* * Validate the config structure. If we supported other transport * methods, we could select amongst them at this point in time. 
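 *
 * Transport selection below: hw.ciss.force_transport can pin Simple (1) or
 * Performant (2); boards flagged CISS_BOARD_SIMPLE are pinned to Simple;
 * otherwise whatever the config table advertises is used, preferring
 * Performant and falling back to Simple if ciss_init_perf() fails.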
*/ if (strncmp(sc->ciss_cfg->signature, "CISS", 4)) { ciss_printf(sc, "config signature mismatch (got '%c%c%c%c')\n", sc->ciss_cfg->signature[0], sc->ciss_cfg->signature[1], sc->ciss_cfg->signature[2], sc->ciss_cfg->signature[3]); return(ENXIO); } /* * Select the mode of operation, prefer Performant. */ if (!(sc->ciss_cfg->supported_methods & (CISS_TRANSPORT_METHOD_SIMPLE | CISS_TRANSPORT_METHOD_PERF))) { ciss_printf(sc, "No supported transport layers: 0x%x\n", sc->ciss_cfg->supported_methods); } switch (ciss_force_transport) { case 1: supported_methods = CISS_TRANSPORT_METHOD_SIMPLE; break; case 2: supported_methods = CISS_TRANSPORT_METHOD_PERF; break; default: /* * Override the capabilities of the BOARD and specify SIMPLE * MODE */ if (ciss_vendor_data[i].flags & CISS_BOARD_SIMPLE) supported_methods = CISS_TRANSPORT_METHOD_SIMPLE; else supported_methods = sc->ciss_cfg->supported_methods; break; } setup: if ((supported_methods & CISS_TRANSPORT_METHOD_PERF) != 0) { method = CISS_TRANSPORT_METHOD_PERF; sc->ciss_perf = (struct ciss_perf_config *)(cbase + cofs + sc->ciss_cfg->transport_offset); if (ciss_init_perf(sc)) { supported_methods &= ~method; goto setup; } } else if (supported_methods & CISS_TRANSPORT_METHOD_SIMPLE) { method = CISS_TRANSPORT_METHOD_SIMPLE; } else { ciss_printf(sc, "No supported transport methods: 0x%x\n", sc->ciss_cfg->supported_methods); return(ENXIO); } /* * Tell it we're using the low 4GB of RAM. Set the default interrupt * coalescing options. */ sc->ciss_cfg->requested_method = method; sc->ciss_cfg->command_physlimit = 0; sc->ciss_cfg->interrupt_coalesce_delay = CISS_INTERRUPT_COALESCE_DELAY; sc->ciss_cfg->interrupt_coalesce_count = CISS_INTERRUPT_COALESCE_COUNT; #ifdef __i386__ sc->ciss_cfg->host_driver |= CISS_DRIVER_SCSI_PREFETCH; #endif if (ciss_update_config(sc)) { ciss_printf(sc, "adapter refuses to accept config update (IDBR 0x%x)\n", CISS_TL_SIMPLE_READ(sc, CISS_TL_SIMPLE_IDBR)); return(ENXIO); } if ((sc->ciss_cfg->active_method & method) == 0) { supported_methods &= ~method; if (supported_methods == 0) { ciss_printf(sc, "adapter refuses to go into available transports " "mode (0x%x, 0x%x)\n", supported_methods, sc->ciss_cfg->active_method); return(ENXIO); } else goto setup; } /* * Wait for the adapter to come ready. */ if ((error = ciss_wait_adapter(sc)) != 0) return(error); /* Prepare to possibly use MSIX and/or PERFORMANT interrupts. Normal * interrupts have a rid of 0, this will be overridden if MSIX is used. */ sc->ciss_irq_rid[0] = 0; if (method == CISS_TRANSPORT_METHOD_PERF) { ciss_printf(sc, "PERFORMANT Transport\n"); if ((ciss_force_interrupt != 1) && (ciss_setup_msix(sc) == 0)) { intr = ciss_perf_msi_intr; } else { intr = ciss_perf_intr; } /* XXX The docs say that the 0x01 bit is only for SAS controllers. * Unfortunately, there is no good way to know if this is a SAS * controller. Hopefully enabling this bit universally will work OK. * It seems to work fine for SA6i controllers. */ sc->ciss_interrupt_mask = CISS_TL_PERF_INTR_OPQ | CISS_TL_PERF_INTR_MSI; } else { ciss_printf(sc, "SIMPLE Transport\n"); /* MSIX doesn't seem to work in SIMPLE mode, only enable if it forced */ if (ciss_force_interrupt == 2) /* If this fails, we automatically revert to INTx */ ciss_setup_msix(sc); sc->ciss_perf = NULL; intr = ciss_intr; sc->ciss_interrupt_mask = sqmask; } /* * Turn off interrupts before we go routing anything. */ CISS_TL_SIMPLE_DISABLE_INTERRUPTS(sc); /* * Allocate and set up our interrupt. 
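 *
 * rid 0 is the plain INTx resource; ciss_setup_msix() renumbers the rids to
 * 1..n when MSI/MSI-X is in use.  The handler chosen above (ciss_intr,
 * ciss_perf_intr or ciss_perf_msi_intr) is registered INTR_TYPE_CAM |
 * INTR_MPSAFE and shared.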
*/ if ((sc->ciss_irq_resource = bus_alloc_resource_any(sc->ciss_dev, SYS_RES_IRQ, &sc->ciss_irq_rid[0], RF_ACTIVE | RF_SHAREABLE)) == NULL) { ciss_printf(sc, "can't allocate interrupt\n"); return(ENXIO); } if (bus_setup_intr(sc->ciss_dev, sc->ciss_irq_resource, INTR_TYPE_CAM|INTR_MPSAFE, NULL, intr, sc, &sc->ciss_intr)) { ciss_printf(sc, "can't set up interrupt\n"); return(ENXIO); } /* * Allocate the parent bus DMA tag appropriate for our PCI * interface. * * Note that "simple" adapters can only address within a 32-bit * span. */ if (bus_dma_tag_create(bus_get_dma_tag(sc->ciss_dev),/* PCI parent */ 1, 0, /* alignment, boundary */ BUS_SPACE_MAXADDR, /* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filter, filterarg */ BUS_SPACE_MAXSIZE_32BIT, /* maxsize */ BUS_SPACE_UNRESTRICTED, /* nsegments */ BUS_SPACE_MAXSIZE_32BIT, /* maxsegsize */ 0, /* flags */ NULL, NULL, /* lockfunc, lockarg */ &sc->ciss_parent_dmat)) { ciss_printf(sc, "can't allocate parent DMA tag\n"); return(ENOMEM); } /* * Create DMA tag for mapping buffers into adapter-addressable * space. */ if (bus_dma_tag_create(sc->ciss_parent_dmat, /* parent */ 1, 0, /* alignment, boundary */ BUS_SPACE_MAXADDR, /* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filter, filterarg */ (CISS_MAX_SG_ELEMENTS - 1) * PAGE_SIZE, /* maxsize */ CISS_MAX_SG_ELEMENTS, /* nsegments */ BUS_SPACE_MAXSIZE_32BIT, /* maxsegsize */ BUS_DMA_ALLOCNOW, /* flags */ busdma_lock_mutex, &sc->ciss_mtx, /* lockfunc, lockarg */ &sc->ciss_buffer_dmat)) { ciss_printf(sc, "can't allocate buffer DMA tag\n"); return(ENOMEM); } return(0); } /************************************************************************ * Setup MSI/MSIX operation (Performant only) * Four interrupts are available, but we only use 1 right now. If MSI-X * isn't available, try using MSI instead. */ static int ciss_setup_msix(struct ciss_softc *sc) { int val, i; /* Weed out devices that don't actually support MSI */ i = ciss_lookup(sc->ciss_dev); if (ciss_vendor_data[i].flags & CISS_BOARD_NOMSI) return (EINVAL); /* * Only need to use the minimum number of MSI vectors, as the driver * doesn't support directed MSIX interrupts. */ val = pci_msix_count(sc->ciss_dev); if (val < CISS_MSI_COUNT) { val = pci_msi_count(sc->ciss_dev); device_printf(sc->ciss_dev, "got %d MSI messages\n", val); if (val < CISS_MSI_COUNT) return (EINVAL); } val = MIN(val, CISS_MSI_COUNT); if (pci_alloc_msix(sc->ciss_dev, &val) != 0) { if (pci_alloc_msi(sc->ciss_dev, &val) != 0) return (EINVAL); } sc->ciss_msi = val; if (bootverbose) ciss_printf(sc, "Using %d MSIX interrupt%s\n", val, (val != 1) ? "s" : ""); for (i = 0; i < val; i++) sc->ciss_irq_rid[i] = i + 1; return (0); } /************************************************************************ * Setup the Performant structures. */ static int ciss_init_perf(struct ciss_softc *sc) { struct ciss_perf_config *pc = sc->ciss_perf; int reply_size; /* * Create the DMA tag for the reply queue. */ reply_size = sizeof(uint64_t) * sc->ciss_max_requests; if (bus_dma_tag_create(sc->ciss_parent_dmat, /* parent */ 1, 0, /* alignment, boundary */ BUS_SPACE_MAXADDR_32BIT, /* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filter, filterarg */ reply_size, 1, /* maxsize, nsegments */ BUS_SPACE_MAXSIZE_32BIT, /* maxsegsize */ 0, /* flags */ NULL, NULL, /* lockfunc, lockarg */ &sc->ciss_reply_dmat)) { ciss_printf(sc, "can't allocate reply DMA tag\n"); return(ENOMEM); } /* * Allocate memory and make it available for DMA.
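 *
 * The reply queue is an array of uint64_t completion descriptors, one per
 * possible outstanding command.  ciss_cycle and ciss_rqidx (initialised
 * below) appear to implement the usual toggle-bit consumer scheme, and the
 * queue's physical address is handed to the adapter through pc->rq[0].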
*/ if (bus_dmamem_alloc(sc->ciss_reply_dmat, (void **)&sc->ciss_reply, BUS_DMA_NOWAIT, &sc->ciss_reply_map)) { ciss_printf(sc, "can't allocate reply memory\n"); return(ENOMEM); } bus_dmamap_load(sc->ciss_reply_dmat, sc->ciss_reply_map, sc->ciss_reply, reply_size, ciss_command_map_helper, &sc->ciss_reply_phys, 0); bzero(sc->ciss_reply, reply_size); sc->ciss_cycle = 0x1; sc->ciss_rqidx = 0; /* * Preload the fetch table with common command sizes. This allows the * hardware to not waste bus cycles for typical i/o commands, but also not * tax the driver to be too exact in choosing sizes. The table is optimized * for page-aligned i/o's, but since most i/o comes from the various pagers, * it's a reasonable assumption to make. */ pc->fetch_count[CISS_SG_FETCH_NONE] = (sizeof(struct ciss_command) + 15) / 16; pc->fetch_count[CISS_SG_FETCH_1] = (sizeof(struct ciss_command) + sizeof(struct ciss_sg_entry) * 1 + 15) / 16; pc->fetch_count[CISS_SG_FETCH_2] = (sizeof(struct ciss_command) + sizeof(struct ciss_sg_entry) * 2 + 15) / 16; pc->fetch_count[CISS_SG_FETCH_4] = (sizeof(struct ciss_command) + sizeof(struct ciss_sg_entry) * 4 + 15) / 16; pc->fetch_count[CISS_SG_FETCH_8] = (sizeof(struct ciss_command) + sizeof(struct ciss_sg_entry) * 8 + 15) / 16; pc->fetch_count[CISS_SG_FETCH_16] = (sizeof(struct ciss_command) + sizeof(struct ciss_sg_entry) * 16 + 15) / 16; pc->fetch_count[CISS_SG_FETCH_32] = (sizeof(struct ciss_command) + sizeof(struct ciss_sg_entry) * 32 + 15) / 16; pc->fetch_count[CISS_SG_FETCH_MAX] = (CISS_COMMAND_ALLOC_SIZE + 15) / 16; pc->rq_size = sc->ciss_max_requests; /* XXX less than the card supports? */ pc->rq_count = 1; /* XXX Hardcode for a single queue */ pc->rq_bank_hi = 0; pc->rq_bank_lo = 0; pc->rq[0].rq_addr_hi = 0x0; pc->rq[0].rq_addr_lo = sc->ciss_reply_phys; return(0); } /************************************************************************ * Wait for the adapter to come ready. */ static int ciss_wait_adapter(struct ciss_softc *sc) { int i; debug_called(1); /* * Wait for the adapter to come ready. */ if (!(sc->ciss_cfg->active_method & CISS_TRANSPORT_METHOD_READY)) { ciss_printf(sc, "waiting for adapter to come ready...\n"); for (i = 0; !(sc->ciss_cfg->active_method & CISS_TRANSPORT_METHOD_READY); i++) { DELAY(1000000); /* one second */ if (i > 30) { ciss_printf(sc, "timed out waiting for adapter to come ready\n"); return(EIO); } } } return(0); } /************************************************************************ * Flush the adapter cache. */ static int ciss_flush_adapter(struct ciss_softc *sc) { struct ciss_request *cr; struct ciss_bmic_flush_cache *cbfc; int error, command_status; debug_called(1); cr = NULL; cbfc = NULL; /* * Build a BMIC request to flush the cache. We don't disable * it, as we may be going to do more I/O (eg. we are emulating * the Synchronise Cache command). */ if ((cbfc = malloc(sizeof(*cbfc), CISS_MALLOC_CLASS, M_NOWAIT | M_ZERO)) == NULL) { error = ENOMEM; goto out; } if ((error = ciss_get_bmic_request(sc, &cr, CISS_BMIC_FLUSH_CACHE, (void **)&cbfc, sizeof(*cbfc))) != 0) goto out; /* * Submit the request and wait for it to complete. */ if ((error = ciss_synch_request(cr, 60 * 1000)) != 0) { ciss_printf(sc, "error sending BMIC FLUSH_CACHE command (%d)\n", error); goto out; } /* * Check response. 
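 *
 * Only CISS_CMD_STATUS_SUCCESS is accepted for the flush; anything else is
 * logged and mapped to EIO.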
*/ ciss_report_request(cr, &command_status, NULL); switch(command_status) { case CISS_CMD_STATUS_SUCCESS: break; default: ciss_printf(sc, "error flushing cache (%s)\n", ciss_name_command_status(command_status)); error = EIO; goto out; } out: if (cbfc != NULL) free(cbfc, CISS_MALLOC_CLASS); if (cr != NULL) ciss_release_request(cr); return(error); } static void ciss_soft_reset(struct ciss_softc *sc) { struct ciss_request *cr = NULL; struct ciss_command *cc; int i, error = 0; for (i = 0; i < sc->ciss_max_logical_bus; i++) { /* only reset proxy controllers */ if (sc->ciss_controllers[i].physical.bus == 0) continue; if ((error = ciss_get_request(sc, &cr)) != 0) break; if ((error = ciss_get_bmic_request(sc, &cr, CISS_BMIC_SOFT_RESET, NULL, 0)) != 0) break; cc = cr->cr_cc; cc->header.address = sc->ciss_controllers[i]; if ((error = ciss_synch_request(cr, 60 * 1000)) != 0) break; ciss_release_request(cr); } if (error) ciss_printf(sc, "error resetting controller (%d)\n", error); if (cr != NULL) ciss_release_request(cr); } /************************************************************************ * Allocate memory for the adapter command structures, initialise * the request structures. * * Note that the entire set of commands are allocated in a single * contiguous slab. */ static int ciss_init_requests(struct ciss_softc *sc) { struct ciss_request *cr; int i; debug_called(1); if (bootverbose) ciss_printf(sc, "using %d of %d available commands\n", sc->ciss_max_requests, sc->ciss_cfg->max_outstanding_commands); /* * Create the DMA tag for commands. */ if (bus_dma_tag_create(sc->ciss_parent_dmat, /* parent */ 32, 0, /* alignment, boundary */ BUS_SPACE_MAXADDR_32BIT, /* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filter, filterarg */ CISS_COMMAND_ALLOC_SIZE * sc->ciss_max_requests, 1, /* maxsize, nsegments */ BUS_SPACE_MAXSIZE_32BIT, /* maxsegsize */ 0, /* flags */ NULL, NULL, /* lockfunc, lockarg */ &sc->ciss_command_dmat)) { ciss_printf(sc, "can't allocate command DMA tag\n"); return(ENOMEM); } /* * Allocate memory and make it available for DMA. */ if (bus_dmamem_alloc(sc->ciss_command_dmat, (void **)&sc->ciss_command, BUS_DMA_NOWAIT, &sc->ciss_command_map)) { ciss_printf(sc, "can't allocate command memory\n"); return(ENOMEM); } bus_dmamap_load(sc->ciss_command_dmat, sc->ciss_command_map,sc->ciss_command, CISS_COMMAND_ALLOC_SIZE * sc->ciss_max_requests, ciss_command_map_helper, &sc->ciss_command_phys, 0); bzero(sc->ciss_command, CISS_COMMAND_ALLOC_SIZE * sc->ciss_max_requests); /* * Set up the request and command structures, push requests onto * the free queue. */ for (i = 1; i < sc->ciss_max_requests; i++) { cr = &sc->ciss_request[i]; cr->cr_sc = sc; cr->cr_tag = i; cr->cr_cc = (struct ciss_command *)((uintptr_t)sc->ciss_command + CISS_COMMAND_ALLOC_SIZE * i); cr->cr_ccphys = sc->ciss_command_phys + CISS_COMMAND_ALLOC_SIZE * i; bus_dmamap_create(sc->ciss_buffer_dmat, 0, &cr->cr_datamap); ciss_enqueue_free(cr); } return(0); } static void ciss_command_map_helper(void *arg, bus_dma_segment_t *segs, int nseg, int error) { uint32_t *addr; addr = arg; *addr = segs[0].ds_addr; } /************************************************************************ * Identify the adapter, print some information about it. */ static int ciss_identify_adapter(struct ciss_softc *sc) { struct ciss_request *cr; int error, command_status; debug_called(1); cr = NULL; /* * Get a request, allocate storage for the adapter data. 
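 *
 * ciss_get_bmic_request() allocates both the request and the sc->ciss_id
 * buffer for the BMIC ID_CTLR command; if anything below fails, the buffer
 * is freed again in the 'out' path.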
*/ if ((error = ciss_get_bmic_request(sc, &cr, CISS_BMIC_ID_CTLR, (void **)&sc->ciss_id, sizeof(*sc->ciss_id))) != 0) goto out; /* * Submit the request and wait for it to complete. */ if ((error = ciss_synch_request(cr, 60 * 1000)) != 0) { ciss_printf(sc, "error sending BMIC ID_CTLR command (%d)\n", error); goto out; } /* * Check response. */ ciss_report_request(cr, &command_status, NULL); switch(command_status) { case CISS_CMD_STATUS_SUCCESS: /* buffer right size */ break; case CISS_CMD_STATUS_DATA_UNDERRUN: case CISS_CMD_STATUS_DATA_OVERRUN: ciss_printf(sc, "data over/underrun reading adapter information\n"); default: ciss_printf(sc, "error reading adapter information (%s)\n", ciss_name_command_status(command_status)); error = EIO; goto out; } /* sanity-check reply */ if (!(sc->ciss_id->controller_flags & CONTROLLER_FLAGS_BIG_MAP_SUPPORT)) { ciss_printf(sc, "adapter does not support BIG_MAP\n"); error = ENXIO; goto out; } #if 0 /* XXX later revisions may not need this */ sc->ciss_flags |= CISS_FLAG_FAKE_SYNCH; #endif /* XXX only really required for old 5300 adapters? */ sc->ciss_flags |= CISS_FLAG_BMIC_ABORT; /* * Earlier controller specs do not contain these config * entries, so assume that a 0 means it's old and assign * these values to the defaults that were established * when this driver was developed for them */ if (sc->ciss_cfg->max_logical_supported == 0) sc->ciss_cfg->max_logical_supported = CISS_MAX_LOGICAL; if (sc->ciss_cfg->max_physical_supported == 0) sc->ciss_cfg->max_physical_supported = CISS_MAX_PHYSICAL; /* print information */ if (bootverbose) { ciss_printf(sc, " %d logical drive%s configured\n", sc->ciss_id->configured_logical_drives, (sc->ciss_id->configured_logical_drives == 1) ? "" : "s"); ciss_printf(sc, " firmware %4.4s\n", sc->ciss_id->running_firmware_revision); ciss_printf(sc, " %d SCSI channels\n", sc->ciss_id->scsi_chip_count); ciss_printf(sc, " signature '%.4s'\n", sc->ciss_cfg->signature); ciss_printf(sc, " valence %d\n", sc->ciss_cfg->valence); ciss_printf(sc, " supported I/O methods 0x%b\n", sc->ciss_cfg->supported_methods, "\20\1READY\2simple\3performant\4MEMQ\n"); ciss_printf(sc, " active I/O method 0x%b\n", sc->ciss_cfg->active_method, "\20\2simple\3performant\4MEMQ\n"); ciss_printf(sc, " 4G page base 0x%08x\n", sc->ciss_cfg->command_physlimit); ciss_printf(sc, " interrupt coalesce delay %dus\n", sc->ciss_cfg->interrupt_coalesce_delay); ciss_printf(sc, " interrupt coalesce count %d\n", sc->ciss_cfg->interrupt_coalesce_count); ciss_printf(sc, " max outstanding commands %d\n", sc->ciss_cfg->max_outstanding_commands); ciss_printf(sc, " bus types 0x%b\n", sc->ciss_cfg->bus_types, "\20\1ultra2\2ultra3\10fibre1\11fibre2\n"); ciss_printf(sc, " server name '%.16s'\n", sc->ciss_cfg->server_name); ciss_printf(sc, " heartbeat 0x%x\n", sc->ciss_cfg->heartbeat); ciss_printf(sc, " max logical volumes: %d\n", sc->ciss_cfg->max_logical_supported); ciss_printf(sc, " max physical disks supported: %d\n", sc->ciss_cfg->max_physical_supported); ciss_printf(sc, " max physical disks per logical volume: %d\n", sc->ciss_cfg->max_physical_per_logical); ciss_printf(sc, " JBOD Support is %s\n", (sc->ciss_id->uiYetMoreControllerFlags & YMORE_CONTROLLER_FLAGS_JBOD_SUPPORTED) ? "Available" : "Unavailable"); ciss_printf(sc, " JBOD Mode is %s\n", (sc->ciss_id->PowerUPNvramFlags & PWR_UP_FLAG_JBOD_ENABLED) ?
"Enabled" : "Disabled"); } out: if (error) { if (sc->ciss_id != NULL) { free(sc->ciss_id, CISS_MALLOC_CLASS); sc->ciss_id = NULL; } } if (cr != NULL) ciss_release_request(cr); return(error); } /************************************************************************ * Helper routine for generating a list of logical and physical luns. */ static struct ciss_lun_report * ciss_report_luns(struct ciss_softc *sc, int opcode, int nunits) { struct ciss_request *cr; struct ciss_command *cc; struct ciss_report_cdb *crc; struct ciss_lun_report *cll; int command_status; int report_size; int error = 0; debug_called(1); cr = NULL; cll = NULL; /* * Get a request, allocate storage for the address list. */ if ((error = ciss_get_request(sc, &cr)) != 0) goto out; report_size = sizeof(*cll) + nunits * sizeof(union ciss_device_address); if ((cll = malloc(report_size, CISS_MALLOC_CLASS, M_NOWAIT | M_ZERO)) == NULL) { ciss_printf(sc, "can't allocate memory for lun report\n"); error = ENOMEM; goto out; } /* * Build the Report Logical/Physical LUNs command. */ cc = cr->cr_cc; cr->cr_data = cll; cr->cr_length = report_size; cr->cr_flags = CISS_REQ_DATAIN; cc->header.address.physical.mode = CISS_HDR_ADDRESS_MODE_PERIPHERAL; cc->header.address.physical.bus = 0; cc->header.address.physical.target = 0; cc->cdb.cdb_length = sizeof(*crc); cc->cdb.type = CISS_CDB_TYPE_COMMAND; cc->cdb.attribute = CISS_CDB_ATTRIBUTE_SIMPLE; cc->cdb.direction = CISS_CDB_DIRECTION_READ; cc->cdb.timeout = 30; /* XXX better suggestions? */ crc = (struct ciss_report_cdb *)&(cc->cdb.cdb[0]); bzero(crc, sizeof(*crc)); crc->opcode = opcode; crc->length = htonl(report_size); /* big-endian field */ cll->list_size = htonl(report_size - sizeof(*cll)); /* big-endian field */ /* * Submit the request and wait for it to complete. (timeout * here should be much greater than above) */ if ((error = ciss_synch_request(cr, 60 * 1000)) != 0) { ciss_printf(sc, "error sending %d LUN command (%d)\n", opcode, error); goto out; } /* * Check response. Note that data over/underrun is OK. */ ciss_report_request(cr, &command_status, NULL); switch(command_status) { case CISS_CMD_STATUS_SUCCESS: /* buffer right size */ case CISS_CMD_STATUS_DATA_UNDERRUN: /* buffer too large, not bad */ break; case CISS_CMD_STATUS_DATA_OVERRUN: ciss_printf(sc, "WARNING: more units than driver limit (%d)\n", sc->ciss_cfg->max_logical_supported); break; default: ciss_printf(sc, "error detecting logical drive configuration (%s)\n", ciss_name_command_status(command_status)); error = EIO; goto out; } ciss_release_request(cr); cr = NULL; out: if (cr != NULL) ciss_release_request(cr); if (error && cll != NULL) { free(cll, CISS_MALLOC_CLASS); cll = NULL; } return(cll); } /************************************************************************ * Find logical drives on the adapter. */ static int ciss_init_logical(struct ciss_softc *sc) { struct ciss_lun_report *cll; int error = 0, i, j; int ndrives; debug_called(1); cll = ciss_report_luns(sc, CISS_OPCODE_REPORT_LOGICAL_LUNS, sc->ciss_cfg->max_logical_supported); if (cll == NULL) { error = ENXIO; goto out; } /* sanity-check reply */ ndrives = (ntohl(cll->list_size) / sizeof(union ciss_device_address)); if ((ndrives < 0) || (ndrives > sc->ciss_cfg->max_logical_supported)) { ciss_printf(sc, "adapter claims to report absurd number of logical drives (%d > %d)\n", ndrives, sc->ciss_cfg->max_logical_supported); error = ENXIO; goto out; } /* * Save logical drive information. 
*/ if (bootverbose) { ciss_printf(sc, "%d logical drive%s\n", ndrives, (ndrives > 1 || ndrives == 0) ? "s" : ""); } sc->ciss_logical = - malloc(sc->ciss_max_logical_bus * sizeof(struct ciss_ldrive *), + mallocarray(sc->ciss_max_logical_bus, sizeof(struct ciss_ldrive *), CISS_MALLOC_CLASS, M_NOWAIT | M_ZERO); if (sc->ciss_logical == NULL) { error = ENXIO; goto out; } for (i = 0; i < sc->ciss_max_logical_bus; i++) { sc->ciss_logical[i] = - malloc(sc->ciss_cfg->max_logical_supported * + mallocarray(sc->ciss_cfg->max_logical_supported, sizeof(struct ciss_ldrive), CISS_MALLOC_CLASS, M_NOWAIT | M_ZERO); if (sc->ciss_logical[i] == NULL) { error = ENXIO; goto out; } for (j = 0; j < sc->ciss_cfg->max_logical_supported; j++) sc->ciss_logical[i][j].cl_status = CISS_LD_NONEXISTENT; } for (i = 0; i < sc->ciss_cfg->max_logical_supported; i++) { if (i < ndrives) { struct ciss_ldrive *ld; int bus, target; bus = CISS_LUN_TO_BUS(cll->lun[i].logical.lun); target = CISS_LUN_TO_TARGET(cll->lun[i].logical.lun); ld = &sc->ciss_logical[bus][target]; ld->cl_address = cll->lun[i]; ld->cl_controller = &sc->ciss_controllers[bus]; if (ciss_identify_logical(sc, ld) != 0) continue; /* * If the drive has had media exchanged, we should bring it online. */ if (ld->cl_lstatus->media_exchanged) ciss_accept_media(sc, ld); } } out: if (cll != NULL) free(cll, CISS_MALLOC_CLASS); return(error); } static int ciss_init_physical(struct ciss_softc *sc) { struct ciss_lun_report *cll; int error = 0, i; int nphys; int bus, target; debug_called(1); bus = 0; target = 0; cll = ciss_report_luns(sc, CISS_OPCODE_REPORT_PHYSICAL_LUNS, sc->ciss_cfg->max_physical_supported); if (cll == NULL) { error = ENXIO; goto out; } nphys = (ntohl(cll->list_size) / sizeof(union ciss_device_address)); if (bootverbose) { ciss_printf(sc, "%d physical device%s\n", nphys, (nphys > 1 || nphys == 0) ? "s" : ""); } /* * Figure out the bus mapping. * Logical buses include both the local logical bus for local arrays and * proxy buses for remote arrays. Physical buses are numbered by the * controller and represent physical buses that hold physical devices. * We shift these bus numbers so that everything fits into a single flat * numbering space for CAM. Logical buses occupy the first 32 CAM bus * numbers, and the physical bus numbers are shifted to be above that. * This results in the various driver arrays being indexed as follows: * * ciss_controllers[] - indexed by logical bus * ciss_cam_sim[] - indexed by both logical and physical, with physical * being shifted by 32. * ciss_logical[][] - indexed by logical bus * ciss_physical[][] - indexed by physical bus * * XXX This is getting more and more hackish. CISS really doesn't play * well with a standard SCSI model; devices are addressed via magic * cookies, not via b/t/l addresses. Since there is no way to store * the cookie in the CAM device object, we have to keep these lookup * tables handy so that the devices can be found quickly at the cost * of wasting memory and having a convoluted lookup scheme. This * driver should probably be converted to block interface. */ /* * If the L2 and L3 SCSI addresses are 0, this signifies a proxy * controller. A proxy controller is another physical controller * behind the primary PCI controller. We need to know about this * so that BMIC commands can be properly targeted. There can be * proxy controllers attached to a single PCI controller, so * find the highest numbered one so the array can be properly * sized. 
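/*
 * The scan below derives both limits in one pass: LUNs whose extra_address
 * is zero name a (possibly proxy) controller and raise ciss_max_logical_bus,
 * while the rest carry a physical bus number in CISS_EXTRA_BUS2() and raise
 * ciss_max_physical_bus.  The controller and physical-drive maps are then
 * sized from those maxima, again via mallocarray().
 */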
*/ sc->ciss_max_logical_bus = 1; for (i = 0; i < nphys; i++) { if (cll->lun[i].physical.extra_address == 0) { bus = cll->lun[i].physical.bus; sc->ciss_max_logical_bus = max(sc->ciss_max_logical_bus, bus) + 1; } else { bus = CISS_EXTRA_BUS2(cll->lun[i].physical.extra_address); sc->ciss_max_physical_bus = max(sc->ciss_max_physical_bus, bus); } } sc->ciss_controllers = - malloc(sc->ciss_max_logical_bus * sizeof (union ciss_device_address), + mallocarray(sc->ciss_max_logical_bus, sizeof(union ciss_device_address), CISS_MALLOC_CLASS, M_NOWAIT | M_ZERO); if (sc->ciss_controllers == NULL) { ciss_printf(sc, "Could not allocate memory for controller map\n"); error = ENOMEM; goto out; } /* setup a map of controller addresses */ for (i = 0; i < nphys; i++) { if (cll->lun[i].physical.extra_address == 0) { sc->ciss_controllers[cll->lun[i].physical.bus] = cll->lun[i]; } } sc->ciss_physical = - malloc(sc->ciss_max_physical_bus * sizeof(struct ciss_pdrive *), + mallocarray(sc->ciss_max_physical_bus, sizeof(struct ciss_pdrive *), CISS_MALLOC_CLASS, M_NOWAIT | M_ZERO); if (sc->ciss_physical == NULL) { ciss_printf(sc, "Could not allocate memory for physical device map\n"); error = ENOMEM; goto out; } for (i = 0; i < sc->ciss_max_physical_bus; i++) { sc->ciss_physical[i] = malloc(sizeof(struct ciss_pdrive) * CISS_MAX_PHYSTGT, CISS_MALLOC_CLASS, M_NOWAIT | M_ZERO); if (sc->ciss_physical[i] == NULL) { ciss_printf(sc, "Could not allocate memory for target map\n"); error = ENOMEM; goto out; } } ciss_filter_physical(sc, cll); out: if (cll != NULL) free(cll, CISS_MALLOC_CLASS); return(error); } static int ciss_filter_physical(struct ciss_softc *sc, struct ciss_lun_report *cll) { u_int32_t ea; int i, nphys; int bus, target; nphys = (ntohl(cll->list_size) / sizeof(union ciss_device_address)); for (i = 0; i < nphys; i++) { if (cll->lun[i].physical.extra_address == 0) continue; /* * Filter out devices that we don't want. Level 3 LUNs could * probably be supported, but the docs don't give enough of a * hint to know how. * * The mode field of the physical address is likely set to have * hard disks masked out. Honor it unless the user has overridden * us with the tunable. We also munge the inquiry data for these * disks so that they only show up as passthrough devices. Keeping * them visible in this fashion is useful for doing things like * flashing firmware. */ ea = cll->lun[i].physical.extra_address; if ((CISS_EXTRA_BUS3(ea) != 0) || (CISS_EXTRA_TARGET3(ea) != 0) || (CISS_EXTRA_MODE2(ea) == 0x3)) continue; if ((ciss_expose_hidden_physical == 0) && (cll->lun[i].physical.mode == CISS_HDR_ADDRESS_MODE_MASK_PERIPHERAL)) continue; /* * Note: CISS firmware numbers physical busses starting at '1', not * '0'. This numbering is internal to the firmware and is only * used as a hint here. 
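 *
 * For example, a disk the firmware reports on physical bus 1, target 4
 * ends up in ciss_physical[0][4] once the "- 1" below is applied.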
*/ bus = CISS_EXTRA_BUS2(ea) - 1; target = CISS_EXTRA_TARGET2(ea); sc->ciss_physical[bus][target].cp_address = cll->lun[i]; sc->ciss_physical[bus][target].cp_online = 1; } return (0); } static int ciss_inquiry_logical(struct ciss_softc *sc, struct ciss_ldrive *ld) { struct ciss_request *cr; struct ciss_command *cc; struct scsi_inquiry *inq; int error; int command_status; cr = NULL; bzero(&ld->cl_geometry, sizeof(ld->cl_geometry)); if ((error = ciss_get_request(sc, &cr)) != 0) goto out; cc = cr->cr_cc; cr->cr_data = &ld->cl_geometry; cr->cr_length = sizeof(ld->cl_geometry); cr->cr_flags = CISS_REQ_DATAIN; cc->header.address = ld->cl_address; cc->cdb.cdb_length = 6; cc->cdb.type = CISS_CDB_TYPE_COMMAND; cc->cdb.attribute = CISS_CDB_ATTRIBUTE_SIMPLE; cc->cdb.direction = CISS_CDB_DIRECTION_READ; cc->cdb.timeout = 30; inq = (struct scsi_inquiry *)&(cc->cdb.cdb[0]); inq->opcode = INQUIRY; inq->byte2 = SI_EVPD; inq->page_code = CISS_VPD_LOGICAL_DRIVE_GEOMETRY; scsi_ulto2b(sizeof(ld->cl_geometry), inq->length); if ((error = ciss_synch_request(cr, 60 * 1000)) != 0) { ciss_printf(sc, "error getting geometry (%d)\n", error); goto out; } ciss_report_request(cr, &command_status, NULL); switch(command_status) { case CISS_CMD_STATUS_SUCCESS: case CISS_CMD_STATUS_DATA_UNDERRUN: break; case CISS_CMD_STATUS_DATA_OVERRUN: ciss_printf(sc, "WARNING: Data overrun\n"); break; default: ciss_printf(sc, "Error detecting logical drive geometry (%s)\n", ciss_name_command_status(command_status)); break; } out: if (cr != NULL) ciss_release_request(cr); return(error); } /************************************************************************ * Identify a logical drive, initialise state related to it. */ static int ciss_identify_logical(struct ciss_softc *sc, struct ciss_ldrive *ld) { struct ciss_request *cr; struct ciss_command *cc; struct ciss_bmic_cdb *cbc; int error, command_status; debug_called(1); cr = NULL; /* * Build a BMIC request to fetch the drive ID. */ if ((error = ciss_get_bmic_request(sc, &cr, CISS_BMIC_ID_LDRIVE, (void **)&ld->cl_ldrive, sizeof(*ld->cl_ldrive))) != 0) goto out; cc = cr->cr_cc; cc->header.address = *ld->cl_controller; /* target controller */ cbc = (struct ciss_bmic_cdb *)&(cc->cdb.cdb[0]); cbc->log_drive = CISS_LUN_TO_TARGET(ld->cl_address.logical.lun); /* * Submit the request and wait for it to complete. */ if ((error = ciss_synch_request(cr, 60 * 1000)) != 0) { ciss_printf(sc, "error sending BMIC LDRIVE command (%d)\n", error); goto out; } /* * Check response. */ ciss_report_request(cr, &command_status, NULL); switch(command_status) { case CISS_CMD_STATUS_SUCCESS: /* buffer right size */ break; case CISS_CMD_STATUS_DATA_UNDERRUN: case CISS_CMD_STATUS_DATA_OVERRUN: ciss_printf(sc, "data over/underrun reading logical drive ID\n"); default: ciss_printf(sc, "error reading logical drive ID (%s)\n", ciss_name_command_status(command_status)); error = EIO; goto out; } ciss_release_request(cr); cr = NULL; /* * Build a CISS BMIC command to get the logical drive status. */ if ((error = ciss_get_ldrive_status(sc, ld)) != 0) goto out; /* * Get the logical drive geometry. */ if ((error = ciss_inquiry_logical(sc, ld)) != 0) goto out; /* * Print the drive's basic characteristics. 
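 *
 * The size below is computed as (blocks / 2^20) * block_size, i.e. the
 * division is done first, presumably to keep the product within 32 bits;
 * e.g. 4194304 blocks of 512 bytes prints as (4194304 / 1048576) * 512
 * = 2048MB.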
*/ if (bootverbose) { ciss_printf(sc, "logical drive (b%dt%d): %s, %dMB ", CISS_LUN_TO_BUS(ld->cl_address.logical.lun), CISS_LUN_TO_TARGET(ld->cl_address.logical.lun), ciss_name_ldrive_org(ld->cl_ldrive->fault_tolerance), ((ld->cl_ldrive->blocks_available / (1024 * 1024)) * ld->cl_ldrive->block_size)); ciss_print_ldrive(sc, ld); } out: if (error != 0) { /* make the drive not-exist */ ld->cl_status = CISS_LD_NONEXISTENT; if (ld->cl_ldrive != NULL) { free(ld->cl_ldrive, CISS_MALLOC_CLASS); ld->cl_ldrive = NULL; } if (ld->cl_lstatus != NULL) { free(ld->cl_lstatus, CISS_MALLOC_CLASS); ld->cl_lstatus = NULL; } } if (cr != NULL) ciss_release_request(cr); return(error); } /************************************************************************ * Get status for a logical drive. * * XXX should we also do this in response to Test Unit Ready? */ static int ciss_get_ldrive_status(struct ciss_softc *sc, struct ciss_ldrive *ld) { struct ciss_request *cr; struct ciss_command *cc; struct ciss_bmic_cdb *cbc; int error, command_status; /* * Build a CISS BMIC command to get the logical drive status. */ if ((error = ciss_get_bmic_request(sc, &cr, CISS_BMIC_ID_LSTATUS, (void **)&ld->cl_lstatus, sizeof(*ld->cl_lstatus))) != 0) goto out; cc = cr->cr_cc; cc->header.address = *ld->cl_controller; /* target controller */ cbc = (struct ciss_bmic_cdb *)&(cc->cdb.cdb[0]); cbc->log_drive = CISS_LUN_TO_TARGET(ld->cl_address.logical.lun); /* * Submit the request and wait for it to complete. */ if ((error = ciss_synch_request(cr, 60 * 1000)) != 0) { ciss_printf(sc, "error sending BMIC LSTATUS command (%d)\n", error); goto out; } /* * Check response. */ ciss_report_request(cr, &command_status, NULL); switch(command_status) { case CISS_CMD_STATUS_SUCCESS: /* buffer right size */ break; case CISS_CMD_STATUS_DATA_UNDERRUN: case CISS_CMD_STATUS_DATA_OVERRUN: ciss_printf(sc, "data over/underrun reading logical drive status\n"); default: ciss_printf(sc, "error reading logical drive status (%s)\n", ciss_name_command_status(command_status)); error = EIO; goto out; } /* * Set the drive's summary status based on the returned status. * * XXX testing shows that a failed JBOD drive comes back at next * boot in "queued for expansion" mode. WTF? */ ld->cl_status = ciss_decode_ldrive_status(ld->cl_lstatus->status); out: if (cr != NULL) ciss_release_request(cr); return(error); } /************************************************************************ * Notify the adapter of a config update. */ static int ciss_update_config(struct ciss_softc *sc) { int i; debug_called(1); CISS_TL_SIMPLE_WRITE(sc, CISS_TL_SIMPLE_IDBR, CISS_TL_SIMPLE_IDBR_CFG_TABLE); for (i = 0; i < 1000; i++) { if (!(CISS_TL_SIMPLE_READ(sc, CISS_TL_SIMPLE_IDBR) & CISS_TL_SIMPLE_IDBR_CFG_TABLE)) { return(0); } DELAY(1000); } return(1); } /************************************************************************ * Accept new media into a logical drive. * * XXX The drive has previously been offline; it would be good if we * could make sure it's not open right now. */ static int ciss_accept_media(struct ciss_softc *sc, struct ciss_ldrive *ld) { struct ciss_request *cr; struct ciss_command *cc; struct ciss_bmic_cdb *cbc; int command_status; int error = 0, ldrive; ldrive = CISS_LUN_TO_TARGET(ld->cl_address.logical.lun); debug(0, "bringing logical drive %d back online", ldrive); /* * Build a CISS BMIC command to bring the drive back online. 
*/ if ((error = ciss_get_bmic_request(sc, &cr, CISS_BMIC_ACCEPT_MEDIA, NULL, 0)) != 0) goto out; cc = cr->cr_cc; cc->header.address = *ld->cl_controller; /* target controller */ cbc = (struct ciss_bmic_cdb *)&(cc->cdb.cdb[0]); cbc->log_drive = ldrive; /* * Submit the request and wait for it to complete. */ if ((error = ciss_synch_request(cr, 60 * 1000)) != 0) { ciss_printf(sc, "error sending BMIC ACCEPT MEDIA command (%d)\n", error); goto out; } /* * Check response. */ ciss_report_request(cr, &command_status, NULL); switch(command_status) { case CISS_CMD_STATUS_SUCCESS: /* all OK */ /* we should get a logical drive status changed event here */ break; default: ciss_printf(cr->cr_sc, "error accepting media into failed logical drive (%s)\n", ciss_name_command_status(command_status)); break; } out: if (cr != NULL) ciss_release_request(cr); return(error); } /************************************************************************ * Release adapter resources. */ static void ciss_free(struct ciss_softc *sc) { struct ciss_request *cr; int i, j; debug_called(1); /* we're going away */ sc->ciss_flags |= CISS_FLAG_ABORTING; /* terminate the periodic heartbeat routine */ callout_stop(&sc->ciss_periodic); /* cancel the Event Notify chain */ ciss_notify_abort(sc); ciss_kill_notify_thread(sc); /* disconnect from CAM */ if (sc->ciss_cam_sim) { for (i = 0; i < sc->ciss_max_logical_bus; i++) { if (sc->ciss_cam_sim[i]) { xpt_bus_deregister(cam_sim_path(sc->ciss_cam_sim[i])); cam_sim_free(sc->ciss_cam_sim[i], 0); } } for (i = CISS_PHYSICAL_BASE; i < sc->ciss_max_physical_bus + CISS_PHYSICAL_BASE; i++) { if (sc->ciss_cam_sim[i]) { xpt_bus_deregister(cam_sim_path(sc->ciss_cam_sim[i])); cam_sim_free(sc->ciss_cam_sim[i], 0); } } free(sc->ciss_cam_sim, CISS_MALLOC_CLASS); } if (sc->ciss_cam_devq) cam_simq_free(sc->ciss_cam_devq); /* remove the control device */ mtx_unlock(&sc->ciss_mtx); if (sc->ciss_dev_t != NULL) destroy_dev(sc->ciss_dev_t); /* Final cleanup of the callout. 
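 *
 * callout_stop() was already called above with the mutex held;
 * callout_drain() below may sleep waiting for an in-flight handler to
 * finish, so it runs only after ciss_mtx has been released and right
 * before the mutex itself is destroyed.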
*/ callout_drain(&sc->ciss_periodic); mtx_destroy(&sc->ciss_mtx); /* free the controller data */ if (sc->ciss_id != NULL) free(sc->ciss_id, CISS_MALLOC_CLASS); /* release I/O resources */ if (sc->ciss_regs_resource != NULL) bus_release_resource(sc->ciss_dev, SYS_RES_MEMORY, sc->ciss_regs_rid, sc->ciss_regs_resource); if (sc->ciss_cfg_resource != NULL) bus_release_resource(sc->ciss_dev, SYS_RES_MEMORY, sc->ciss_cfg_rid, sc->ciss_cfg_resource); if (sc->ciss_intr != NULL) bus_teardown_intr(sc->ciss_dev, sc->ciss_irq_resource, sc->ciss_intr); if (sc->ciss_irq_resource != NULL) bus_release_resource(sc->ciss_dev, SYS_RES_IRQ, sc->ciss_irq_rid[0], sc->ciss_irq_resource); if (sc->ciss_msi) pci_release_msi(sc->ciss_dev); while ((cr = ciss_dequeue_free(sc)) != NULL) bus_dmamap_destroy(sc->ciss_buffer_dmat, cr->cr_datamap); if (sc->ciss_buffer_dmat) bus_dma_tag_destroy(sc->ciss_buffer_dmat); /* destroy command memory and DMA tag */ if (sc->ciss_command != NULL) { bus_dmamap_unload(sc->ciss_command_dmat, sc->ciss_command_map); bus_dmamem_free(sc->ciss_command_dmat, sc->ciss_command, sc->ciss_command_map); } if (sc->ciss_command_dmat) bus_dma_tag_destroy(sc->ciss_command_dmat); if (sc->ciss_reply) { bus_dmamap_unload(sc->ciss_reply_dmat, sc->ciss_reply_map); bus_dmamem_free(sc->ciss_reply_dmat, sc->ciss_reply, sc->ciss_reply_map); } if (sc->ciss_reply_dmat) bus_dma_tag_destroy(sc->ciss_reply_dmat); /* destroy DMA tags */ if (sc->ciss_parent_dmat) bus_dma_tag_destroy(sc->ciss_parent_dmat); if (sc->ciss_logical) { for (i = 0; i < sc->ciss_max_logical_bus; i++) { for (j = 0; j < sc->ciss_cfg->max_logical_supported; j++) { if (sc->ciss_logical[i][j].cl_ldrive) free(sc->ciss_logical[i][j].cl_ldrive, CISS_MALLOC_CLASS); if (sc->ciss_logical[i][j].cl_lstatus) free(sc->ciss_logical[i][j].cl_lstatus, CISS_MALLOC_CLASS); } free(sc->ciss_logical[i], CISS_MALLOC_CLASS); } free(sc->ciss_logical, CISS_MALLOC_CLASS); } if (sc->ciss_physical) { for (i = 0; i < sc->ciss_max_physical_bus; i++) free(sc->ciss_physical[i], CISS_MALLOC_CLASS); free(sc->ciss_physical, CISS_MALLOC_CLASS); } if (sc->ciss_controllers) free(sc->ciss_controllers, CISS_MALLOC_CLASS); } /************************************************************************ * Give a command to the adapter. * * Note that this uses the simple transport layer directly. If we * want to add support for other layers, we'll need a switch of some * sort. * * Note that the simple transport layer has no way of refusing a * command; we only have as many request structures as the adapter * supports commands, so we don't have to check (this presumes that * the adapter can handle commands as fast as we throw them at it). */ static int ciss_start(struct ciss_request *cr) { struct ciss_command *cc; /* XXX debugging only */ int error; cc = cr->cr_cc; debug(2, "post command %d tag %d ", cr->cr_tag, cc->header.host_tag); /* * Map the request's data. */ if ((error = ciss_map_request(cr))) return(error); #if 0 ciss_print_request(cr); #endif return(0); } /************************************************************************ * Fetch completed request(s) from the adapter, queue them for * completion handling. * * Note that this uses the simple transport layer directly. If we * want to add support for other layers, we'll need a switch of some * sort. * * Note that the simple transport mechanism does not require any * reentrancy protection; the OPQ read is atomic. 
If there is a * chance of a race with something else that might move the request * off the busy list, then we will have to lock against that * (eg. timeouts, etc.) */ static void ciss_done(struct ciss_softc *sc, cr_qhead_t *qh) { struct ciss_request *cr; struct ciss_command *cc; u_int32_t tag, index; debug_called(3); /* * Loop quickly taking requests from the adapter and moving them * to the completed queue. */ for (;;) { tag = CISS_TL_SIMPLE_FETCH_CMD(sc); if (tag == CISS_TL_SIMPLE_OPQ_EMPTY) break; index = tag >> 2; debug(2, "completed command %d%s", index, (tag & CISS_HDR_HOST_TAG_ERROR) ? " with error" : ""); if (index >= sc->ciss_max_requests) { ciss_printf(sc, "completed invalid request %d (0x%x)\n", index, tag); continue; } cr = &(sc->ciss_request[index]); cc = cr->cr_cc; cc->header.host_tag = tag; /* not updated by adapter */ ciss_enqueue_complete(cr, qh); } } static void ciss_perf_done(struct ciss_softc *sc, cr_qhead_t *qh) { struct ciss_request *cr; struct ciss_command *cc; u_int32_t tag, index; debug_called(3); /* * Loop quickly taking requests from the adapter and moving them * to the completed queue. */ for (;;) { tag = sc->ciss_reply[sc->ciss_rqidx]; if ((tag & CISS_CYCLE_MASK) != sc->ciss_cycle) break; index = tag >> 2; debug(2, "completed command %d%s\n", index, (tag & CISS_HDR_HOST_TAG_ERROR) ? " with error" : ""); if (index < sc->ciss_max_requests) { cr = &(sc->ciss_request[index]); cc = cr->cr_cc; cc->header.host_tag = tag; /* not updated by adapter */ ciss_enqueue_complete(cr, qh); } else { ciss_printf(sc, "completed invalid request %d (0x%x)\n", index, tag); } if (++sc->ciss_rqidx == sc->ciss_max_requests) { sc->ciss_rqidx = 0; sc->ciss_cycle ^= 1; } } } /************************************************************************ * Take an interrupt from the adapter. */ static void ciss_intr(void *arg) { cr_qhead_t qh; struct ciss_softc *sc = (struct ciss_softc *)arg; /* * The only interrupt we recognise indicates that there are * entries in the outbound post queue. */ STAILQ_INIT(&qh); ciss_done(sc, &qh); mtx_lock(&sc->ciss_mtx); ciss_complete(sc, &qh); mtx_unlock(&sc->ciss_mtx); } static void ciss_perf_intr(void *arg) { struct ciss_softc *sc = (struct ciss_softc *)arg; /* Clear the interrupt and flush the bridges. Docs say that the flush * needs to be done twice, which doesn't seem right. */ CISS_TL_PERF_CLEAR_INT(sc); CISS_TL_PERF_FLUSH_INT(sc); ciss_perf_msi_intr(sc); } static void ciss_perf_msi_intr(void *arg) { cr_qhead_t qh; struct ciss_softc *sc = (struct ciss_softc *)arg; STAILQ_INIT(&qh); ciss_perf_done(sc, &qh); mtx_lock(&sc->ciss_mtx); ciss_complete(sc, &qh); mtx_unlock(&sc->ciss_mtx); } /************************************************************************ * Process completed requests. * * Requests can be completed in three fashions: * * - by invoking a callback function (cr_complete is non-null) * - by waking up a sleeper (cr_flags has CISS_REQ_SLEEP set) * - by clearing the CISS_REQ_POLL flag in interrupt/timeout context */ static void ciss_complete(struct ciss_softc *sc, cr_qhead_t *qh) { struct ciss_request *cr; debug_called(2); /* * Loop taking requests off the completed queue and performing * completion processing on them. */ for (;;) { if ((cr = ciss_dequeue_complete(sc, qh)) == NULL) break; ciss_unmap_request(cr); if ((cr->cr_flags & CISS_REQ_BUSY) == 0) ciss_printf(sc, "WARNING: completing non-busy request\n"); cr->cr_flags &= ~CISS_REQ_BUSY; /* * If the request has a callback, invoke it. 
*/ if (cr->cr_complete != NULL) { cr->cr_complete(cr); continue; } /* * If someone is sleeping on this request, wake them up. */ if (cr->cr_flags & CISS_REQ_SLEEP) { cr->cr_flags &= ~CISS_REQ_SLEEP; wakeup(cr); continue; } /* * If someone is polling this request for completion, signal. */ if (cr->cr_flags & CISS_REQ_POLL) { cr->cr_flags &= ~CISS_REQ_POLL; continue; } /* * Give up and throw the request back on the free queue. This * should never happen; resources will probably be lost. */ ciss_printf(sc, "WARNING: completed command with no submitter\n"); ciss_enqueue_free(cr); } } /************************************************************************ * Report on the completion status of a request, and pass back SCSI * and command status values. */ static int _ciss_report_request(struct ciss_request *cr, int *command_status, int *scsi_status, const char *func) { struct ciss_command *cc; struct ciss_error_info *ce; debug_called(2); cc = cr->cr_cc; ce = (struct ciss_error_info *)&(cc->sg[0]); /* * We don't consider data under/overrun an error for the Report * Logical/Physical LUNs commands. */ if ((cc->header.host_tag & CISS_HDR_HOST_TAG_ERROR) && ((ce->command_status == CISS_CMD_STATUS_DATA_OVERRUN) || (ce->command_status == CISS_CMD_STATUS_DATA_UNDERRUN)) && ((cc->cdb.cdb[0] == CISS_OPCODE_REPORT_LOGICAL_LUNS) || (cc->cdb.cdb[0] == CISS_OPCODE_REPORT_PHYSICAL_LUNS) || (cc->cdb.cdb[0] == INQUIRY))) { cc->header.host_tag &= ~CISS_HDR_HOST_TAG_ERROR; debug(2, "ignoring irrelevant under/overrun error"); } /* * Check the command's error bit, if clear, there's no status and * everything is OK. */ if (!(cc->header.host_tag & CISS_HDR_HOST_TAG_ERROR)) { if (scsi_status != NULL) *scsi_status = SCSI_STATUS_OK; if (command_status != NULL) *command_status = CISS_CMD_STATUS_SUCCESS; return(0); } else { if (command_status != NULL) *command_status = ce->command_status; if (scsi_status != NULL) { if (ce->command_status == CISS_CMD_STATUS_TARGET_STATUS) { *scsi_status = ce->scsi_status; } else { *scsi_status = -1; } } if (bootverbose) ciss_printf(cr->cr_sc, "command status 0x%x (%s) scsi status 0x%x\n", ce->command_status, ciss_name_command_status(ce->command_status), ce->scsi_status); if (ce->command_status == CISS_CMD_STATUS_INVALID_COMMAND) { ciss_printf(cr->cr_sc, "invalid command, offense size %d at %d, value 0x%x, function %s\n", ce->additional_error_info.invalid_command.offense_size, ce->additional_error_info.invalid_command.offense_offset, ce->additional_error_info.invalid_command.offense_value, func); } } #if 0 ciss_print_request(cr); #endif return(1); } /************************************************************************ * Issue a request and don't return until it's completed. * * Depending on adapter status, we may poll or sleep waiting for * completion. */ static int ciss_synch_request(struct ciss_request *cr, int timeout) { if (cr->cr_sc->ciss_flags & CISS_FLAG_RUNNING) { return(ciss_wait_request(cr, timeout)); } else { return(ciss_poll_request(cr, timeout)); } } /************************************************************************ * Issue a request and poll for completion. * * Timeout in milliseconds. 
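 *
 * The synchronous callers in this driver (LUN reports, BMIC commands
 * and so on) all go through ciss_synch_request() above and follow
 * roughly the same shape:
 *
 *	ciss_get_request(sc, &cr);	(then fill in cr_data/cr_length/cdb)
 *	error = ciss_synch_request(cr, 60 * 1000);
 *	ciss_report_request(cr, &command_status, NULL);
 *	ciss_release_request(cr);
 *
 * Polling is only used while the adapter is not yet marked
 * CISS_FLAG_RUNNING; otherwise we sleep for completion.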
*/ static int ciss_poll_request(struct ciss_request *cr, int timeout) { cr_qhead_t qh; struct ciss_softc *sc; int error; debug_called(2); STAILQ_INIT(&qh); sc = cr->cr_sc; cr->cr_flags |= CISS_REQ_POLL; if ((error = ciss_start(cr)) != 0) return(error); do { if (sc->ciss_perf) ciss_perf_done(sc, &qh); else ciss_done(sc, &qh); ciss_complete(sc, &qh); if (!(cr->cr_flags & CISS_REQ_POLL)) return(0); DELAY(1000); } while (timeout-- >= 0); return(EWOULDBLOCK); } /************************************************************************ * Issue a request and sleep waiting for completion. * * Timeout in milliseconds. Note that a spurious wakeup will reset * the timeout. */ static int ciss_wait_request(struct ciss_request *cr, int timeout) { int error; debug_called(2); cr->cr_flags |= CISS_REQ_SLEEP; if ((error = ciss_start(cr)) != 0) return(error); while ((cr->cr_flags & CISS_REQ_SLEEP) && (error != EWOULDBLOCK)) { error = msleep_sbt(cr, &cr->cr_sc->ciss_mtx, PRIBIO, "cissREQ", SBT_1MS * timeout, 0, 0); } return(error); } #if 0 /************************************************************************ * Abort a request. Note that a potential exists here to race the * request being completed; the caller must deal with this. */ static int ciss_abort_request(struct ciss_request *ar) { struct ciss_request *cr; struct ciss_command *cc; struct ciss_message_cdb *cmc; int error; debug_called(1); /* get a request */ if ((error = ciss_get_request(ar->cr_sc, &cr)) != 0) return(error); /* build the abort command */ cc = cr->cr_cc; cc->header.address.mode.mode = CISS_HDR_ADDRESS_MODE_PERIPHERAL; /* addressing? */ cc->header.address.physical.target = 0; cc->header.address.physical.bus = 0; cc->cdb.cdb_length = sizeof(*cmc); cc->cdb.type = CISS_CDB_TYPE_MESSAGE; cc->cdb.attribute = CISS_CDB_ATTRIBUTE_SIMPLE; cc->cdb.direction = CISS_CDB_DIRECTION_NONE; cc->cdb.timeout = 30; cmc = (struct ciss_message_cdb *)&(cc->cdb.cdb[0]); cmc->opcode = CISS_OPCODE_MESSAGE_ABORT; cmc->type = CISS_MESSAGE_ABORT_TASK; cmc->abort_tag = ar->cr_tag; /* endianness?? */ /* * Send the request and wait for a response. If we believe we * aborted the request OK, clear the flag that indicates it's * running. */ error = ciss_synch_request(cr, 35 * 1000); if (!error) error = ciss_report_request(cr, NULL, NULL); ciss_release_request(cr); return(error); } #endif /************************************************************************ * Fetch and initialise a request */ static int ciss_get_request(struct ciss_softc *sc, struct ciss_request **crp) { struct ciss_request *cr; debug_called(2); /* * Get a request and clean it up. */ if ((cr = ciss_dequeue_free(sc)) == NULL) return(ENOMEM); cr->cr_data = NULL; cr->cr_flags = 0; cr->cr_complete = NULL; cr->cr_private = NULL; cr->cr_sg_tag = CISS_SG_MAX; /* Backstop to prevent accidents */ ciss_preen_command(cr); *crp = cr; return(0); } static void ciss_preen_command(struct ciss_request *cr) { struct ciss_command *cc; u_int32_t cmdphys; /* * Clean up the command structure. * * Note that we set up the error_info structure here, since the * length can be overwritten by any command. 
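 *
 * Each request owns CISS_COMMAND_ALLOC_SIZE bytes of DMA-able memory at
 * cr_ccphys: the fixed command header, then a region shared between the
 * scatter/gather list and the returned error information (which is why
 * ciss_report_request() reads the error info at &cc->sg[0]).  The
 * error_info address/length set below point at that tail region.  The
 * host tag is the request index shifted left by two; the adapter hands
 * the tag back with status in the low bits and ciss_done() recovers the
 * slot with "index = tag >> 2".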
*/ cc = cr->cr_cc; cc->header.sg_in_list = 0; /* kinda inefficient this way */ cc->header.sg_total = 0; cc->header.host_tag = cr->cr_tag << 2; cc->header.host_tag_zeroes = 0; bzero(&(cc->sg[0]), CISS_COMMAND_ALLOC_SIZE - sizeof(struct ciss_command)); cmdphys = cr->cr_ccphys; cc->error_info.error_info_address = cmdphys + sizeof(struct ciss_command); cc->error_info.error_info_length = CISS_COMMAND_ALLOC_SIZE - sizeof(struct ciss_command); } /************************************************************************ * Release a request to the free list. */ static void ciss_release_request(struct ciss_request *cr) { struct ciss_softc *sc; debug_called(2); sc = cr->cr_sc; /* release the request to the free queue */ ciss_requeue_free(cr); } /************************************************************************ * Allocate a request that will be used to send a BMIC command. Do some * of the common setup here to avoid duplicating it everywhere else. */ static int ciss_get_bmic_request(struct ciss_softc *sc, struct ciss_request **crp, int opcode, void **bufp, size_t bufsize) { struct ciss_request *cr; struct ciss_command *cc; struct ciss_bmic_cdb *cbc; void *buf; int error; int dataout; debug_called(2); cr = NULL; buf = NULL; /* * Get a request. */ if ((error = ciss_get_request(sc, &cr)) != 0) goto out; /* * Allocate data storage if requested, determine the data direction. */ dataout = 0; if ((bufsize > 0) && (bufp != NULL)) { if (*bufp == NULL) { if ((buf = malloc(bufsize, CISS_MALLOC_CLASS, M_NOWAIT | M_ZERO)) == NULL) { error = ENOMEM; goto out; } } else { buf = *bufp; dataout = 1; /* we are given a buffer, so we are writing */ } } /* * Build a CISS BMIC command to get the logical drive ID. */ cr->cr_data = buf; cr->cr_length = bufsize; if (!dataout) cr->cr_flags = CISS_REQ_DATAIN; cc = cr->cr_cc; cc->header.address.physical.mode = CISS_HDR_ADDRESS_MODE_PERIPHERAL; cc->header.address.physical.bus = 0; cc->header.address.physical.target = 0; cc->cdb.cdb_length = sizeof(*cbc); cc->cdb.type = CISS_CDB_TYPE_COMMAND; cc->cdb.attribute = CISS_CDB_ATTRIBUTE_SIMPLE; cc->cdb.direction = dataout ? CISS_CDB_DIRECTION_WRITE : CISS_CDB_DIRECTION_READ; cc->cdb.timeout = 0; cbc = (struct ciss_bmic_cdb *)&(cc->cdb.cdb[0]); bzero(cbc, sizeof(*cbc)); cbc->opcode = dataout ? CISS_ARRAY_CONTROLLER_WRITE : CISS_ARRAY_CONTROLLER_READ; cbc->bmic_opcode = opcode; cbc->size = htons((u_int16_t)bufsize); out: if (error) { if (cr != NULL) ciss_release_request(cr); } else { *crp = cr; if ((bufp != NULL) && (*bufp == NULL) && (buf != NULL)) *bufp = buf; } return(error); } /************************************************************************ * Handle a command passed in from userspace. */ static int ciss_user_command(struct ciss_softc *sc, IOCTL_Command_struct *ioc) { struct ciss_request *cr; struct ciss_command *cc; struct ciss_error_info *ce; int error = 0; debug_called(1); cr = NULL; /* * Get a request. */ while (ciss_get_request(sc, &cr) != 0) msleep(sc, &sc->ciss_mtx, PPAUSE, "cissREQ", hz); cc = cr->cr_cc; /* * Allocate an in-kernel databuffer if required, copy in user data. */ mtx_unlock(&sc->ciss_mtx); cr->cr_length = ioc->buf_size; if (ioc->buf_size > 0) { if ((cr->cr_data = malloc(ioc->buf_size, CISS_MALLOC_CLASS, M_NOWAIT)) == NULL) { error = ENOMEM; goto out_unlocked; } if ((error = copyin(ioc->buf, cr->cr_data, ioc->buf_size))) { debug(0, "copyin: bad data buffer %p/%d", ioc->buf, ioc->buf_size); goto out_unlocked; } } /* * Build the request based on the user command. 
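 *
 * ciss_mtx was dropped above because copyin() (and later copyout()) may
 * fault and sleep while touching user memory; the lock is retaken below
 * before the request is submitted.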
*/ bcopy(&ioc->LUN_info, &cc->header.address, sizeof(cc->header.address)); bcopy(&ioc->Request, &cc->cdb, sizeof(cc->cdb)); /* XXX anything else to populate here? */ mtx_lock(&sc->ciss_mtx); /* * Run the command. */ if ((error = ciss_synch_request(cr, 60 * 1000))) { debug(0, "request failed - %d", error); goto out; } /* * Check to see if the command succeeded. */ ce = (struct ciss_error_info *)&(cc->sg[0]); if ((cc->header.host_tag & CISS_HDR_HOST_TAG_ERROR) == 0) bzero(ce, sizeof(*ce)); /* * Copy the results back to the user. */ bcopy(ce, &ioc->error_info, sizeof(*ce)); mtx_unlock(&sc->ciss_mtx); if ((ioc->buf_size > 0) && (error = copyout(cr->cr_data, ioc->buf, ioc->buf_size))) { debug(0, "copyout: bad data buffer %p/%d", ioc->buf, ioc->buf_size); goto out_unlocked; } /* done OK */ error = 0; out_unlocked: mtx_lock(&sc->ciss_mtx); out: if ((cr != NULL) && (cr->cr_data != NULL)) free(cr->cr_data, CISS_MALLOC_CLASS); if (cr != NULL) ciss_release_request(cr); return(error); } /************************************************************************ * Map a request into bus-visible space, initialise the scatter/gather * list. */ static int ciss_map_request(struct ciss_request *cr) { struct ciss_softc *sc; int error = 0; debug_called(2); sc = cr->cr_sc; /* check that mapping is necessary */ if (cr->cr_flags & CISS_REQ_MAPPED) return(0); cr->cr_flags |= CISS_REQ_MAPPED; bus_dmamap_sync(sc->ciss_command_dmat, sc->ciss_command_map, BUS_DMASYNC_PREWRITE); if (cr->cr_data != NULL) { if (cr->cr_flags & CISS_REQ_CCB) error = bus_dmamap_load_ccb(sc->ciss_buffer_dmat, cr->cr_datamap, cr->cr_data, ciss_request_map_helper, cr, 0); else error = bus_dmamap_load(sc->ciss_buffer_dmat, cr->cr_datamap, cr->cr_data, cr->cr_length, ciss_request_map_helper, cr, 0); if (error != 0) return (error); } else { /* * Post the command to the adapter. */ cr->cr_sg_tag = CISS_SG_NONE; cr->cr_flags |= CISS_REQ_BUSY; if (sc->ciss_perf) CISS_TL_PERF_POST_CMD(sc, cr); else CISS_TL_SIMPLE_POST_CMD(sc, cr->cr_ccphys); } return(0); } static void ciss_request_map_helper(void *arg, bus_dma_segment_t *segs, int nseg, int error) { struct ciss_command *cc; struct ciss_request *cr; struct ciss_softc *sc; int i; debug_called(2); cr = (struct ciss_request *)arg; sc = cr->cr_sc; cc = cr->cr_cc; for (i = 0; i < nseg; i++) { cc->sg[i].address = segs[i].ds_addr; cc->sg[i].length = segs[i].ds_len; cc->sg[i].extension = 0; } /* we leave the s/g table entirely within the command */ cc->header.sg_in_list = nseg; cc->header.sg_total = nseg; if (cr->cr_flags & CISS_REQ_DATAIN) bus_dmamap_sync(sc->ciss_buffer_dmat, cr->cr_datamap, BUS_DMASYNC_PREREAD); if (cr->cr_flags & CISS_REQ_DATAOUT) bus_dmamap_sync(sc->ciss_buffer_dmat, cr->cr_datamap, BUS_DMASYNC_PREWRITE); if (nseg == 0) cr->cr_sg_tag = CISS_SG_NONE; else if (nseg == 1) cr->cr_sg_tag = CISS_SG_1; else if (nseg == 2) cr->cr_sg_tag = CISS_SG_2; else if (nseg <= 4) cr->cr_sg_tag = CISS_SG_4; else if (nseg <= 8) cr->cr_sg_tag = CISS_SG_8; else if (nseg <= 16) cr->cr_sg_tag = CISS_SG_16; else if (nseg <= 32) cr->cr_sg_tag = CISS_SG_32; else cr->cr_sg_tag = CISS_SG_MAX; /* * Post the command to the adapter. */ cr->cr_flags |= CISS_REQ_BUSY; if (sc->ciss_perf) CISS_TL_PERF_POST_CMD(sc, cr); else CISS_TL_SIMPLE_POST_CMD(sc, cr->cr_ccphys); } /************************************************************************ * Unmap a request from bus-visible space. 
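 *
 * This is the mirror of the map path above: the POSTREAD/POSTWRITE
 * syncs below pair with the PREREAD/PREWRITE syncs issued when the
 * buffer was loaded.  (In the map path the segment count is also
 * rounded up to the next supported tag bucket, e.g. a 5-segment
 * transfer is posted as CISS_SG_8.)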
*/ static void ciss_unmap_request(struct ciss_request *cr) { struct ciss_softc *sc; debug_called(2); sc = cr->cr_sc; /* check that unmapping is necessary */ if ((cr->cr_flags & CISS_REQ_MAPPED) == 0) return; bus_dmamap_sync(sc->ciss_command_dmat, sc->ciss_command_map, BUS_DMASYNC_POSTWRITE); if (cr->cr_data == NULL) goto out; if (cr->cr_flags & CISS_REQ_DATAIN) bus_dmamap_sync(sc->ciss_buffer_dmat, cr->cr_datamap, BUS_DMASYNC_POSTREAD); if (cr->cr_flags & CISS_REQ_DATAOUT) bus_dmamap_sync(sc->ciss_buffer_dmat, cr->cr_datamap, BUS_DMASYNC_POSTWRITE); bus_dmamap_unload(sc->ciss_buffer_dmat, cr->cr_datamap); out: cr->cr_flags &= ~CISS_REQ_MAPPED; } /************************************************************************ * Attach the driver to CAM. * * We put all the logical drives on a single SCSI bus. */ static int ciss_cam_init(struct ciss_softc *sc) { int i, maxbus; debug_called(1); /* * Allocate a devq. We can reuse this for the masked physical * devices if we decide to export these as well. */ if ((sc->ciss_cam_devq = cam_simq_alloc(sc->ciss_max_requests - 2)) == NULL) { ciss_printf(sc, "can't allocate CAM SIM queue\n"); return(ENOMEM); } /* * Create a SIM. * * This naturally wastes a bit of memory. The alternative is to allocate * and register each bus as it is found, and then track them on a linked * list. Unfortunately, the driver has a few places where it needs to * look up the SIM based solely on bus number, and it's unclear whether * a list traversal would work for these situations. */ maxbus = max(sc->ciss_max_logical_bus, sc->ciss_max_physical_bus + CISS_PHYSICAL_BASE); - sc->ciss_cam_sim = malloc(maxbus * sizeof(struct cam_sim*), + sc->ciss_cam_sim = mallocarray(maxbus, sizeof(struct cam_sim*), CISS_MALLOC_CLASS, M_NOWAIT | M_ZERO); if (sc->ciss_cam_sim == NULL) { ciss_printf(sc, "can't allocate memory for controller SIM\n"); return(ENOMEM); } for (i = 0; i < sc->ciss_max_logical_bus; i++) { if ((sc->ciss_cam_sim[i] = cam_sim_alloc(ciss_cam_action, ciss_cam_poll, "ciss", sc, device_get_unit(sc->ciss_dev), &sc->ciss_mtx, 2, sc->ciss_max_requests - 2, sc->ciss_cam_devq)) == NULL) { ciss_printf(sc, "can't allocate CAM SIM for controller %d\n", i); return(ENOMEM); } /* * Register bus with this SIM. 
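 *
 * Only bus 0 and proxy buses that actually reported a controller
 * address are registered below.  The SIM queue depth of
 * ciss_max_requests - 2 presumably holds a couple of request slots
 * back for driver-internal commands (the event notify request and the
 * periodic NOP) so CAM cannot consume them all.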
*/ mtx_lock(&sc->ciss_mtx); if (i == 0 || sc->ciss_controllers[i].physical.bus != 0) { if (xpt_bus_register(sc->ciss_cam_sim[i], sc->ciss_dev, i) != 0) { ciss_printf(sc, "can't register SCSI bus %d\n", i); mtx_unlock(&sc->ciss_mtx); return (ENXIO); } } mtx_unlock(&sc->ciss_mtx); } for (i = CISS_PHYSICAL_BASE; i < sc->ciss_max_physical_bus + CISS_PHYSICAL_BASE; i++) { if ((sc->ciss_cam_sim[i] = cam_sim_alloc(ciss_cam_action, ciss_cam_poll, "ciss", sc, device_get_unit(sc->ciss_dev), &sc->ciss_mtx, 1, sc->ciss_max_requests - 2, sc->ciss_cam_devq)) == NULL) { ciss_printf(sc, "can't allocate CAM SIM for controller %d\n", i); return (ENOMEM); } mtx_lock(&sc->ciss_mtx); if (xpt_bus_register(sc->ciss_cam_sim[i], sc->ciss_dev, i) != 0) { ciss_printf(sc, "can't register SCSI bus %d\n", i); mtx_unlock(&sc->ciss_mtx); return (ENXIO); } mtx_unlock(&sc->ciss_mtx); } return(0); } /************************************************************************ * Initiate a rescan of the 'logical devices' SIM */ static void ciss_cam_rescan_target(struct ciss_softc *sc, int bus, int target) { union ccb *ccb; debug_called(1); if ((ccb = xpt_alloc_ccb_nowait()) == NULL) { ciss_printf(sc, "rescan failed (can't allocate CCB)\n"); return; } if (xpt_create_path(&ccb->ccb_h.path, NULL, cam_sim_path(sc->ciss_cam_sim[bus]), target, CAM_LUN_WILDCARD) != CAM_REQ_CMP) { ciss_printf(sc, "rescan failed (can't create path)\n"); xpt_free_ccb(ccb); return; } xpt_rescan(ccb); /* scan is now in progress */ } /************************************************************************ * Handle requests coming from CAM */ static void ciss_cam_action(struct cam_sim *sim, union ccb *ccb) { struct ciss_softc *sc; struct ccb_scsiio *csio; int bus, target; int physical; sc = cam_sim_softc(sim); bus = cam_sim_bus(sim); csio = (struct ccb_scsiio *)&ccb->csio; target = csio->ccb_h.target_id; physical = CISS_IS_PHYSICAL(bus); switch (ccb->ccb_h.func_code) { /* perform SCSI I/O */ case XPT_SCSI_IO: if (!ciss_cam_action_io(sim, csio)) return; break; /* perform geometry calculations */ case XPT_CALC_GEOMETRY: { struct ccb_calc_geometry *ccg = &ccb->ccg; struct ciss_ldrive *ld; debug(1, "XPT_CALC_GEOMETRY %d:%d:%d", cam_sim_bus(sim), ccb->ccb_h.target_id, ccb->ccb_h.target_lun); ld = NULL; if (!physical) ld = &sc->ciss_logical[bus][target]; /* * Use the cached geometry settings unless the fault tolerance * is invalid. */ if (physical || ld->cl_geometry.fault_tolerance == 0xFF) { u_int32_t secs_per_cylinder; ccg->heads = 255; ccg->secs_per_track = 32; secs_per_cylinder = ccg->heads * ccg->secs_per_track; ccg->cylinders = ccg->volume_size / secs_per_cylinder; } else { ccg->heads = ld->cl_geometry.heads; ccg->secs_per_track = ld->cl_geometry.sectors; ccg->cylinders = ntohs(ld->cl_geometry.cylinders); } ccb->ccb_h.status = CAM_REQ_CMP; break; } /* handle path attribute inquiry */ case XPT_PATH_INQ: { struct ccb_pathinq *cpi = &ccb->cpi; int sg_length; debug(1, "XPT_PATH_INQ %d:%d:%d", cam_sim_bus(sim), ccb->ccb_h.target_id, ccb->ccb_h.target_lun); cpi->version_num = 1; cpi->hba_inquiry = PI_TAG_ABLE; /* XXX is this correct? 
*/ cpi->target_sprt = 0; cpi->hba_misc = 0; cpi->max_target = sc->ciss_cfg->max_logical_supported; cpi->max_lun = 0; /* 'logical drive' channel only */ cpi->initiator_id = sc->ciss_cfg->max_logical_supported; strlcpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN); strlcpy(cpi->hba_vid, "CISS", HBA_IDLEN); strlcpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN); cpi->unit_number = cam_sim_unit(sim); cpi->bus_id = cam_sim_bus(sim); cpi->base_transfer_speed = 132 * 1024; /* XXX what to set this to? */ cpi->transport = XPORT_SPI; cpi->transport_version = 2; cpi->protocol = PROTO_SCSI; cpi->protocol_version = SCSI_REV_2; if (sc->ciss_cfg->max_sg_length == 0) { sg_length = 17; } else { /* XXX Fix for ZMR cards that advertise max_sg_length == 32 * Confusing bit here. max_sg_length is usually a power of 2. We always * need to subtract 1 to account for partial pages. Then we need to * align on a valid PAGE_SIZE so we round down to the nearest power of 2. * Add 1 so we can then subtract it out in the assignment to maxio. * The reason for all these shenanigans is to create a maxio value that * creates IO operations to volumes that yield consistent operations * with good performance. */ sg_length = sc->ciss_cfg->max_sg_length - 1; sg_length = (1 << (fls(sg_length) - 1)) + 1; } cpi->maxio = (min(CISS_MAX_SG_ELEMENTS, sg_length) - 1) * PAGE_SIZE; ccb->ccb_h.status = CAM_REQ_CMP; break; } case XPT_GET_TRAN_SETTINGS: { struct ccb_trans_settings *cts = &ccb->cts; int bus, target; struct ccb_trans_settings_spi *spi = &cts->xport_specific.spi; struct ccb_trans_settings_scsi *scsi = &cts->proto_specific.scsi; bus = cam_sim_bus(sim); target = cts->ccb_h.target_id; debug(1, "XPT_GET_TRAN_SETTINGS %d:%d", bus, target); /* disconnect always OK */ cts->protocol = PROTO_SCSI; cts->protocol_version = SCSI_REV_2; cts->transport = XPORT_SPI; cts->transport_version = 2; spi->valid = CTS_SPI_VALID_DISC; spi->flags = CTS_SPI_FLAGS_DISC_ENB; scsi->valid = CTS_SCSI_VALID_TQ; scsi->flags = CTS_SCSI_FLAGS_TAG_ENB; cts->ccb_h.status = CAM_REQ_CMP; break; } default: /* we can't do this */ debug(1, "unspported func_code = 0x%x", ccb->ccb_h.func_code); ccb->ccb_h.status = CAM_REQ_INVALID; break; } xpt_done(ccb); } /************************************************************************ * Handle a CAM SCSI I/O request. */ static int ciss_cam_action_io(struct cam_sim *sim, struct ccb_scsiio *csio) { struct ciss_softc *sc; int bus, target; struct ciss_request *cr; struct ciss_command *cc; int error; sc = cam_sim_softc(sim); bus = cam_sim_bus(sim); target = csio->ccb_h.target_id; debug(2, "XPT_SCSI_IO %d:%d:%d", bus, target, csio->ccb_h.target_lun); /* check that the CDB pointer is not to a physical address */ if ((csio->ccb_h.flags & CAM_CDB_POINTER) && (csio->ccb_h.flags & CAM_CDB_PHYS)) { debug(3, " CDB pointer is to physical address"); csio->ccb_h.status = CAM_REQ_CMP_ERR; } /* abandon aborted ccbs or those that have failed validation */ if ((csio->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_INPROG) { debug(3, "abandoning CCB due to abort/validation failure"); return(EINVAL); } /* handle emulation of some SCSI commands ourself */ if (ciss_cam_emulate(sc, csio)) return(0); /* * Get a request to manage this command. If we can't, return the * ccb, freeze the queue and flag so that we unfreeze it when a * request completes. */ if ((error = ciss_get_request(sc, &cr)) != 0) { xpt_freeze_simq(sim, 1); sc->ciss_flags |= CISS_FLAG_BUSY; csio->ccb_h.status |= CAM_REQUEUE_REQ; return(error); } /* * Build the command. 
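 *
 * The CCB itself is attached as cr_data with CISS_REQ_CCB set, so the
 * data is mapped later via bus_dmamap_load_ccb(); the CDB direction is
 * derived from CAM_DIR_MASK and the CAM timeout (milliseconds) is
 * rounded up to whole seconds for the firmware.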
*/ cc = cr->cr_cc; cr->cr_data = csio; cr->cr_length = csio->dxfer_len; cr->cr_complete = ciss_cam_complete; cr->cr_private = csio; /* * Target the right logical volume. */ if (CISS_IS_PHYSICAL(bus)) cc->header.address = sc->ciss_physical[CISS_CAM_TO_PBUS(bus)][target].cp_address; else cc->header.address = sc->ciss_logical[bus][target].cl_address; cc->cdb.cdb_length = csio->cdb_len; cc->cdb.type = CISS_CDB_TYPE_COMMAND; cc->cdb.attribute = CISS_CDB_ATTRIBUTE_SIMPLE; /* XXX ordered tags? */ if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_OUT) { cr->cr_flags = CISS_REQ_DATAOUT | CISS_REQ_CCB; cc->cdb.direction = CISS_CDB_DIRECTION_WRITE; } else if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) { cr->cr_flags = CISS_REQ_DATAIN | CISS_REQ_CCB; cc->cdb.direction = CISS_CDB_DIRECTION_READ; } else { cr->cr_data = NULL; cr->cr_flags = 0; cc->cdb.direction = CISS_CDB_DIRECTION_NONE; } cc->cdb.timeout = (csio->ccb_h.timeout / 1000) + 1; if (csio->ccb_h.flags & CAM_CDB_POINTER) { bcopy(csio->cdb_io.cdb_ptr, &cc->cdb.cdb[0], csio->cdb_len); } else { bcopy(csio->cdb_io.cdb_bytes, &cc->cdb.cdb[0], csio->cdb_len); } /* * Submit the request to the adapter. * * Note that this may fail if we're unable to map the request (and * if we ever learn a transport layer other than simple, may fail * if the adapter rejects the command). */ if ((error = ciss_start(cr)) != 0) { xpt_freeze_simq(sim, 1); csio->ccb_h.status |= CAM_RELEASE_SIMQ; if (error == EINPROGRESS) { error = 0; } else { csio->ccb_h.status |= CAM_REQUEUE_REQ; ciss_release_request(cr); } return(error); } return(0); } /************************************************************************ * Emulate SCSI commands the adapter doesn't handle as we might like. */ static int ciss_cam_emulate(struct ciss_softc *sc, struct ccb_scsiio *csio) { int bus, target; u_int8_t opcode; target = csio->ccb_h.target_id; bus = cam_sim_bus(xpt_path_sim(csio->ccb_h.path)); opcode = (csio->ccb_h.flags & CAM_CDB_POINTER) ? *(u_int8_t *)csio->cdb_io.cdb_ptr : csio->cdb_io.cdb_bytes[0]; if (CISS_IS_PHYSICAL(bus)) { if (sc->ciss_physical[CISS_CAM_TO_PBUS(bus)][target].cp_online != 1) { csio->ccb_h.status |= CAM_SEL_TIMEOUT; xpt_done((union ccb *)csio); return(1); } else return(0); } /* * Handle requests for volumes that don't exist or are not online. * A selection timeout is slightly better than an illegal request. * Other errors might be better. */ if (sc->ciss_logical[bus][target].cl_status != CISS_LD_ONLINE) { csio->ccb_h.status |= CAM_SEL_TIMEOUT; xpt_done((union ccb *)csio); return(1); } /* if we have to fake Synchronise Cache */ if (sc->ciss_flags & CISS_FLAG_FAKE_SYNCH) { /* * If this is a Synchronise Cache command, typically issued when * a device is closed, flush the adapter and complete now. */ if (((csio->ccb_h.flags & CAM_CDB_POINTER) ? *(u_int8_t *)csio->cdb_io.cdb_ptr : csio->cdb_io.cdb_bytes[0]) == SYNCHRONIZE_CACHE) { ciss_flush_adapter(sc); csio->ccb_h.status |= CAM_REQ_CMP; xpt_done((union ccb *)csio); return(1); } } /* * A CISS target can only ever have one lun per target. REPORT_LUNS requires * at least one LUN field to be pre created for us, so snag it and fill in * the least significant byte indicating 1 LUN here. Emulate the command * return to shut up warning on console of a CDB error. 
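 *
 * (The REPORT LUNS response begins with a 4-byte big-endian LUN list
 * length, so writing 8 into byte 3 below advertises exactly one 8-byte
 * LUN entry, i.e. LUN 0.)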
swb */ if (opcode == REPORT_LUNS && csio->dxfer_len > 0) { csio->data_ptr[3] = 8; csio->ccb_h.status |= CAM_REQ_CMP; xpt_done((union ccb *)csio); return(1); } return(0); } /************************************************************************ * Check for possibly-completed commands. */ static void ciss_cam_poll(struct cam_sim *sim) { cr_qhead_t qh; struct ciss_softc *sc = cam_sim_softc(sim); debug_called(2); STAILQ_INIT(&qh); if (sc->ciss_perf) ciss_perf_done(sc, &qh); else ciss_done(sc, &qh); ciss_complete(sc, &qh); } /************************************************************************ * Handle completion of a command - pass results back through the CCB */ static void ciss_cam_complete(struct ciss_request *cr) { struct ciss_softc *sc; struct ciss_command *cc; struct ciss_error_info *ce; struct ccb_scsiio *csio; int scsi_status; int command_status; debug_called(2); sc = cr->cr_sc; cc = cr->cr_cc; ce = (struct ciss_error_info *)&(cc->sg[0]); csio = (struct ccb_scsiio *)cr->cr_private; /* * Extract status values from request. */ ciss_report_request(cr, &command_status, &scsi_status); csio->scsi_status = scsi_status; /* * Handle specific SCSI status values. */ switch(scsi_status) { /* no status due to adapter error */ case -1: debug(0, "adapter error"); csio->ccb_h.status |= CAM_REQ_CMP_ERR; break; /* no status due to command completed OK */ case SCSI_STATUS_OK: /* CISS_SCSI_STATUS_GOOD */ debug(2, "SCSI_STATUS_OK"); csio->ccb_h.status |= CAM_REQ_CMP; break; /* check condition, sense data included */ case SCSI_STATUS_CHECK_COND: /* CISS_SCSI_STATUS_CHECK_CONDITION */ debug(0, "SCSI_STATUS_CHECK_COND sense size %d resid %d\n", ce->sense_length, ce->residual_count); bzero(&csio->sense_data, SSD_FULL_SIZE); bcopy(&ce->sense_info[0], &csio->sense_data, ce->sense_length); if (csio->sense_len > ce->sense_length) csio->sense_resid = csio->sense_len - ce->sense_length; else csio->sense_resid = 0; csio->resid = ce->residual_count; csio->ccb_h.status |= CAM_SCSI_STATUS_ERROR | CAM_AUTOSNS_VALID; #ifdef CISS_DEBUG { struct scsi_sense_data *sns = (struct scsi_sense_data *)&ce->sense_info[0]; debug(0, "sense key %x", scsi_get_sense_key(sns, csio->sense_len - csio->sense_resid, /*show_errors*/ 1)); } #endif break; case SCSI_STATUS_BUSY: /* CISS_SCSI_STATUS_BUSY */ debug(0, "SCSI_STATUS_BUSY"); csio->ccb_h.status |= CAM_SCSI_BUSY; break; default: debug(0, "unknown status 0x%x", csio->scsi_status); csio->ccb_h.status |= CAM_REQ_CMP_ERR; break; } /* handle post-command fixup */ ciss_cam_complete_fixup(sc, csio); ciss_release_request(cr); if (sc->ciss_flags & CISS_FLAG_BUSY) { sc->ciss_flags &= ~CISS_FLAG_BUSY; if (csio->ccb_h.status & CAM_RELEASE_SIMQ) xpt_release_simq(xpt_path_sim(csio->ccb_h.path), 0); else csio->ccb_h.status |= CAM_RELEASE_SIMQ; } xpt_done((union ccb *)csio); } /******************************************************************************** * Fix up the result of some commands here. */ static void ciss_cam_complete_fixup(struct ciss_softc *sc, struct ccb_scsiio *csio) { struct scsi_inquiry_data *inq; struct ciss_ldrive *cl; uint8_t *cdb; int bus, target; cdb = (csio->ccb_h.flags & CAM_CDB_POINTER) ? 
(uint8_t *)csio->cdb_io.cdb_ptr : csio->cdb_io.cdb_bytes; if (cdb[0] == INQUIRY && (cdb[1] & SI_EVPD) == 0 && (csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN && csio->dxfer_len >= SHORT_INQUIRY_LENGTH) { inq = (struct scsi_inquiry_data *)csio->data_ptr; target = csio->ccb_h.target_id; bus = cam_sim_bus(xpt_path_sim(csio->ccb_h.path)); /* * If the controller is in JBOD mode, there are no logical volumes. * Let the disks be probed and dealt with via CAM. Else, mask off * the physical disks and setup the parts of the inq structure for * the logical volume. swb */ if( !(sc->ciss_id->PowerUPNvramFlags & PWR_UP_FLAG_JBOD_ENABLED)){ if (CISS_IS_PHYSICAL(bus)) { if (SID_TYPE(inq) == T_DIRECT) inq->device = (inq->device & 0xe0) | T_NODEVICE; return; } cl = &sc->ciss_logical[bus][target]; padstr(inq->vendor, "HP", SID_VENDOR_SIZE); padstr(inq->product, ciss_name_ldrive_org(cl->cl_ldrive->fault_tolerance), SID_PRODUCT_SIZE); padstr(inq->revision, ciss_name_ldrive_status(cl->cl_lstatus->status), SID_REVISION_SIZE); } } } /******************************************************************************** * Name the device at (target) * * XXX is this strictly correct? */ static int ciss_name_device(struct ciss_softc *sc, int bus, int target) { struct cam_periph *periph; struct cam_path *path; int status; if (CISS_IS_PHYSICAL(bus)) return (0); status = xpt_create_path(&path, NULL, cam_sim_path(sc->ciss_cam_sim[bus]), target, 0); if (status == CAM_REQ_CMP) { xpt_path_lock(path); periph = cam_periph_find(path, NULL); xpt_path_unlock(path); xpt_free_path(path); if (periph != NULL) { sprintf(sc->ciss_logical[bus][target].cl_name, "%s%d", periph->periph_name, periph->unit_number); return(0); } } sc->ciss_logical[bus][target].cl_name[0] = 0; return(ENOENT); } /************************************************************************ * Periodic status monitoring. */ static void ciss_periodic(void *arg) { struct ciss_softc *sc; struct ciss_request *cr = NULL; struct ciss_command *cc = NULL; int error = 0; debug_called(1); sc = (struct ciss_softc *)arg; /* * Check the adapter heartbeat. */ if (sc->ciss_cfg->heartbeat == sc->ciss_heartbeat) { sc->ciss_heart_attack++; debug(0, "adapter heart attack in progress 0x%x/%d", sc->ciss_heartbeat, sc->ciss_heart_attack); if (sc->ciss_heart_attack == 3) { ciss_printf(sc, "ADAPTER HEARTBEAT FAILED\n"); ciss_disable_adapter(sc); return; } } else { sc->ciss_heartbeat = sc->ciss_cfg->heartbeat; sc->ciss_heart_attack = 0; debug(3, "new heartbeat 0x%x", sc->ciss_heartbeat); } /* * Send the NOP message and wait for a response. */ if (ciss_nop_message_heartbeat != 0 && (error = ciss_get_request(sc, &cr)) == 0) { cc = cr->cr_cc; cr->cr_complete = ciss_nop_complete; cc->cdb.cdb_length = 1; cc->cdb.type = CISS_CDB_TYPE_MESSAGE; cc->cdb.attribute = CISS_CDB_ATTRIBUTE_SIMPLE; cc->cdb.direction = CISS_CDB_DIRECTION_WRITE; cc->cdb.timeout = 0; cc->cdb.cdb[0] = CISS_OPCODE_MESSAGE_NOP; if ((error = ciss_start(cr)) != 0) { ciss_printf(sc, "SENDING NOP MESSAGE FAILED\n"); } } /* * If the notify event request has died for some reason, or has * not started yet, restart it. */ if (!(sc->ciss_flags & CISS_FLAG_NOTIFY_OK)) { debug(0, "(re)starting Event Notify chain"); ciss_notify_event(sc); } /* * Reschedule. 
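 *
 * The heartbeat check above needs three consecutive periods with no
 * change before the adapter is declared dead, so failure detection
 * takes on the order of 3 * CISS_HEARTBEAT_RATE seconds; the callout
 * below keeps this routine running at that rate.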
*/ callout_reset(&sc->ciss_periodic, CISS_HEARTBEAT_RATE * hz, ciss_periodic, sc); } static void ciss_nop_complete(struct ciss_request *cr) { struct ciss_softc *sc; static int first_time = 1; sc = cr->cr_sc; if (ciss_report_request(cr, NULL, NULL) != 0) { if (first_time == 1) { first_time = 0; ciss_printf(sc, "SENDING NOP MESSAGE FAILED (not logging anymore)\n"); } } ciss_release_request(cr); } /************************************************************************ * Disable the adapter. * * The all requests in completed queue is failed with hardware error. * This will cause failover in a multipath configuration. */ static void ciss_disable_adapter(struct ciss_softc *sc) { cr_qhead_t qh; struct ciss_request *cr; struct ciss_command *cc; struct ciss_error_info *ce; int i; CISS_TL_SIMPLE_DISABLE_INTERRUPTS(sc); pci_disable_busmaster(sc->ciss_dev); sc->ciss_flags &= ~CISS_FLAG_RUNNING; for (i = 1; i < sc->ciss_max_requests; i++) { cr = &sc->ciss_request[i]; if ((cr->cr_flags & CISS_REQ_BUSY) == 0) continue; cc = cr->cr_cc; ce = (struct ciss_error_info *)&(cc->sg[0]); ce->command_status = CISS_CMD_STATUS_HARDWARE_ERROR; ciss_enqueue_complete(cr, &qh); } for (;;) { if ((cr = ciss_dequeue_complete(sc, &qh)) == NULL) break; /* * If the request has a callback, invoke it. */ if (cr->cr_complete != NULL) { cr->cr_complete(cr); continue; } /* * If someone is sleeping on this request, wake them up. */ if (cr->cr_flags & CISS_REQ_SLEEP) { cr->cr_flags &= ~CISS_REQ_SLEEP; wakeup(cr); continue; } } } /************************************************************************ * Request a notification response from the adapter. * * If (cr) is NULL, this is the first request of the adapter, so * reset the adapter's message pointer and start with the oldest * message available. */ static void ciss_notify_event(struct ciss_softc *sc) { struct ciss_request *cr; struct ciss_command *cc; struct ciss_notify_cdb *cnc; int error; debug_called(1); cr = sc->ciss_periodic_notify; /* get a request if we don't already have one */ if (cr == NULL) { if ((error = ciss_get_request(sc, &cr)) != 0) { debug(0, "can't get notify event request"); goto out; } sc->ciss_periodic_notify = cr; cr->cr_complete = ciss_notify_complete; debug(1, "acquired request %d", cr->cr_tag); } /* * Get a databuffer if we don't already have one, note that the * adapter command wants a larger buffer than the actual * structure. 
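 *
 * The request and this buffer are long-lived: the completion handler
 * re-arms the command by calling back into this function rather than
 * releasing it, so the buffer is reused from one notification to the
 * next instead of being reallocated each time.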
*/ if (cr->cr_data == NULL) { if ((cr->cr_data = malloc(CISS_NOTIFY_DATA_SIZE, CISS_MALLOC_CLASS, M_NOWAIT)) == NULL) { debug(0, "can't get notify event request buffer"); error = ENOMEM; goto out; } cr->cr_length = CISS_NOTIFY_DATA_SIZE; } /* re-setup the request's command (since we never release it) XXX overkill*/ ciss_preen_command(cr); /* (re)build the notify event command */ cc = cr->cr_cc; cc->header.address.physical.mode = CISS_HDR_ADDRESS_MODE_PERIPHERAL; cc->header.address.physical.bus = 0; cc->header.address.physical.target = 0; cc->cdb.cdb_length = sizeof(*cnc); cc->cdb.type = CISS_CDB_TYPE_COMMAND; cc->cdb.attribute = CISS_CDB_ATTRIBUTE_SIMPLE; cc->cdb.direction = CISS_CDB_DIRECTION_READ; cc->cdb.timeout = 0; /* no timeout, we hope */ cnc = (struct ciss_notify_cdb *)&(cc->cdb.cdb[0]); bzero(cr->cr_data, CISS_NOTIFY_DATA_SIZE); cnc->opcode = CISS_OPCODE_READ; cnc->command = CISS_COMMAND_NOTIFY_ON_EVENT; cnc->timeout = 0; /* no timeout, we hope */ cnc->synchronous = 0; cnc->ordered = 0; cnc->seek_to_oldest = 0; if ((sc->ciss_flags & CISS_FLAG_RUNNING) == 0) cnc->new_only = 1; else cnc->new_only = 0; cnc->length = htonl(CISS_NOTIFY_DATA_SIZE); /* submit the request */ error = ciss_start(cr); out: if (error) { if (cr != NULL) { if (cr->cr_data != NULL) free(cr->cr_data, CISS_MALLOC_CLASS); ciss_release_request(cr); } sc->ciss_periodic_notify = NULL; debug(0, "can't submit notify event request"); sc->ciss_flags &= ~CISS_FLAG_NOTIFY_OK; } else { debug(1, "notify event submitted"); sc->ciss_flags |= CISS_FLAG_NOTIFY_OK; } } static void ciss_notify_complete(struct ciss_request *cr) { struct ciss_command *cc; struct ciss_notify *cn; struct ciss_softc *sc; int scsi_status; int command_status; debug_called(1); cc = cr->cr_cc; cn = (struct ciss_notify *)cr->cr_data; sc = cr->cr_sc; /* * Report request results, decode status. */ ciss_report_request(cr, &command_status, &scsi_status); /* * Abort the chain on a fatal error. * * XXX which of these are actually errors? */ if ((command_status != CISS_CMD_STATUS_SUCCESS) && (command_status != CISS_CMD_STATUS_TARGET_STATUS) && (command_status != CISS_CMD_STATUS_TIMEOUT)) { /* XXX timeout? */ ciss_printf(sc, "fatal error in Notify Event request (%s)\n", ciss_name_command_status(command_status)); ciss_release_request(cr); sc->ciss_flags &= ~CISS_FLAG_NOTIFY_OK; return; } /* * If the adapter gave us a text message, print it. */ if (cn->message[0] != 0) ciss_printf(sc, "*** %.80s\n", cn->message); debug(0, "notify event class %d subclass %d detail %d", cn->class, cn->subclass, cn->detail); /* * If the response indicates that the notifier has been aborted, * release the notifier command. */ if ((cn->class == CISS_NOTIFY_NOTIFIER) && (cn->subclass == CISS_NOTIFY_NOTIFIER_STATUS) && (cn->detail == 1)) { debug(0, "notifier exiting"); sc->ciss_flags &= ~CISS_FLAG_NOTIFY_OK; ciss_release_request(cr); sc->ciss_periodic_notify = NULL; wakeup(&sc->ciss_periodic_notify); } else { /* Handle notify events in a kernel thread */ ciss_enqueue_notify(cr); sc->ciss_periodic_notify = NULL; wakeup(&sc->ciss_periodic_notify); wakeup(&sc->ciss_notify); } /* * Send a new notify event command, if we're not aborting. */ if (!(sc->ciss_flags & CISS_FLAG_ABORTING)) { ciss_notify_event(sc); } } /************************************************************************ * Abort the Notify Event chain. * * Note that we can't just abort the command in progress; we have to * explicitly issue an Abort Notify Event command in order for the * adapter to clean up correctly. 
* * If we are called with CISS_FLAG_ABORTING set in the adapter softc, * the chain will not restart itself. */ static int ciss_notify_abort(struct ciss_softc *sc) { struct ciss_request *cr; struct ciss_command *cc; struct ciss_notify_cdb *cnc; int error, command_status, scsi_status; debug_called(1); cr = NULL; error = 0; /* verify that there's an outstanding command */ if (!(sc->ciss_flags & CISS_FLAG_NOTIFY_OK)) goto out; /* get a command to issue the abort with */ if ((error = ciss_get_request(sc, &cr))) goto out; /* get a buffer for the result */ if ((cr->cr_data = malloc(CISS_NOTIFY_DATA_SIZE, CISS_MALLOC_CLASS, M_NOWAIT)) == NULL) { debug(0, "can't get notify event request buffer"); error = ENOMEM; goto out; } cr->cr_length = CISS_NOTIFY_DATA_SIZE; /* build the CDB */ cc = cr->cr_cc; cc->header.address.physical.mode = CISS_HDR_ADDRESS_MODE_PERIPHERAL; cc->header.address.physical.bus = 0; cc->header.address.physical.target = 0; cc->cdb.cdb_length = sizeof(*cnc); cc->cdb.type = CISS_CDB_TYPE_COMMAND; cc->cdb.attribute = CISS_CDB_ATTRIBUTE_SIMPLE; cc->cdb.direction = CISS_CDB_DIRECTION_READ; cc->cdb.timeout = 0; /* no timeout, we hope */ cnc = (struct ciss_notify_cdb *)&(cc->cdb.cdb[0]); bzero(cnc, sizeof(*cnc)); cnc->opcode = CISS_OPCODE_WRITE; cnc->command = CISS_COMMAND_ABORT_NOTIFY; cnc->length = htonl(CISS_NOTIFY_DATA_SIZE); ciss_print_request(cr); /* * Submit the request and wait for it to complete. */ if ((error = ciss_synch_request(cr, 60 * 1000)) != 0) { ciss_printf(sc, "Abort Notify Event command failed (%d)\n", error); goto out; } /* * Check response. */ ciss_report_request(cr, &command_status, &scsi_status); switch(command_status) { case CISS_CMD_STATUS_SUCCESS: break; case CISS_CMD_STATUS_INVALID_COMMAND: /* * Some older adapters don't support the CISS version of this * command. Fall back to using the BMIC version. */ error = ciss_notify_abort_bmic(sc); if (error != 0) goto out; break; case CISS_CMD_STATUS_TARGET_STATUS: /* * This can happen if the adapter thinks there wasn't an outstanding * Notify Event command but we did. We clean up here. */ if (scsi_status == CISS_SCSI_STATUS_CHECK_CONDITION) { if (sc->ciss_periodic_notify != NULL) ciss_release_request(sc->ciss_periodic_notify); error = 0; goto out; } /* FALLTHROUGH */ default: ciss_printf(sc, "Abort Notify Event command failed (%s)\n", ciss_name_command_status(command_status)); error = EIO; goto out; } /* * Sleep waiting for the notifier command to complete. Note * that if it doesn't, we may end up in a bad situation, since * the adapter may deliver it later. Also note that the adapter * requires the Notify Event command to be cancelled in order to * maintain internal bookkeeping. */ while (sc->ciss_periodic_notify != NULL) { error = msleep(&sc->ciss_periodic_notify, &sc->ciss_mtx, PRIBIO, "cissNEA", hz * 5); if (error == EWOULDBLOCK) { ciss_printf(sc, "Notify Event command failed to abort, adapter may wedge.\n"); break; } } out: /* release the cancel request */ if (cr != NULL) { if (cr->cr_data != NULL) free(cr->cr_data, CISS_MALLOC_CLASS); ciss_release_request(cr); } if (error == 0) sc->ciss_flags &= ~CISS_FLAG_NOTIFY_OK; return(error); } /************************************************************************ * Abort the Notify Event chain using a BMIC command. 
*/ static int ciss_notify_abort_bmic(struct ciss_softc *sc) { struct ciss_request *cr; int error, command_status; debug_called(1); cr = NULL; error = 0; /* verify that there's an outstanding command */ if (!(sc->ciss_flags & CISS_FLAG_NOTIFY_OK)) goto out; /* * Build a BMIC command to cancel the Notify on Event command. * * Note that we are sending a CISS opcode here. Odd. */ if ((error = ciss_get_bmic_request(sc, &cr, CISS_COMMAND_ABORT_NOTIFY, NULL, 0)) != 0) goto out; /* * Submit the request and wait for it to complete. */ if ((error = ciss_synch_request(cr, 60 * 1000)) != 0) { ciss_printf(sc, "error sending BMIC Cancel Notify on Event command (%d)\n", error); goto out; } /* * Check response. */ ciss_report_request(cr, &command_status, NULL); switch(command_status) { case CISS_CMD_STATUS_SUCCESS: break; default: ciss_printf(sc, "error cancelling Notify on Event (%s)\n", ciss_name_command_status(command_status)); error = EIO; goto out; } out: if (cr != NULL) ciss_release_request(cr); return(error); } /************************************************************************ * Handle rescanning all the logical volumes when a notify event * causes the drives to come online or offline. */ static void ciss_notify_rescan_logical(struct ciss_softc *sc) { struct ciss_lun_report *cll; struct ciss_ldrive *ld; int i, j, ndrives; /* * We must rescan all logical volumes to get the right logical * drive address. */ cll = ciss_report_luns(sc, CISS_OPCODE_REPORT_LOGICAL_LUNS, sc->ciss_cfg->max_logical_supported); if (cll == NULL) return; ndrives = (ntohl(cll->list_size) / sizeof(union ciss_device_address)); /* * Delete any of the drives which were destroyed by the * firmware. */ for (i = 0; i < sc->ciss_max_logical_bus; i++) { for (j = 0; j < sc->ciss_cfg->max_logical_supported; j++) { ld = &sc->ciss_logical[i][j]; if (ld->cl_update == 0) continue; if (ld->cl_status != CISS_LD_ONLINE) { ciss_cam_rescan_target(sc, i, j); ld->cl_update = 0; if (ld->cl_ldrive) free(ld->cl_ldrive, CISS_MALLOC_CLASS); if (ld->cl_lstatus) free(ld->cl_lstatus, CISS_MALLOC_CLASS); ld->cl_ldrive = NULL; ld->cl_lstatus = NULL; } } } /* * Scan for new drives. */ for (i = 0; i < ndrives; i++) { int bus, target; bus = CISS_LUN_TO_BUS(cll->lun[i].logical.lun); target = CISS_LUN_TO_TARGET(cll->lun[i].logical.lun); ld = &sc->ciss_logical[bus][target]; if (ld->cl_update == 0) continue; ld->cl_update = 0; ld->cl_address = cll->lun[i]; ld->cl_controller = &sc->ciss_controllers[bus]; if (ciss_identify_logical(sc, ld) == 0) { ciss_cam_rescan_target(sc, bus, target); } } free(cll, CISS_MALLOC_CLASS); } /************************************************************************ * Handle a notify event relating to the status of a logical drive. * * XXX need to be able to defer some of these to properly handle * calling the "ID Physical drive" command, unless the 'extended' * drive IDs are always in BIG_MAP format. 
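 *
 * Illustrative sketch (not part of the driver): the REPORT LOGICAL LUNS
 * reply walked by ciss_notify_rescan_logical() above carries list_size as a
 * big-endian byte count, so the entry count is that value divided by the
 * size of one address entry, exactly as the ndrives calculation does:
 *
 *	// hypothetical reply advertising three address entries
 *	uint32_t list_size_be = htonl(3 * sizeof(union ciss_device_address));
 *	int ndrives = ntohl(list_size_be) / sizeof(union ciss_device_address);
 *	// ndrives == 3; each entry then yields a bus/target pair through
 *	// CISS_LUN_TO_BUS()/CISS_LUN_TO_TARGET() before the CAM rescan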
*/ static void ciss_notify_logical(struct ciss_softc *sc, struct ciss_notify *cn) { struct ciss_ldrive *ld; int ostatus, bus, target; debug_called(2); bus = cn->device.physical.bus; target = cn->data.logical_status.logical_drive; ld = &sc->ciss_logical[bus][target]; switch (cn->subclass) { case CISS_NOTIFY_LOGICAL_STATUS: switch (cn->detail) { case 0: ciss_name_device(sc, bus, target); ciss_printf(sc, "logical drive %d (%s) changed status %s->%s, spare status 0x%b\n", cn->data.logical_status.logical_drive, ld->cl_name, ciss_name_ldrive_status(cn->data.logical_status.previous_state), ciss_name_ldrive_status(cn->data.logical_status.new_state), cn->data.logical_status.spare_state, "\20\1configured\2rebuilding\3failed\4in use\5available\n"); /* * Update our idea of the drive's status. */ ostatus = ciss_decode_ldrive_status(cn->data.logical_status.previous_state); ld->cl_status = ciss_decode_ldrive_status(cn->data.logical_status.new_state); if (ld->cl_lstatus != NULL) ld->cl_lstatus->status = cn->data.logical_status.new_state; /* * Have CAM rescan the drive if its status has changed. */ if (ostatus != ld->cl_status) { ld->cl_update = 1; ciss_notify_rescan_logical(sc); } break; case 1: /* logical drive has recognised new media, needs Accept Media Exchange */ ciss_name_device(sc, bus, target); ciss_printf(sc, "logical drive %d (%s) media exchanged, ready to go online\n", cn->data.logical_status.logical_drive, ld->cl_name); ciss_accept_media(sc, ld); ld->cl_update = 1; ld->cl_status = ciss_decode_ldrive_status(cn->data.logical_status.new_state); ciss_notify_rescan_logical(sc); break; case 2: case 3: ciss_printf(sc, "rebuild of logical drive %d (%s) failed due to %s error\n", cn->data.rebuild_aborted.logical_drive, ld->cl_name, (cn->detail == 2) ? "read" : "write"); break; } break; case CISS_NOTIFY_LOGICAL_ERROR: if (cn->detail == 0) { ciss_printf(sc, "FATAL I/O ERROR on logical drive %d (%s), SCSI port %d ID %d\n", cn->data.io_error.logical_drive, ld->cl_name, cn->data.io_error.failure_bus, cn->data.io_error.failure_drive); /* XXX should we take the drive down at this point, or will we be told? */ } break; case CISS_NOTIFY_LOGICAL_SURFACE: if (cn->detail == 0) ciss_printf(sc, "logical drive %d (%s) completed consistency initialisation\n", cn->data.consistency_completed.logical_drive, ld->cl_name); break; } } /************************************************************************ * Handle a notify event relating to the status of a physical drive. */ static void ciss_notify_physical(struct ciss_softc *sc, struct ciss_notify *cn) { } /************************************************************************ * Handle a notify event relating to the status of a physical drive. */ static void ciss_notify_hotplug(struct ciss_softc *sc, struct ciss_notify *cn) { struct ciss_lun_report *cll = NULL; int bus, target; switch (cn->subclass) { case CISS_NOTIFY_HOTPLUG_PHYSICAL: case CISS_NOTIFY_HOTPLUG_NONDISK: bus = CISS_BIG_MAP_BUS(sc, cn->data.drive.big_physical_drive_number); target = CISS_BIG_MAP_TARGET(sc, cn->data.drive.big_physical_drive_number); if (cn->detail == 0) { /* * Mark the device offline so that it'll start producing selection * timeouts to the upper layer. 
 */
	    if ((bus >= 0) && (target >= 0))
		sc->ciss_physical[bus][target].cp_online = 0;
	} else {
	    /*
	     * Rescan the physical lun list for new items
	     */
	    cll = ciss_report_luns(sc, CISS_OPCODE_REPORT_PHYSICAL_LUNS,
				   sc->ciss_cfg->max_physical_supported);
	    if (cll == NULL) {
		ciss_printf(sc, "Warning, cannot get physical lun list\n");
		break;
	    }
	    ciss_filter_physical(sc, cll);
	}
	break;

    default:
	ciss_printf(sc, "Unknown hotplug event %d\n", cn->subclass);
	return;
    }

    if (cll != NULL)
	free(cll, CISS_MALLOC_CLASS);
}

/************************************************************************
 * Handle deferred processing of notify events.  Notify events may
 * require sleeping, which is not safe in interrupt context.
 */
static void
ciss_notify_thread(void *arg)
{
    struct ciss_softc	*sc;
    struct ciss_request	*cr;
    struct ciss_notify	*cn;

    sc = (struct ciss_softc *)arg;
    mtx_lock(&sc->ciss_mtx);

    for (;;) {
	if (STAILQ_EMPTY(&sc->ciss_notify) != 0 &&
	    (sc->ciss_flags & CISS_FLAG_THREAD_SHUT) == 0) {
	    msleep(&sc->ciss_notify, &sc->ciss_mtx, PUSER, "idle", 0);
	}

	if (sc->ciss_flags & CISS_FLAG_THREAD_SHUT)
	    break;

	cr = ciss_dequeue_notify(sc);

	if (cr == NULL)
	    panic("cr null");
	cn = (struct ciss_notify *)cr->cr_data;

	switch (cn->class) {
	case CISS_NOTIFY_HOTPLUG:
	    ciss_notify_hotplug(sc, cn);
	    break;
	case CISS_NOTIFY_LOGICAL:
	    ciss_notify_logical(sc, cn);
	    break;
	case CISS_NOTIFY_PHYSICAL:
	    ciss_notify_physical(sc, cn);
	    break;
	}

	ciss_release_request(cr);
    }

    sc->ciss_notify_thread = NULL;
    wakeup(&sc->ciss_notify_thread);
    mtx_unlock(&sc->ciss_mtx);
    kproc_exit(0);
}

/************************************************************************
 * Start the notification kernel thread.
 */
static void
ciss_spawn_notify_thread(struct ciss_softc *sc)
{
    if (kproc_create((void(*)(void *))ciss_notify_thread, sc,
		     &sc->ciss_notify_thread, 0, 0, "ciss_notify%d",
		     device_get_unit(sc->ciss_dev)))
	panic("Could not create notify thread\n");
}

/************************************************************************
 * Kill the notification kernel thread.
 */
static void
ciss_kill_notify_thread(struct ciss_softc *sc)
{
    if (sc->ciss_notify_thread == NULL)
	return;

    sc->ciss_flags |= CISS_FLAG_THREAD_SHUT;
    wakeup(&sc->ciss_notify);
    msleep(&sc->ciss_notify_thread, &sc->ciss_mtx, PUSER, "thtrm", 0);
}

/************************************************************************
 * Print a request.
 */
static void
ciss_print_request(struct ciss_request *cr)
{
    struct ciss_softc	*sc;
    struct ciss_command	*cc;
    int			i;

    sc = cr->cr_sc;
    cc = cr->cr_cc;

    ciss_printf(sc, "REQUEST @ %p\n", cr);
    ciss_printf(sc, " data %p/%d tag %d flags %b\n",
		cr->cr_data, cr->cr_length, cr->cr_tag, cr->cr_flags,
		"\20\1mapped\2sleep\3poll\4dataout\5datain\n");
    ciss_printf(sc, " sg list/total %d/%d host tag 0x%x\n",
		cc->header.sg_in_list, cc->header.sg_total, cc->header.host_tag);
    switch(cc->header.address.mode.mode) {
    case CISS_HDR_ADDRESS_MODE_PERIPHERAL:
    case CISS_HDR_ADDRESS_MODE_MASK_PERIPHERAL:
	ciss_printf(sc, " physical bus %d target %d\n",
		    cc->header.address.physical.bus,
		    cc->header.address.physical.target);
	break;
    case CISS_HDR_ADDRESS_MODE_LOGICAL:
	ciss_printf(sc, " logical unit %d\n", cc->header.address.logical.lun);
	break;
    }
    ciss_printf(sc, " %s cdb length %d type %s attribute %s\n",
		(cc->cdb.direction == CISS_CDB_DIRECTION_NONE) ? "no-I/O" :
		(cc->cdb.direction == CISS_CDB_DIRECTION_READ) ? "READ" :
		(cc->cdb.direction == CISS_CDB_DIRECTION_WRITE) ? "WRITE" : "??",
		cc->cdb.cdb_length,
		(cc->cdb.type == CISS_CDB_TYPE_COMMAND) ? "command" :
		(cc->cdb.type == CISS_CDB_TYPE_MESSAGE) ? "message" : "??",
		(cc->cdb.attribute == CISS_CDB_ATTRIBUTE_UNTAGGED) ? "untagged" :
		(cc->cdb.attribute == CISS_CDB_ATTRIBUTE_SIMPLE) ? "simple" :
		(cc->cdb.attribute == CISS_CDB_ATTRIBUTE_HEAD_OF_QUEUE) ? "head-of-queue" :
		(cc->cdb.attribute == CISS_CDB_ATTRIBUTE_ORDERED) ? "ordered" :
		(cc->cdb.attribute == CISS_CDB_ATTRIBUTE_AUTO_CONTINGENT) ? "auto-contingent" : "??");
    ciss_printf(sc, " %*D\n", cc->cdb.cdb_length, &cc->cdb.cdb[0], " ");

    if (cc->header.host_tag & CISS_HDR_HOST_TAG_ERROR) {
	/* XXX print error info */
    } else {
	/* since we don't use chained s/g, don't support it here */
	for (i = 0; i < cc->header.sg_in_list; i++) {
	    if ((i % 4) == 0)
		ciss_printf(sc, " ");
	    printf("0x%08x/%d ", (u_int32_t)cc->sg[i].address, cc->sg[i].length);
	    if ((((i + 1) % 4) == 0) || (i == (cc->header.sg_in_list - 1)))
		printf("\n");
	}
    }
}

/************************************************************************
 * Print information about the status of a logical drive.
 */
static void
ciss_print_ldrive(struct ciss_softc *sc, struct ciss_ldrive *ld)
{
    int		bus, target, i;

    if (ld->cl_lstatus == NULL) {
	printf("does not exist\n");
	return;
    }

    /* print drive status */
    switch(ld->cl_lstatus->status) {
    case CISS_LSTATUS_OK:
	printf("online\n");
	break;
    case CISS_LSTATUS_INTERIM_RECOVERY:
	printf("in interim recovery mode\n");
	break;
    case CISS_LSTATUS_READY_RECOVERY:
	printf("ready to begin recovery\n");
	break;
    case CISS_LSTATUS_RECOVERING:
	bus    = CISS_BIG_MAP_BUS(sc, ld->cl_lstatus->drive_rebuilding);
	target = CISS_BIG_MAP_TARGET(sc, ld->cl_lstatus->drive_rebuilding);
	printf("being recovered, working on physical drive %d.%d, %u blocks remaining\n",
	       bus, target, ld->cl_lstatus->blocks_to_recover);
	break;
    case CISS_LSTATUS_EXPANDING:
	printf("being expanded, %u blocks remaining\n",
	       ld->cl_lstatus->blocks_to_recover);
	break;
    case CISS_LSTATUS_QUEUED_FOR_EXPANSION:
	printf("queued for expansion\n");
	break;
    case CISS_LSTATUS_FAILED:
	printf("failed\n");
	break;
    case CISS_LSTATUS_WRONG_PDRIVE:
	printf("wrong physical drive inserted\n");
	break;
    case CISS_LSTATUS_MISSING_PDRIVE:
	printf("missing a needed physical drive\n");
	break;
    case CISS_LSTATUS_BECOMING_READY:
	printf("becoming ready\n");
	break;
    }

    /* print failed physical drives */
    for (i = 0; i < CISS_BIG_MAP_ENTRIES / 8; i++) {
	bus    = CISS_BIG_MAP_BUS(sc, ld->cl_lstatus->drive_failure_map[i]);
	target = CISS_BIG_MAP_TARGET(sc, ld->cl_lstatus->drive_failure_map[i]);
	if (bus == -1)
	    continue;
	ciss_printf(sc, "physical drive %d:%d (%x) failed\n", bus, target,
		    ld->cl_lstatus->drive_failure_map[i]);
    }
}

#ifdef CISS_DEBUG
#include "opt_ddb.h"
#ifdef DDB
#include
/************************************************************************
 * Print information about the controller/driver.
 */
static void
ciss_print_adapter(struct ciss_softc *sc)
{
    int		i, j;

    ciss_printf(sc, "ADAPTER:\n");
    for (i = 0; i < CISSQ_COUNT; i++) {
	ciss_printf(sc, "%s %d/%d\n",
	    i == 0 ? "free" :
	    i == 1 ? "busy" : "complete",
	    sc->ciss_qstat[i].q_length,
	    sc->ciss_qstat[i].q_max);
    }
    ciss_printf(sc, "max_requests %d\n", sc->ciss_max_requests);
    ciss_printf(sc, "flags %b\n", sc->ciss_flags,
	"\20\1notify_ok\2control_open\3aborting\4running\21fake_synch\22bmic_abort\n");

    for (i = 0; i < sc->ciss_max_logical_bus; i++) {
	for (j = 0; j < sc->ciss_cfg->max_logical_supported; j++) {
	    ciss_printf(sc, "LOGICAL DRIVE %d: ", i);
	    ciss_print_ldrive(sc, &sc->ciss_logical[i][j]);
	}
    }

    /* XXX Should physical drives be printed out here?
*/ for (i = 1; i < sc->ciss_max_requests; i++) ciss_print_request(sc->ciss_request + i); } /* DDB hook */ DB_COMMAND(ciss_prt, db_ciss_prt) { struct ciss_softc *sc; devclass_t dc; int maxciss, i; dc = devclass_find("ciss"); if ( dc == NULL ) { printf("%s: can't find devclass!\n", __func__); return; } maxciss = devclass_get_maxunit(dc); for (i = 0; i < maxciss; i++) { sc = devclass_get_softc(dc, i); ciss_print_adapter(sc); } } #endif #endif /************************************************************************ * Return a name for a logical drive status value. */ static const char * ciss_name_ldrive_status(int status) { switch (status) { case CISS_LSTATUS_OK: return("OK"); case CISS_LSTATUS_FAILED: return("failed"); case CISS_LSTATUS_NOT_CONFIGURED: return("not configured"); case CISS_LSTATUS_INTERIM_RECOVERY: return("interim recovery"); case CISS_LSTATUS_READY_RECOVERY: return("ready for recovery"); case CISS_LSTATUS_RECOVERING: return("recovering"); case CISS_LSTATUS_WRONG_PDRIVE: return("wrong physical drive inserted"); case CISS_LSTATUS_MISSING_PDRIVE: return("missing physical drive"); case CISS_LSTATUS_EXPANDING: return("expanding"); case CISS_LSTATUS_BECOMING_READY: return("becoming ready"); case CISS_LSTATUS_QUEUED_FOR_EXPANSION: return("queued for expansion"); } return("unknown status"); } /************************************************************************ * Return an online/offline/nonexistent value for a logical drive * status value. */ static int ciss_decode_ldrive_status(int status) { switch(status) { case CISS_LSTATUS_NOT_CONFIGURED: return(CISS_LD_NONEXISTENT); case CISS_LSTATUS_OK: case CISS_LSTATUS_INTERIM_RECOVERY: case CISS_LSTATUS_READY_RECOVERY: case CISS_LSTATUS_RECOVERING: case CISS_LSTATUS_EXPANDING: case CISS_LSTATUS_QUEUED_FOR_EXPANSION: return(CISS_LD_ONLINE); case CISS_LSTATUS_FAILED: case CISS_LSTATUS_WRONG_PDRIVE: case CISS_LSTATUS_MISSING_PDRIVE: case CISS_LSTATUS_BECOMING_READY: default: return(CISS_LD_OFFLINE); } } /************************************************************************ * Return a name for a logical drive's organisation. */ static const char * ciss_name_ldrive_org(int org) { switch(org) { case CISS_LDRIVE_RAID0: return("RAID 0"); case CISS_LDRIVE_RAID1: return("RAID 1(1+0)"); case CISS_LDRIVE_RAID4: return("RAID 4"); case CISS_LDRIVE_RAID5: return("RAID 5"); case CISS_LDRIVE_RAID51: return("RAID 5+1"); case CISS_LDRIVE_RAIDADG: return("RAID ADG"); } return("unknown"); } /************************************************************************ * Return a name for a command status value. 
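 *
 * Illustrative note (not part of the driver): the "%b" conversions used in
 * ciss_print_request() and ciss_print_adapter() above are the kernel
 * printf(9) bit-field format.  The first character of the description string
 * is the numeric base (\20 = base 16) and each following group is a 1-origin
 * bit number plus that bit's name, so with the cr_flags description:
 *
 *	// hypothetical value 0x5 has bits 1 and 3 set; this prints
 *	// "flags 5<mapped,poll>"
 *	printf("flags %b\n", 0x5, "\20\1mapped\2sleep\3poll\4dataout\5datain");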
*/ static const char * ciss_name_command_status(int status) { switch(status) { case CISS_CMD_STATUS_SUCCESS: return("success"); case CISS_CMD_STATUS_TARGET_STATUS: return("target status"); case CISS_CMD_STATUS_DATA_UNDERRUN: return("data underrun"); case CISS_CMD_STATUS_DATA_OVERRUN: return("data overrun"); case CISS_CMD_STATUS_INVALID_COMMAND: return("invalid command"); case CISS_CMD_STATUS_PROTOCOL_ERROR: return("protocol error"); case CISS_CMD_STATUS_HARDWARE_ERROR: return("hardware error"); case CISS_CMD_STATUS_CONNECTION_LOST: return("connection lost"); case CISS_CMD_STATUS_ABORTED: return("aborted"); case CISS_CMD_STATUS_ABORT_FAILED: return("abort failed"); case CISS_CMD_STATUS_UNSOLICITED_ABORT: return("unsolicited abort"); case CISS_CMD_STATUS_TIMEOUT: return("timeout"); case CISS_CMD_STATUS_UNABORTABLE: return("unabortable"); } return("unknown status"); } /************************************************************************ * Handle an open on the control device. */ static int ciss_open(struct cdev *dev, int flags, int fmt, struct thread *p) { struct ciss_softc *sc; debug_called(1); sc = (struct ciss_softc *)dev->si_drv1; /* we might want to veto if someone already has us open */ mtx_lock(&sc->ciss_mtx); sc->ciss_flags |= CISS_FLAG_CONTROL_OPEN; mtx_unlock(&sc->ciss_mtx); return(0); } /************************************************************************ * Handle the last close on the control device. */ static int ciss_close(struct cdev *dev, int flags, int fmt, struct thread *p) { struct ciss_softc *sc; debug_called(1); sc = (struct ciss_softc *)dev->si_drv1; mtx_lock(&sc->ciss_mtx); sc->ciss_flags &= ~CISS_FLAG_CONTROL_OPEN; mtx_unlock(&sc->ciss_mtx); return (0); } /******************************************************************************** * Handle adapter-specific control operations. * * Note that the API here is compatible with the Linux driver, in order to * simplify the porting of Compaq's userland tools. */ static int ciss_ioctl(struct cdev *dev, u_long cmd, caddr_t addr, int32_t flag, struct thread *p) { struct ciss_softc *sc; IOCTL_Command_struct *ioc = (IOCTL_Command_struct *)addr; #ifdef __amd64__ IOCTL_Command_struct32 *ioc32 = (IOCTL_Command_struct32 *)addr; IOCTL_Command_struct ioc_swab; #endif int error; debug_called(1); sc = (struct ciss_softc *)dev->si_drv1; error = 0; mtx_lock(&sc->ciss_mtx); switch(cmd) { case CCISS_GETQSTATS: { union ciss_statrequest *cr = (union ciss_statrequest *)addr; switch (cr->cs_item) { case CISSQ_FREE: case CISSQ_NOTIFY: bcopy(&sc->ciss_qstat[cr->cs_item], &cr->cs_qstat, sizeof(struct ciss_qstat)); break; default: error = ENOIOCTL; break; } break; } case CCISS_GETPCIINFO: { cciss_pci_info_struct *pis = (cciss_pci_info_struct *)addr; pis->bus = pci_get_bus(sc->ciss_dev); pis->dev_fn = pci_get_slot(sc->ciss_dev); pis->board_id = (pci_get_subvendor(sc->ciss_dev) << 16) | pci_get_subdevice(sc->ciss_dev); break; } case CCISS_GETINTINFO: { cciss_coalint_struct *cis = (cciss_coalint_struct *)addr; cis->delay = sc->ciss_cfg->interrupt_coalesce_delay; cis->count = sc->ciss_cfg->interrupt_coalesce_count; break; } case CCISS_SETINTINFO: { cciss_coalint_struct *cis = (cciss_coalint_struct *)addr; if ((cis->delay == 0) && (cis->count == 0)) { error = EINVAL; break; } /* * XXX apparently this is only safe if the controller is idle, * we should suspend it before doing this. 
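 *
 * Illustrative note (not part of the driver): CCISS_GETPCIINFO above packs
 * the PCI subsystem IDs into a single 32-bit board_id, subsystem vendor in
 * the high half and subsystem device in the low half:
 *
 *	// hypothetical IDs, for illustration only
 *	uint16_t subvendor = 0x0e11, subdevice = 0x4091;
 *	uint32_t board_id = ((uint32_t)subvendor << 16) | subdevice;
 *	// board_id == 0x0e114091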
*/ sc->ciss_cfg->interrupt_coalesce_delay = cis->delay; sc->ciss_cfg->interrupt_coalesce_count = cis->count; if (ciss_update_config(sc)) error = EIO; /* XXX resume the controller here */ break; } case CCISS_GETNODENAME: bcopy(sc->ciss_cfg->server_name, (NodeName_type *)addr, sizeof(NodeName_type)); break; case CCISS_SETNODENAME: bcopy((NodeName_type *)addr, sc->ciss_cfg->server_name, sizeof(NodeName_type)); if (ciss_update_config(sc)) error = EIO; break; case CCISS_GETHEARTBEAT: *(Heartbeat_type *)addr = sc->ciss_cfg->heartbeat; break; case CCISS_GETBUSTYPES: *(BusTypes_type *)addr = sc->ciss_cfg->bus_types; break; case CCISS_GETFIRMVER: bcopy(sc->ciss_id->running_firmware_revision, (FirmwareVer_type *)addr, sizeof(FirmwareVer_type)); break; case CCISS_GETDRIVERVER: *(DriverVer_type *)addr = CISS_DRIVER_VERSION; break; case CCISS_REVALIDVOLS: /* * This is a bit ugly; to do it "right" we really need * to find any disks that have changed, kick CAM off them, * then rescan only these disks. It'd be nice if they * a) told us which disk(s) they were going to play with, * and b) which ones had arrived. 8( */ break; #ifdef __amd64__ case CCISS_PASSTHRU32: ioc_swab.LUN_info = ioc32->LUN_info; ioc_swab.Request = ioc32->Request; ioc_swab.error_info = ioc32->error_info; ioc_swab.buf_size = ioc32->buf_size; ioc_swab.buf = (u_int8_t *)(uintptr_t)ioc32->buf; ioc = &ioc_swab; /* FALLTHROUGH */ #endif case CCISS_PASSTHRU: error = ciss_user_command(sc, ioc); break; default: debug(0, "unknown ioctl 0x%lx", cmd); debug(1, "CCISS_GETPCIINFO: 0x%lx", CCISS_GETPCIINFO); debug(1, "CCISS_GETINTINFO: 0x%lx", CCISS_GETINTINFO); debug(1, "CCISS_SETINTINFO: 0x%lx", CCISS_SETINTINFO); debug(1, "CCISS_GETNODENAME: 0x%lx", CCISS_GETNODENAME); debug(1, "CCISS_SETNODENAME: 0x%lx", CCISS_SETNODENAME); debug(1, "CCISS_GETHEARTBEAT: 0x%lx", CCISS_GETHEARTBEAT); debug(1, "CCISS_GETBUSTYPES: 0x%lx", CCISS_GETBUSTYPES); debug(1, "CCISS_GETFIRMVER: 0x%lx", CCISS_GETFIRMVER); debug(1, "CCISS_GETDRIVERVER: 0x%lx", CCISS_GETDRIVERVER); debug(1, "CCISS_REVALIDVOLS: 0x%lx", CCISS_REVALIDVOLS); debug(1, "CCISS_PASSTHRU: 0x%lx", CCISS_PASSTHRU); error = ENOIOCTL; break; } mtx_unlock(&sc->ciss_mtx); return(error); } Index: head/sys/dev/cxgbe/crypto/t4_crypto.c =================================================================== --- head/sys/dev/cxgbe/crypto/t4_crypto.c (revision 327948) +++ head/sys/dev/cxgbe/crypto/t4_crypto.c (revision 327949) @@ -1,2234 +1,2234 @@ /*- * Copyright (c) 2017 Chelsio Communications, Inc. * All rights reserved. * Written by: John Baldwin * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include "cryptodev_if.h" #include "common/common.h" #include "crypto/t4_crypto.h" /* * Requests consist of: * * +-------------------------------+ * | struct fw_crypto_lookaside_wr | * +-------------------------------+ * | struct ulp_txpkt | * +-------------------------------+ * | struct ulptx_idata | * +-------------------------------+ * | struct cpl_tx_sec_pdu | * +-------------------------------+ * | struct cpl_tls_tx_scmd_fmt | * +-------------------------------+ * | key context header | * +-------------------------------+ * | AES key | ----- For requests with AES * +-------------------------------+ - * | IPAD (16-byte aligned) | \ * +-------------------------------+ +---- For requests with HMAC * | OPAD (16-byte aligned) | / * +-------------------------------+ - * | GMAC H | ----- For AES-GCM * +-------------------------------+ - * | struct cpl_rx_phys_dsgl | \ * +-------------------------------+ +---- Destination buffer for * | PHYS_DSGL entries | / non-hash-only requests * +-------------------------------+ - * | 16 dummy bytes | ----- Only for hash-only requests * +-------------------------------+ * | IV | ----- If immediate IV * +-------------------------------+ * | Payload | ----- If immediate Payload * +-------------------------------+ - * | struct ulptx_sgl | \ * +-------------------------------+ +---- If payload via SGL * | SGL entries | / * +-------------------------------+ - * * Note that the key context must be padded to ensure 16-byte alignment. * For HMAC requests, the key consists of the partial hash of the IPAD * followed by the partial hash of the OPAD. * * Replies consist of: * * +-------------------------------+ * | struct cpl_fw6_pld | * +-------------------------------+ * | hash digest | ----- For HMAC request with * +-------------------------------+ 'hash_size' set in work request * * A 32-bit big-endian error status word is supplied in the last 4 * bytes of data[0] in the CPL_FW6_PLD message. bit 0 indicates a * "MAC" error and bit 1 indicates a "PAD" error. * * The 64-bit 'cookie' field from the fw_crypto_lookaside_wr message * in the request is returned in data[1] of the CPL_FW6_PLD message. * * For block cipher replies, the updated IV is supplied in data[2] and * data[3] of the CPL_FW6_PLD message. * * For hash replies where the work request set 'hash_size' to request * a copy of the hash in the reply, the hash digest is supplied * immediately following the CPL_FW6_PLD message. */ /* * The documentation for CPL_RX_PHYS_DSGL claims a maximum of 32 * SG entries. */ #define MAX_RX_PHYS_DSGL_SGE 32 #define DSGL_SGE_MAXLEN 65535 /* * The adapter only supports requests with a total input or output * length of 64k-1 or smaller. Longer requests either result in hung * requests or incorrect results. 
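 *
 * Illustrative sketch (not part of the driver): decoding the error status
 * word described above.  The last 4 bytes of the big-endian data[0] field of
 * the CPL_FW6_PLD reply hold the 32-bit status, which lands in the low half
 * after be64toh(); bit 0 flags a MAC failure and bit 1 a pad failure:
 *
 *	// 'cpl' is assumed to point at a received struct cpl_fw6_pld
 *	uint32_t status = (uint32_t)be64toh(cpl->data[0]);
 *	int mac_error = (status & 0x1) != 0;
 *	int pad_error = (status & 0x2) != 0;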
*/ #define MAX_REQUEST_SIZE 65535 static MALLOC_DEFINE(M_CCR, "ccr", "Chelsio T6 crypto"); struct ccr_session_hmac { struct auth_hash *auth_hash; int hash_len; unsigned int partial_digest_len; unsigned int auth_mode; unsigned int mk_size; char ipad[CHCR_HASH_MAX_BLOCK_SIZE_128]; char opad[CHCR_HASH_MAX_BLOCK_SIZE_128]; }; struct ccr_session_gmac { int hash_len; char ghash_h[GMAC_BLOCK_LEN]; }; struct ccr_session_blkcipher { unsigned int cipher_mode; unsigned int key_len; unsigned int iv_len; __be32 key_ctx_hdr; char enckey[CHCR_AES_MAX_KEY_LEN]; char deckey[CHCR_AES_MAX_KEY_LEN]; }; struct ccr_session { bool active; int pending; enum { HMAC, BLKCIPHER, AUTHENC, GCM } mode; union { struct ccr_session_hmac hmac; struct ccr_session_gmac gmac; }; struct ccr_session_blkcipher blkcipher; }; struct ccr_softc { struct adapter *adapter; device_t dev; uint32_t cid; int tx_channel_id; struct ccr_session *sessions; int nsessions; struct mtx lock; bool detaching; struct sge_wrq *txq; struct sge_rxq *rxq; /* * Pre-allocate S/G lists used when preparing a work request. * 'sg_crp' contains an sglist describing the entire buffer * for a 'struct cryptop'. 'sg_ulptx' is used to describe * the data the engine should DMA as input via ULPTX_SGL. * 'sg_dsgl' is used to describe the destination that cipher * text and a tag should be written to. */ struct sglist *sg_crp; struct sglist *sg_ulptx; struct sglist *sg_dsgl; /* Statistics. */ uint64_t stats_blkcipher_encrypt; uint64_t stats_blkcipher_decrypt; uint64_t stats_hmac; uint64_t stats_authenc_encrypt; uint64_t stats_authenc_decrypt; uint64_t stats_gcm_encrypt; uint64_t stats_gcm_decrypt; uint64_t stats_wr_nomem; uint64_t stats_inflight; uint64_t stats_mac_error; uint64_t stats_pad_error; uint64_t stats_bad_session; uint64_t stats_sglist_error; uint64_t stats_process_error; }; /* * Crypto requests involve two kind of scatter/gather lists. * * Non-hash-only requests require a PHYS_DSGL that describes the * location to store the results of the encryption or decryption * operation. This SGL uses a different format (PHYS_DSGL) and should * exclude the crd_skip bytes at the start of the data as well as * any AAD or IV. For authenticated encryption requests it should * cover include the destination of the hash or tag. * * The input payload may either be supplied inline as immediate data, * or via a standard ULP_TX SGL. This SGL should include AAD, * ciphertext, and the hash or tag for authenticated decryption * requests. * * These scatter/gather lists can describe different subsets of the * buffer described by the crypto operation. ccr_populate_sglist() * generates a scatter/gather list that covers the entire crypto * operation buffer that is then used to construct the other * scatter/gather lists. */ static int ccr_populate_sglist(struct sglist *sg, struct cryptop *crp) { int error; sglist_reset(sg); if (crp->crp_flags & CRYPTO_F_IMBUF) error = sglist_append_mbuf(sg, (struct mbuf *)crp->crp_buf); else if (crp->crp_flags & CRYPTO_F_IOV) error = sglist_append_uio(sg, (struct uio *)crp->crp_buf); else error = sglist_append(sg, crp->crp_buf, crp->crp_ilen); return (error); } /* * Segments in 'sg' larger than 'maxsegsize' are counted as multiple * segments. */ static int ccr_count_sgl(struct sglist *sg, int maxsegsize) { int i, nsegs; nsegs = 0; for (i = 0; i < sg->sg_nseg; i++) nsegs += howmany(sg->sg_segs[i].ss_len, maxsegsize); return (nsegs); } /* These functions deal with PHYS_DSGL for the reply buffer. 
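 *
 * Illustrative example (not part of the driver): ccr_count_sgl() above
 * counts how many entries a buffer needs once each physical segment is
 * split into DSGL_SGE_MAXLEN-sized pieces, so a single contiguous
 * 150000-byte segment (a hypothetical size) costs three entries:
 *
 *	// howmany() rounds up: (150000 + 65535 - 1) / 65535 == 3
 *	int nsegs = howmany(150000, DSGL_SGE_MAXLEN);
 *
 * That count is what ccr_phys_dsgl_len() below turns into the byte length
 * of the PHYS_DSGL address/length pairs.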
*/ static inline int ccr_phys_dsgl_len(int nsegs) { int len; len = (nsegs / 8) * sizeof(struct phys_sge_pairs); if ((nsegs % 8) != 0) { len += sizeof(uint16_t) * 8; len += roundup2(nsegs % 8, 2) * sizeof(uint64_t); } return (len); } static void ccr_write_phys_dsgl(struct ccr_softc *sc, void *dst, int nsegs) { struct sglist *sg; struct cpl_rx_phys_dsgl *cpl; struct phys_sge_pairs *sgl; vm_paddr_t paddr; size_t seglen; u_int i, j; sg = sc->sg_dsgl; cpl = dst; cpl->op_to_tid = htobe32(V_CPL_RX_PHYS_DSGL_OPCODE(CPL_RX_PHYS_DSGL) | V_CPL_RX_PHYS_DSGL_ISRDMA(0)); cpl->pcirlxorder_to_noofsgentr = htobe32( V_CPL_RX_PHYS_DSGL_PCIRLXORDER(0) | V_CPL_RX_PHYS_DSGL_PCINOSNOOP(0) | V_CPL_RX_PHYS_DSGL_PCITPHNTENB(0) | V_CPL_RX_PHYS_DSGL_DCAID(0) | V_CPL_RX_PHYS_DSGL_NOOFSGENTR(nsegs)); cpl->rss_hdr_int.opcode = CPL_RX_PHYS_ADDR; cpl->rss_hdr_int.qid = htobe16(sc->rxq->iq.abs_id); cpl->rss_hdr_int.hash_val = 0; sgl = (struct phys_sge_pairs *)(cpl + 1); j = 0; for (i = 0; i < sg->sg_nseg; i++) { seglen = sg->sg_segs[i].ss_len; paddr = sg->sg_segs[i].ss_paddr; do { sgl->addr[j] = htobe64(paddr); if (seglen > DSGL_SGE_MAXLEN) { sgl->len[j] = htobe16(DSGL_SGE_MAXLEN); paddr += DSGL_SGE_MAXLEN; seglen -= DSGL_SGE_MAXLEN; } else { sgl->len[j] = htobe16(seglen); seglen = 0; } j++; if (j == 8) { sgl++; j = 0; } } while (seglen != 0); } MPASS(j + 8 * (sgl - (struct phys_sge_pairs *)(cpl + 1)) == nsegs); } /* These functions deal with the ULPTX_SGL for input payload. */ static inline int ccr_ulptx_sgl_len(int nsegs) { u_int n; nsegs--; /* first segment is part of ulptx_sgl */ n = sizeof(struct ulptx_sgl) + 8 * ((3 * nsegs) / 2 + (nsegs & 1)); return (roundup2(n, 16)); } static void ccr_write_ulptx_sgl(struct ccr_softc *sc, void *dst, int nsegs) { struct ulptx_sgl *usgl; struct sglist *sg; struct sglist_seg *ss; int i; sg = sc->sg_ulptx; MPASS(nsegs == sg->sg_nseg); ss = &sg->sg_segs[0]; usgl = dst; usgl->cmd_nsge = htobe32(V_ULPTX_CMD(ULP_TX_SC_DSGL) | V_ULPTX_NSGE(nsegs)); usgl->len0 = htobe32(ss->ss_len); usgl->addr0 = htobe64(ss->ss_paddr); ss++; for (i = 0; i < sg->sg_nseg - 1; i++) { usgl->sge[i / 2].len[i & 1] = htobe32(ss->ss_len); usgl->sge[i / 2].addr[i & 1] = htobe64(ss->ss_paddr); ss++; } } static bool ccr_use_imm_data(u_int transhdr_len, u_int input_len) { if (input_len > CRYPTO_MAX_IMM_TX_PKT_LEN) return (false); if (roundup2(transhdr_len, 16) + roundup2(input_len, 16) > SGE_MAX_WR_LEN) return (false); return (true); } static void ccr_populate_wreq(struct ccr_softc *sc, struct chcr_wr *crwr, u_int kctx_len, u_int wr_len, uint32_t sid, u_int imm_len, u_int sgl_len, u_int hash_size, u_int iv_loc, struct cryptop *crp) { u_int cctx_size; cctx_size = sizeof(struct _key_ctx) + kctx_len; crwr->wreq.op_to_cctx_size = htobe32( V_FW_CRYPTO_LOOKASIDE_WR_OPCODE(FW_CRYPTO_LOOKASIDE_WR) | V_FW_CRYPTO_LOOKASIDE_WR_COMPL(0) | V_FW_CRYPTO_LOOKASIDE_WR_IMM_LEN(imm_len) | V_FW_CRYPTO_LOOKASIDE_WR_CCTX_LOC(1) | V_FW_CRYPTO_LOOKASIDE_WR_CCTX_SIZE(cctx_size >> 4)); crwr->wreq.len16_pkd = htobe32( V_FW_CRYPTO_LOOKASIDE_WR_LEN16(wr_len / 16)); crwr->wreq.session_id = htobe32(sid); crwr->wreq.rx_chid_to_rx_q_id = htobe32( V_FW_CRYPTO_LOOKASIDE_WR_RX_CHID(sc->tx_channel_id) | V_FW_CRYPTO_LOOKASIDE_WR_LCB(0) | V_FW_CRYPTO_LOOKASIDE_WR_PHASH(0) | V_FW_CRYPTO_LOOKASIDE_WR_IV(iv_loc) | V_FW_CRYPTO_LOOKASIDE_WR_FQIDX(0) | V_FW_CRYPTO_LOOKASIDE_WR_TX_CH(0) | V_FW_CRYPTO_LOOKASIDE_WR_RX_Q_ID(sc->rxq->iq.abs_id)); crwr->wreq.key_addr = 0; crwr->wreq.pld_size_hash_size = htobe32( V_FW_CRYPTO_LOOKASIDE_WR_PLD_SIZE(sgl_len) | 
V_FW_CRYPTO_LOOKASIDE_WR_HASH_SIZE(hash_size)); crwr->wreq.cookie = htobe64((uintptr_t)crp); crwr->ulptx.cmd_dest = htobe32(V_ULPTX_CMD(ULP_TX_PKT) | V_ULP_TXPKT_DATAMODIFY(0) | V_ULP_TXPKT_CHANNELID(sc->tx_channel_id) | V_ULP_TXPKT_DEST(0) | V_ULP_TXPKT_FID(0) | V_ULP_TXPKT_RO(1)); crwr->ulptx.len = htobe32( ((wr_len - sizeof(struct fw_crypto_lookaside_wr)) / 16)); crwr->sc_imm.cmd_more = htobe32(V_ULPTX_CMD(ULP_TX_SC_IMM) | V_ULP_TX_SC_MORE(imm_len != 0 ? 0 : 1)); crwr->sc_imm.len = htobe32(wr_len - offsetof(struct chcr_wr, sec_cpl) - sgl_len); } static int ccr_hmac(struct ccr_softc *sc, uint32_t sid, struct ccr_session *s, struct cryptop *crp) { struct chcr_wr *crwr; struct wrqe *wr; struct auth_hash *axf; struct cryptodesc *crd; char *dst; u_int hash_size_in_response, kctx_flits, kctx_len, transhdr_len, wr_len; u_int imm_len, iopad_size; int error, sgl_nsegs, sgl_len; crd = crp->crp_desc; /* Reject requests with too large of an input buffer. */ if (crd->crd_len > MAX_REQUEST_SIZE) return (EFBIG); axf = s->hmac.auth_hash; /* PADs must be 128-bit aligned. */ iopad_size = roundup2(s->hmac.partial_digest_len, 16); /* * The 'key' part of the context includes the aligned IPAD and * OPAD. */ kctx_len = iopad_size * 2; hash_size_in_response = axf->hashsize; transhdr_len = HASH_TRANSHDR_SIZE(kctx_len); if (crd->crd_len == 0) { imm_len = axf->blocksize; sgl_nsegs = 0; sgl_len = 0; } else if (ccr_use_imm_data(transhdr_len, crd->crd_len)) { imm_len = crd->crd_len; sgl_nsegs = 0; sgl_len = 0; } else { imm_len = 0; sglist_reset(sc->sg_ulptx); error = sglist_append_sglist(sc->sg_ulptx, sc->sg_crp, crd->crd_skip, crd->crd_len); if (error) return (error); sgl_nsegs = sc->sg_ulptx->sg_nseg; sgl_len = ccr_ulptx_sgl_len(sgl_nsegs); } wr_len = roundup2(transhdr_len, 16) + roundup2(imm_len, 16) + sgl_len; wr = alloc_wrqe(wr_len, sc->txq); if (wr == NULL) { sc->stats_wr_nomem++; return (ENOMEM); } crwr = wrtod(wr); memset(crwr, 0, wr_len); ccr_populate_wreq(sc, crwr, kctx_len, wr_len, sid, imm_len, sgl_len, hash_size_in_response, IV_NOP, crp); /* XXX: Hardcodes SGE loopback channel of 0. */ crwr->sec_cpl.op_ivinsrtofst = htobe32( V_CPL_TX_SEC_PDU_OPCODE(CPL_TX_SEC_PDU) | V_CPL_TX_SEC_PDU_RXCHID(sc->tx_channel_id) | V_CPL_TX_SEC_PDU_ACKFOLLOWS(0) | V_CPL_TX_SEC_PDU_ULPTXLPBK(1) | V_CPL_TX_SEC_PDU_CPLLEN(2) | V_CPL_TX_SEC_PDU_PLACEHOLDER(0) | V_CPL_TX_SEC_PDU_IVINSRTOFST(0)); crwr->sec_cpl.pldlen = htobe32(crd->crd_len == 0 ? axf->blocksize : crd->crd_len); crwr->sec_cpl.cipherstop_lo_authinsert = htobe32( V_CPL_TX_SEC_PDU_AUTHSTART(1) | V_CPL_TX_SEC_PDU_AUTHSTOP(0)); /* These two flits are actually a CPL_TLS_TX_SCMD_FMT. */ crwr->sec_cpl.seqno_numivs = htobe32( V_SCMD_SEQ_NO_CTRL(0) | V_SCMD_PROTO_VERSION(CHCR_SCMD_PROTO_VERSION_GENERIC) | V_SCMD_CIPH_MODE(CHCR_SCMD_CIPHER_MODE_NOP) | V_SCMD_AUTH_MODE(s->hmac.auth_mode) | V_SCMD_HMAC_CTRL(CHCR_SCMD_HMAC_CTRL_NO_TRUNC)); crwr->sec_cpl.ivgen_hdrlen = htobe32( V_SCMD_LAST_FRAG(0) | V_SCMD_MORE_FRAGS(crd->crd_len == 0 ? 1 : 0) | V_SCMD_MAC_ONLY(1)); memcpy(crwr->key_ctx.key, s->hmac.ipad, s->hmac.partial_digest_len); memcpy(crwr->key_ctx.key + iopad_size, s->hmac.opad, s->hmac.partial_digest_len); /* XXX: F_KEY_CONTEXT_SALT_PRESENT set, but 'salt' not set. 
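 *
 * Background sketch (not part of the driver): the ipad/opad copies above are
 * HMAC precomputation state, not the raw key.  HMAC is
 * H((K ^ opad) || H((K ^ ipad) || m)), and the stored partial digests are
 * the hash states after compressing the single (K ^ ipad) and (K ^ opad)
 * blocks, so the engine resumes from them instead of rehashing the key.
 * This is also why the zero-length case just below ships one pre-padded
 * block: the inner hash has already absorbed the blocksize-byte ipad block,
 * so the driver supplies the final Merkle-Damgard padding itself, e.g. for
 * SHA-256 (blocksize 64, 512 bits hashed so far):
 *
 *	unsigned char blk[64] = { 0x80 };	// 0x80 marker, rest zero
 *	uint64_t bits = htobe64(64 << 3);	// total bit count, big-endian
 *	memcpy(blk + 64 - sizeof(bits), &bits, sizeof(bits));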
*/ kctx_flits = (sizeof(struct _key_ctx) + kctx_len) / 16; crwr->key_ctx.ctx_hdr = htobe32(V_KEY_CONTEXT_CTX_LEN(kctx_flits) | V_KEY_CONTEXT_OPAD_PRESENT(1) | V_KEY_CONTEXT_SALT_PRESENT(1) | V_KEY_CONTEXT_CK_SIZE(CHCR_KEYCTX_NO_KEY) | V_KEY_CONTEXT_MK_SIZE(s->hmac.mk_size) | V_KEY_CONTEXT_VALID(1)); dst = (char *)(crwr + 1) + kctx_len + DUMMY_BYTES; if (crd->crd_len == 0) { dst[0] = 0x80; *(uint64_t *)(dst + axf->blocksize - sizeof(uint64_t)) = htobe64(axf->blocksize << 3); } else if (imm_len != 0) crypto_copydata(crp->crp_flags, crp->crp_buf, crd->crd_skip, crd->crd_len, dst); else ccr_write_ulptx_sgl(sc, dst, sgl_nsegs); /* XXX: TODO backpressure */ t4_wrq_tx(sc->adapter, wr); return (0); } static int ccr_hmac_done(struct ccr_softc *sc, struct ccr_session *s, struct cryptop *crp, const struct cpl_fw6_pld *cpl, int error) { struct cryptodesc *crd; crd = crp->crp_desc; if (error == 0) { crypto_copyback(crp->crp_flags, crp->crp_buf, crd->crd_inject, s->hmac.hash_len, (c_caddr_t)(cpl + 1)); } return (error); } static int ccr_blkcipher(struct ccr_softc *sc, uint32_t sid, struct ccr_session *s, struct cryptop *crp) { char iv[CHCR_MAX_CRYPTO_IV_LEN]; struct chcr_wr *crwr; struct wrqe *wr; struct cryptodesc *crd; char *dst; u_int iv_loc, kctx_len, key_half, op_type, transhdr_len, wr_len; u_int imm_len; int dsgl_nsegs, dsgl_len; int sgl_nsegs, sgl_len; int error; crd = crp->crp_desc; if (s->blkcipher.key_len == 0 || crd->crd_len == 0) return (EINVAL); if (crd->crd_alg == CRYPTO_AES_CBC && (crd->crd_len % AES_BLOCK_LEN) != 0) return (EINVAL); /* Reject requests with too large of an input buffer. */ if (crd->crd_len > MAX_REQUEST_SIZE) return (EFBIG); iv_loc = IV_NOP; if (crd->crd_flags & CRD_F_ENCRYPT) { op_type = CHCR_ENCRYPT_OP; if (crd->crd_flags & CRD_F_IV_EXPLICIT) memcpy(iv, crd->crd_iv, s->blkcipher.iv_len); else arc4rand(iv, s->blkcipher.iv_len, 0); iv_loc = IV_IMMEDIATE; if ((crd->crd_flags & CRD_F_IV_PRESENT) == 0) crypto_copyback(crp->crp_flags, crp->crp_buf, crd->crd_inject, s->blkcipher.iv_len, iv); } else { op_type = CHCR_DECRYPT_OP; if (crd->crd_flags & CRD_F_IV_EXPLICIT) { memcpy(iv, crd->crd_iv, s->blkcipher.iv_len); iv_loc = IV_IMMEDIATE; } else iv_loc = IV_DSGL; } sglist_reset(sc->sg_dsgl); error = sglist_append_sglist(sc->sg_dsgl, sc->sg_crp, crd->crd_skip, crd->crd_len); if (error) return (error); dsgl_nsegs = ccr_count_sgl(sc->sg_dsgl, DSGL_SGE_MAXLEN); if (dsgl_nsegs > MAX_RX_PHYS_DSGL_SGE) return (EFBIG); dsgl_len = ccr_phys_dsgl_len(dsgl_nsegs); /* The 'key' must be 128-bit aligned. 
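 *
 * Worked example (not part of the driver): roundup2() below pads the key to
 * the next 16-byte boundary, so a 24-byte AES-192 key occupies
 * roundup2(24, 16) == 32 bytes in the key context, while 16- and 32-byte
 * AES keys are already aligned and keep their size.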
*/ kctx_len = roundup2(s->blkcipher.key_len, 16); transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, dsgl_len); if (ccr_use_imm_data(transhdr_len, crd->crd_len + s->blkcipher.iv_len)) { imm_len = crd->crd_len; if (iv_loc == IV_DSGL) { crypto_copydata(crp->crp_flags, crp->crp_buf, crd->crd_inject, s->blkcipher.iv_len, iv); iv_loc = IV_IMMEDIATE; } sgl_nsegs = 0; sgl_len = 0; } else { imm_len = 0; sglist_reset(sc->sg_ulptx); if (iv_loc == IV_DSGL) { error = sglist_append_sglist(sc->sg_ulptx, sc->sg_crp, crd->crd_inject, s->blkcipher.iv_len); if (error) return (error); } error = sglist_append_sglist(sc->sg_ulptx, sc->sg_crp, crd->crd_skip, crd->crd_len); if (error) return (error); sgl_nsegs = sc->sg_ulptx->sg_nseg; sgl_len = ccr_ulptx_sgl_len(sgl_nsegs); } wr_len = roundup2(transhdr_len, 16) + roundup2(imm_len, 16) + sgl_len; if (iv_loc == IV_IMMEDIATE) wr_len += s->blkcipher.iv_len; wr = alloc_wrqe(wr_len, sc->txq); if (wr == NULL) { sc->stats_wr_nomem++; return (ENOMEM); } crwr = wrtod(wr); memset(crwr, 0, wr_len); ccr_populate_wreq(sc, crwr, kctx_len, wr_len, sid, imm_len, sgl_len, 0, iv_loc, crp); /* XXX: Hardcodes SGE loopback channel of 0. */ crwr->sec_cpl.op_ivinsrtofst = htobe32( V_CPL_TX_SEC_PDU_OPCODE(CPL_TX_SEC_PDU) | V_CPL_TX_SEC_PDU_RXCHID(sc->tx_channel_id) | V_CPL_TX_SEC_PDU_ACKFOLLOWS(0) | V_CPL_TX_SEC_PDU_ULPTXLPBK(1) | V_CPL_TX_SEC_PDU_CPLLEN(2) | V_CPL_TX_SEC_PDU_PLACEHOLDER(0) | V_CPL_TX_SEC_PDU_IVINSRTOFST(1)); crwr->sec_cpl.pldlen = htobe32(s->blkcipher.iv_len + crd->crd_len); crwr->sec_cpl.aadstart_cipherstop_hi = htobe32( V_CPL_TX_SEC_PDU_CIPHERSTART(s->blkcipher.iv_len + 1) | V_CPL_TX_SEC_PDU_CIPHERSTOP_HI(0)); crwr->sec_cpl.cipherstop_lo_authinsert = htobe32( V_CPL_TX_SEC_PDU_CIPHERSTOP_LO(0)); /* These two flits are actually a CPL_TLS_TX_SCMD_FMT. 
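 *
 * Illustrative note (not part of the driver, inferred from the values used
 * above): the SEC_PDU start/stop fields appear to be 1-based offsets into a
 * payload that begins with the IV, which is why IVINSRTOFST is 1 and
 * CIPHERSTART is iv_len + 1 (17 for a 16-byte AES-CBC IV) rather than
 * iv_len.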
*/ crwr->sec_cpl.seqno_numivs = htobe32( V_SCMD_SEQ_NO_CTRL(0) | V_SCMD_PROTO_VERSION(CHCR_SCMD_PROTO_VERSION_GENERIC) | V_SCMD_ENC_DEC_CTRL(op_type) | V_SCMD_CIPH_MODE(s->blkcipher.cipher_mode) | V_SCMD_AUTH_MODE(CHCR_SCMD_AUTH_MODE_NOP) | V_SCMD_HMAC_CTRL(CHCR_SCMD_HMAC_CTRL_NOP) | V_SCMD_IV_SIZE(s->blkcipher.iv_len / 2) | V_SCMD_NUM_IVS(0)); crwr->sec_cpl.ivgen_hdrlen = htobe32( V_SCMD_IV_GEN_CTRL(0) | V_SCMD_MORE_FRAGS(0) | V_SCMD_LAST_FRAG(0) | V_SCMD_MAC_ONLY(0) | V_SCMD_AADIVDROP(1) | V_SCMD_HDR_LEN(dsgl_len)); crwr->key_ctx.ctx_hdr = s->blkcipher.key_ctx_hdr; switch (crd->crd_alg) { case CRYPTO_AES_CBC: if (crd->crd_flags & CRD_F_ENCRYPT) memcpy(crwr->key_ctx.key, s->blkcipher.enckey, s->blkcipher.key_len); else memcpy(crwr->key_ctx.key, s->blkcipher.deckey, s->blkcipher.key_len); break; case CRYPTO_AES_ICM: memcpy(crwr->key_ctx.key, s->blkcipher.enckey, s->blkcipher.key_len); break; case CRYPTO_AES_XTS: key_half = s->blkcipher.key_len / 2; memcpy(crwr->key_ctx.key, s->blkcipher.enckey + key_half, key_half); if (crd->crd_flags & CRD_F_ENCRYPT) memcpy(crwr->key_ctx.key + key_half, s->blkcipher.enckey, key_half); else memcpy(crwr->key_ctx.key + key_half, s->blkcipher.deckey, key_half); break; } dst = (char *)(crwr + 1) + kctx_len; ccr_write_phys_dsgl(sc, dst, dsgl_nsegs); dst += sizeof(struct cpl_rx_phys_dsgl) + dsgl_len; if (iv_loc == IV_IMMEDIATE) { memcpy(dst, iv, s->blkcipher.iv_len); dst += s->blkcipher.iv_len; } if (imm_len != 0) crypto_copydata(crp->crp_flags, crp->crp_buf, crd->crd_skip, crd->crd_len, dst); else ccr_write_ulptx_sgl(sc, dst, sgl_nsegs); /* XXX: TODO backpressure */ t4_wrq_tx(sc->adapter, wr); return (0); } static int ccr_blkcipher_done(struct ccr_softc *sc, struct ccr_session *s, struct cryptop *crp, const struct cpl_fw6_pld *cpl, int error) { /* * The updated IV to permit chained requests is at * cpl->data[2], but OCF doesn't permit chained requests. */ return (error); } /* * 'hashsize' is the length of a full digest. 'authsize' is the * requested digest length for this operation which may be less * than 'hashsize'. */ static int ccr_hmac_ctrl(unsigned int hashsize, unsigned int authsize) { if (authsize == 10) return (CHCR_SCMD_HMAC_CTRL_TRUNC_RFC4366); if (authsize == 12) return (CHCR_SCMD_HMAC_CTRL_IPSEC_96BIT); if (authsize == hashsize / 2) return (CHCR_SCMD_HMAC_CTRL_DIV2); return (CHCR_SCMD_HMAC_CTRL_NO_TRUNC); } static int ccr_authenc(struct ccr_softc *sc, uint32_t sid, struct ccr_session *s, struct cryptop *crp, struct cryptodesc *crda, struct cryptodesc *crde) { char iv[CHCR_MAX_CRYPTO_IV_LEN]; struct chcr_wr *crwr; struct wrqe *wr; struct auth_hash *axf; char *dst; u_int iv_loc, kctx_len, key_half, op_type, transhdr_len, wr_len; u_int hash_size_in_response, imm_len, iopad_size; u_int aad_start, aad_len, aad_stop; u_int auth_start, auth_stop, auth_insert; u_int cipher_start, cipher_stop; u_int hmac_ctrl, input_len; int dsgl_nsegs, dsgl_len; int sgl_nsegs, sgl_len; int error; /* * If there is a need in the future, requests with an empty * payload could be supported as HMAC-only requests. */ if (s->blkcipher.key_len == 0 || crde->crd_len == 0) return (EINVAL); if (crde->crd_alg == CRYPTO_AES_CBC && (crde->crd_len % AES_BLOCK_LEN) != 0) return (EINVAL); /* * AAD is only permitted before the cipher/plain text, not * after. 
*/ if (crda->crd_len + crda->crd_skip > crde->crd_len + crde->crd_skip) return (EINVAL); axf = s->hmac.auth_hash; hash_size_in_response = s->hmac.hash_len; /* * The IV is always stored at the start of the buffer even * though it may be duplicated in the payload. The crypto * engine doesn't work properly if the IV offset points inside * of the AAD region, so a second copy is always required. */ iv_loc = IV_IMMEDIATE; if (crde->crd_flags & CRD_F_ENCRYPT) { op_type = CHCR_ENCRYPT_OP; if (crde->crd_flags & CRD_F_IV_EXPLICIT) memcpy(iv, crde->crd_iv, s->blkcipher.iv_len); else arc4rand(iv, s->blkcipher.iv_len, 0); if ((crde->crd_flags & CRD_F_IV_PRESENT) == 0) crypto_copyback(crp->crp_flags, crp->crp_buf, crde->crd_inject, s->blkcipher.iv_len, iv); } else { op_type = CHCR_DECRYPT_OP; if (crde->crd_flags & CRD_F_IV_EXPLICIT) memcpy(iv, crde->crd_iv, s->blkcipher.iv_len); else crypto_copydata(crp->crp_flags, crp->crp_buf, crde->crd_inject, s->blkcipher.iv_len, iv); } /* * The output buffer consists of the cipher text followed by * the hash when encrypting. For decryption it only contains * the plain text. */ if (op_type == CHCR_ENCRYPT_OP) { if (crde->crd_len + hash_size_in_response > MAX_REQUEST_SIZE) return (EFBIG); } else { if (crde->crd_len > MAX_REQUEST_SIZE) return (EFBIG); } sglist_reset(sc->sg_dsgl); error = sglist_append_sglist(sc->sg_dsgl, sc->sg_crp, crde->crd_skip, crde->crd_len); if (error) return (error); if (op_type == CHCR_ENCRYPT_OP) { error = sglist_append_sglist(sc->sg_dsgl, sc->sg_crp, crda->crd_inject, hash_size_in_response); if (error) return (error); } dsgl_nsegs = ccr_count_sgl(sc->sg_dsgl, DSGL_SGE_MAXLEN); if (dsgl_nsegs > MAX_RX_PHYS_DSGL_SGE) return (EFBIG); dsgl_len = ccr_phys_dsgl_len(dsgl_nsegs); /* PADs must be 128-bit aligned. */ iopad_size = roundup2(s->hmac.partial_digest_len, 16); /* * The 'key' part of the key context consists of the key followed * by the IPAD and OPAD. */ kctx_len = roundup2(s->blkcipher.key_len, 16) + iopad_size * 2; transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, dsgl_len); /* * The input buffer consists of the IV, any AAD, and then the * cipher/plain text. For decryption requests the hash is * appended after the cipher text. */ if (crda->crd_skip < crde->crd_skip) { if (crda->crd_skip + crda->crd_len > crde->crd_skip) aad_len = (crde->crd_skip - crda->crd_skip); else aad_len = crda->crd_len; } else aad_len = 0; input_len = aad_len + crde->crd_len; /* * The firmware hangs if sent a request which is a * bit smaller than MAX_REQUEST_SIZE. In particular, the * firmware appears to require 512 - 16 bytes of spare room * along with the size of the hash even if the hash isn't * included in the input buffer. 
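 *
 * Worked example (not from vendor documentation): with SHA-256 as the
 * authenticator (hashsize 32, so roundup2(32, 16) == 32), the check below
 * limits the combined AAD plus cipher/plain text to
 *
 *	65535 - 32 - (512 - 16) = 65007 bytes
 *
 * even though MAX_REQUEST_SIZE itself is 65535; the IV is not part of
 * input_len at this point.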
*/ if (input_len + roundup2(axf->hashsize, 16) + (512 - 16) > MAX_REQUEST_SIZE) return (EFBIG); if (op_type == CHCR_DECRYPT_OP) input_len += hash_size_in_response; if (ccr_use_imm_data(transhdr_len, s->blkcipher.iv_len + input_len)) { imm_len = input_len; sgl_nsegs = 0; sgl_len = 0; } else { imm_len = 0; sglist_reset(sc->sg_ulptx); if (aad_len != 0) { error = sglist_append_sglist(sc->sg_ulptx, sc->sg_crp, crda->crd_skip, aad_len); if (error) return (error); } error = sglist_append_sglist(sc->sg_ulptx, sc->sg_crp, crde->crd_skip, crde->crd_len); if (error) return (error); if (op_type == CHCR_DECRYPT_OP) { error = sglist_append_sglist(sc->sg_ulptx, sc->sg_crp, crda->crd_inject, hash_size_in_response); if (error) return (error); } sgl_nsegs = sc->sg_ulptx->sg_nseg; sgl_len = ccr_ulptx_sgl_len(sgl_nsegs); } /* * Any auth-only data before the cipher region is marked as AAD. * Auth-data that overlaps with the cipher region is placed in * the auth section. */ if (aad_len != 0) { aad_start = s->blkcipher.iv_len + 1; aad_stop = aad_start + aad_len - 1; } else { aad_start = 0; aad_stop = 0; } cipher_start = s->blkcipher.iv_len + aad_len + 1; if (op_type == CHCR_DECRYPT_OP) cipher_stop = hash_size_in_response; else cipher_stop = 0; if (aad_len == crda->crd_len) { auth_start = 0; auth_stop = 0; } else { if (aad_len != 0) auth_start = cipher_start; else auth_start = s->blkcipher.iv_len + crda->crd_skip - crde->crd_skip + 1; auth_stop = (crde->crd_skip + crde->crd_len) - (crda->crd_skip + crda->crd_len) + cipher_stop; } if (op_type == CHCR_DECRYPT_OP) auth_insert = hash_size_in_response; else auth_insert = 0; wr_len = roundup2(transhdr_len, 16) + roundup2(imm_len, 16) + sgl_len; if (iv_loc == IV_IMMEDIATE) wr_len += s->blkcipher.iv_len; wr = alloc_wrqe(wr_len, sc->txq); if (wr == NULL) { sc->stats_wr_nomem++; return (ENOMEM); } crwr = wrtod(wr); memset(crwr, 0, wr_len); ccr_populate_wreq(sc, crwr, kctx_len, wr_len, sid, imm_len, sgl_len, op_type == CHCR_DECRYPT_OP ? hash_size_in_response : 0, iv_loc, crp); /* XXX: Hardcodes SGE loopback channel of 0. */ crwr->sec_cpl.op_ivinsrtofst = htobe32( V_CPL_TX_SEC_PDU_OPCODE(CPL_TX_SEC_PDU) | V_CPL_TX_SEC_PDU_RXCHID(sc->tx_channel_id) | V_CPL_TX_SEC_PDU_ACKFOLLOWS(0) | V_CPL_TX_SEC_PDU_ULPTXLPBK(1) | V_CPL_TX_SEC_PDU_CPLLEN(2) | V_CPL_TX_SEC_PDU_PLACEHOLDER(0) | V_CPL_TX_SEC_PDU_IVINSRTOFST(1)); crwr->sec_cpl.pldlen = htobe32(s->blkcipher.iv_len + input_len); crwr->sec_cpl.aadstart_cipherstop_hi = htobe32( V_CPL_TX_SEC_PDU_AADSTART(aad_start) | V_CPL_TX_SEC_PDU_AADSTOP(aad_stop) | V_CPL_TX_SEC_PDU_CIPHERSTART(cipher_start) | V_CPL_TX_SEC_PDU_CIPHERSTOP_HI(cipher_stop >> 4)); crwr->sec_cpl.cipherstop_lo_authinsert = htobe32( V_CPL_TX_SEC_PDU_CIPHERSTOP_LO(cipher_stop & 0xf) | V_CPL_TX_SEC_PDU_AUTHSTART(auth_start) | V_CPL_TX_SEC_PDU_AUTHSTOP(auth_stop) | V_CPL_TX_SEC_PDU_AUTHINSERT(auth_insert)); /* These two flits are actually a CPL_TLS_TX_SCMD_FMT. */ hmac_ctrl = ccr_hmac_ctrl(axf->hashsize, hash_size_in_response); crwr->sec_cpl.seqno_numivs = htobe32( V_SCMD_SEQ_NO_CTRL(0) | V_SCMD_PROTO_VERSION(CHCR_SCMD_PROTO_VERSION_GENERIC) | V_SCMD_ENC_DEC_CTRL(op_type) | V_SCMD_CIPH_AUTH_SEQ_CTRL(op_type == CHCR_ENCRYPT_OP ? 
1 : 0) | V_SCMD_CIPH_MODE(s->blkcipher.cipher_mode) | V_SCMD_AUTH_MODE(s->hmac.auth_mode) | V_SCMD_HMAC_CTRL(hmac_ctrl) | V_SCMD_IV_SIZE(s->blkcipher.iv_len / 2) | V_SCMD_NUM_IVS(0)); crwr->sec_cpl.ivgen_hdrlen = htobe32( V_SCMD_IV_GEN_CTRL(0) | V_SCMD_MORE_FRAGS(0) | V_SCMD_LAST_FRAG(0) | V_SCMD_MAC_ONLY(0) | V_SCMD_AADIVDROP(1) | V_SCMD_HDR_LEN(dsgl_len)); crwr->key_ctx.ctx_hdr = s->blkcipher.key_ctx_hdr; switch (crde->crd_alg) { case CRYPTO_AES_CBC: if (crde->crd_flags & CRD_F_ENCRYPT) memcpy(crwr->key_ctx.key, s->blkcipher.enckey, s->blkcipher.key_len); else memcpy(crwr->key_ctx.key, s->blkcipher.deckey, s->blkcipher.key_len); break; case CRYPTO_AES_ICM: memcpy(crwr->key_ctx.key, s->blkcipher.enckey, s->blkcipher.key_len); break; case CRYPTO_AES_XTS: key_half = s->blkcipher.key_len / 2; memcpy(crwr->key_ctx.key, s->blkcipher.enckey + key_half, key_half); if (crde->crd_flags & CRD_F_ENCRYPT) memcpy(crwr->key_ctx.key + key_half, s->blkcipher.enckey, key_half); else memcpy(crwr->key_ctx.key + key_half, s->blkcipher.deckey, key_half); break; } dst = crwr->key_ctx.key + roundup2(s->blkcipher.key_len, 16); memcpy(dst, s->hmac.ipad, s->hmac.partial_digest_len); memcpy(dst + iopad_size, s->hmac.opad, s->hmac.partial_digest_len); dst = (char *)(crwr + 1) + kctx_len; ccr_write_phys_dsgl(sc, dst, dsgl_nsegs); dst += sizeof(struct cpl_rx_phys_dsgl) + dsgl_len; if (iv_loc == IV_IMMEDIATE) { memcpy(dst, iv, s->blkcipher.iv_len); dst += s->blkcipher.iv_len; } if (imm_len != 0) { if (aad_len != 0) { crypto_copydata(crp->crp_flags, crp->crp_buf, crda->crd_skip, aad_len, dst); dst += aad_len; } crypto_copydata(crp->crp_flags, crp->crp_buf, crde->crd_skip, crde->crd_len, dst); dst += crde->crd_len; if (op_type == CHCR_DECRYPT_OP) crypto_copydata(crp->crp_flags, crp->crp_buf, crda->crd_inject, hash_size_in_response, dst); } else ccr_write_ulptx_sgl(sc, dst, sgl_nsegs); /* XXX: TODO backpressure */ t4_wrq_tx(sc->adapter, wr); return (0); } static int ccr_authenc_done(struct ccr_softc *sc, struct ccr_session *s, struct cryptop *crp, const struct cpl_fw6_pld *cpl, int error) { struct cryptodesc *crd; /* * The updated IV to permit chained requests is at * cpl->data[2], but OCF doesn't permit chained requests. * * For a decryption request, the hardware may do a verification * of the HMAC which will fail if the existing HMAC isn't in the * buffer. If that happens, clear the error and copy the HMAC * from the CPL reply into the buffer. * * For encryption requests, crd should be the cipher request * which will have CRD_F_ENCRYPT set. For decryption * requests, crp_desc will be the HMAC request which should * not have this flag set. */ crd = crp->crp_desc; if (error == EBADMSG && !CHK_PAD_ERR_BIT(be64toh(cpl->data[0])) && !(crd->crd_flags & CRD_F_ENCRYPT)) { crypto_copyback(crp->crp_flags, crp->crp_buf, crd->crd_inject, s->hmac.hash_len, (c_caddr_t)(cpl + 1)); error = 0; } return (error); } static int ccr_gcm(struct ccr_softc *sc, uint32_t sid, struct ccr_session *s, struct cryptop *crp, struct cryptodesc *crda, struct cryptodesc *crde) { char iv[CHCR_MAX_CRYPTO_IV_LEN]; struct chcr_wr *crwr; struct wrqe *wr; char *dst; u_int iv_len, iv_loc, kctx_len, op_type, transhdr_len, wr_len; u_int hash_size_in_response, imm_len; u_int aad_start, aad_stop, cipher_start, cipher_stop, auth_insert; u_int hmac_ctrl, input_len; int dsgl_nsegs, dsgl_len; int sgl_nsegs, sgl_len; int error; if (s->blkcipher.key_len == 0) return (EINVAL); /* * AAD is only permitted before the cipher/plain text, not * after. 
*/ if (crda->crd_len + crda->crd_skip > crde->crd_len + crde->crd_skip) return (EINVAL); hash_size_in_response = s->gmac.hash_len; /* * The IV is always stored at the start of the buffer even * though it may be duplicated in the payload. The crypto * engine doesn't work properly if the IV offset points inside * of the AAD region, so a second copy is always required. * * The IV for GCM is further complicated in that IPSec * provides a full 16-byte IV (including the counter), whereas * the /dev/crypto interface sometimes provides a full 16-byte * IV (if no IV is provided in the ioctl) and sometimes a * 12-byte IV (if the IV was explicit). For now the driver * always assumes a 12-byte IV and initializes the low 4 byte * counter to 1. */ iv_loc = IV_IMMEDIATE; if (crde->crd_flags & CRD_F_ENCRYPT) { op_type = CHCR_ENCRYPT_OP; if (crde->crd_flags & CRD_F_IV_EXPLICIT) memcpy(iv, crde->crd_iv, s->blkcipher.iv_len); else arc4rand(iv, s->blkcipher.iv_len, 0); if ((crde->crd_flags & CRD_F_IV_PRESENT) == 0) crypto_copyback(crp->crp_flags, crp->crp_buf, crde->crd_inject, s->blkcipher.iv_len, iv); } else { op_type = CHCR_DECRYPT_OP; if (crde->crd_flags & CRD_F_IV_EXPLICIT) memcpy(iv, crde->crd_iv, s->blkcipher.iv_len); else crypto_copydata(crp->crp_flags, crp->crp_buf, crde->crd_inject, s->blkcipher.iv_len, iv); } /* * If the input IV is 12 bytes, append an explicit counter of * 1. */ if (s->blkcipher.iv_len == 12) { *(uint32_t *)&iv[12] = htobe32(1); iv_len = AES_BLOCK_LEN; } else iv_len = s->blkcipher.iv_len; /* * The output buffer consists of the cipher text followed by * the tag when encrypting. For decryption it only contains * the plain text. */ if (op_type == CHCR_ENCRYPT_OP) { if (crde->crd_len + hash_size_in_response > MAX_REQUEST_SIZE) return (EFBIG); } else { if (crde->crd_len > MAX_REQUEST_SIZE) return (EFBIG); } sglist_reset(sc->sg_dsgl); error = sglist_append_sglist(sc->sg_dsgl, sc->sg_crp, crde->crd_skip, crde->crd_len); if (error) return (error); if (op_type == CHCR_ENCRYPT_OP) { error = sglist_append_sglist(sc->sg_dsgl, sc->sg_crp, crda->crd_inject, hash_size_in_response); if (error) return (error); } dsgl_nsegs = ccr_count_sgl(sc->sg_dsgl, DSGL_SGE_MAXLEN); if (dsgl_nsegs > MAX_RX_PHYS_DSGL_SGE) return (EFBIG); dsgl_len = ccr_phys_dsgl_len(dsgl_nsegs); /* * The 'key' part of the key context consists of the key followed * by the Galois hash key. */ kctx_len = roundup2(s->blkcipher.key_len, 16) + GMAC_BLOCK_LEN; transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, dsgl_len); /* * The input buffer consists of the IV, any AAD, and then the * cipher/plain text. For decryption requests the hash is * appended after the cipher text. 
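 *
 * Illustrative note (not part of the driver): appending a big-endian
 * counter of 1 to a 12-byte IV, as done above, forms the standard GCM
 * pre-counter block J0 for a 96-bit IV (NIST SP 800-38D):
 *
 *	// iv[0..11] already hold the 96-bit IV; iv[12..15] become 00 00 00 01
 *	*(uint32_t *)&iv[12] = htobe32(1);	// J0 = IV || 0x00000001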
*/ input_len = crda->crd_len + crde->crd_len; if (op_type == CHCR_DECRYPT_OP) input_len += hash_size_in_response; if (input_len > MAX_REQUEST_SIZE) return (EFBIG); if (ccr_use_imm_data(transhdr_len, iv_len + input_len)) { imm_len = input_len; sgl_nsegs = 0; sgl_len = 0; } else { imm_len = 0; sglist_reset(sc->sg_ulptx); if (crda->crd_len != 0) { error = sglist_append_sglist(sc->sg_ulptx, sc->sg_crp, crda->crd_skip, crda->crd_len); if (error) return (error); } error = sglist_append_sglist(sc->sg_ulptx, sc->sg_crp, crde->crd_skip, crde->crd_len); if (error) return (error); if (op_type == CHCR_DECRYPT_OP) { error = sglist_append_sglist(sc->sg_ulptx, sc->sg_crp, crda->crd_inject, hash_size_in_response); if (error) return (error); } sgl_nsegs = sc->sg_ulptx->sg_nseg; sgl_len = ccr_ulptx_sgl_len(sgl_nsegs); } if (crda->crd_len != 0) { aad_start = iv_len + 1; aad_stop = aad_start + crda->crd_len - 1; } else { aad_start = 0; aad_stop = 0; } cipher_start = iv_len + crda->crd_len + 1; if (op_type == CHCR_DECRYPT_OP) cipher_stop = hash_size_in_response; else cipher_stop = 0; if (op_type == CHCR_DECRYPT_OP) auth_insert = hash_size_in_response; else auth_insert = 0; wr_len = roundup2(transhdr_len, 16) + roundup2(imm_len, 16) + sgl_len; if (iv_loc == IV_IMMEDIATE) wr_len += iv_len; wr = alloc_wrqe(wr_len, sc->txq); if (wr == NULL) { sc->stats_wr_nomem++; return (ENOMEM); } crwr = wrtod(wr); memset(crwr, 0, wr_len); ccr_populate_wreq(sc, crwr, kctx_len, wr_len, sid, imm_len, sgl_len, 0, iv_loc, crp); /* XXX: Hardcodes SGE loopback channel of 0. */ crwr->sec_cpl.op_ivinsrtofst = htobe32( V_CPL_TX_SEC_PDU_OPCODE(CPL_TX_SEC_PDU) | V_CPL_TX_SEC_PDU_RXCHID(sc->tx_channel_id) | V_CPL_TX_SEC_PDU_ACKFOLLOWS(0) | V_CPL_TX_SEC_PDU_ULPTXLPBK(1) | V_CPL_TX_SEC_PDU_CPLLEN(2) | V_CPL_TX_SEC_PDU_PLACEHOLDER(0) | V_CPL_TX_SEC_PDU_IVINSRTOFST(1)); crwr->sec_cpl.pldlen = htobe32(iv_len + input_len); /* * NB: cipherstop is explicitly set to 0. On encrypt it * should normally be set to 0 anyway (as the encrypt crd ends * at the end of the input). However, for decrypt the cipher * ends before the tag in the AUTHENC case (and authstop is * set to stop before the tag), but for GCM the cipher still * runs to the end of the buffer. Not sure if this is * intentional or a firmware quirk, but it is required for * working tag validation with GCM decryption. */ crwr->sec_cpl.aadstart_cipherstop_hi = htobe32( V_CPL_TX_SEC_PDU_AADSTART(aad_start) | V_CPL_TX_SEC_PDU_AADSTOP(aad_stop) | V_CPL_TX_SEC_PDU_CIPHERSTART(cipher_start) | V_CPL_TX_SEC_PDU_CIPHERSTOP_HI(0)); crwr->sec_cpl.cipherstop_lo_authinsert = htobe32( V_CPL_TX_SEC_PDU_CIPHERSTOP_LO(0) | V_CPL_TX_SEC_PDU_AUTHSTART(cipher_start) | V_CPL_TX_SEC_PDU_AUTHSTOP(cipher_stop) | V_CPL_TX_SEC_PDU_AUTHINSERT(auth_insert)); /* These two flits are actually a CPL_TLS_TX_SCMD_FMT. */ hmac_ctrl = ccr_hmac_ctrl(AES_GMAC_HASH_LEN, hash_size_in_response); crwr->sec_cpl.seqno_numivs = htobe32( V_SCMD_SEQ_NO_CTRL(0) | V_SCMD_PROTO_VERSION(CHCR_SCMD_PROTO_VERSION_GENERIC) | V_SCMD_ENC_DEC_CTRL(op_type) | V_SCMD_CIPH_AUTH_SEQ_CTRL(op_type == CHCR_ENCRYPT_OP ? 
1 : 0) | V_SCMD_CIPH_MODE(CHCR_SCMD_CIPHER_MODE_AES_GCM) | V_SCMD_AUTH_MODE(CHCR_SCMD_AUTH_MODE_GHASH) | V_SCMD_HMAC_CTRL(hmac_ctrl) | V_SCMD_IV_SIZE(iv_len / 2) | V_SCMD_NUM_IVS(0)); crwr->sec_cpl.ivgen_hdrlen = htobe32( V_SCMD_IV_GEN_CTRL(0) | V_SCMD_MORE_FRAGS(0) | V_SCMD_LAST_FRAG(0) | V_SCMD_MAC_ONLY(0) | V_SCMD_AADIVDROP(1) | V_SCMD_HDR_LEN(dsgl_len)); crwr->key_ctx.ctx_hdr = s->blkcipher.key_ctx_hdr; memcpy(crwr->key_ctx.key, s->blkcipher.enckey, s->blkcipher.key_len); dst = crwr->key_ctx.key + roundup2(s->blkcipher.key_len, 16); memcpy(dst, s->gmac.ghash_h, GMAC_BLOCK_LEN); dst = (char *)(crwr + 1) + kctx_len; ccr_write_phys_dsgl(sc, dst, dsgl_nsegs); dst += sizeof(struct cpl_rx_phys_dsgl) + dsgl_len; if (iv_loc == IV_IMMEDIATE) { memcpy(dst, iv, iv_len); dst += iv_len; } if (imm_len != 0) { if (crda->crd_len != 0) { crypto_copydata(crp->crp_flags, crp->crp_buf, crda->crd_skip, crda->crd_len, dst); dst += crda->crd_len; } crypto_copydata(crp->crp_flags, crp->crp_buf, crde->crd_skip, crde->crd_len, dst); dst += crde->crd_len; if (op_type == CHCR_DECRYPT_OP) crypto_copydata(crp->crp_flags, crp->crp_buf, crda->crd_inject, hash_size_in_response, dst); } else ccr_write_ulptx_sgl(sc, dst, sgl_nsegs); /* XXX: TODO backpressure */ t4_wrq_tx(sc->adapter, wr); return (0); } static int ccr_gcm_done(struct ccr_softc *sc, struct ccr_session *s, struct cryptop *crp, const struct cpl_fw6_pld *cpl, int error) { /* * The updated IV to permit chained requests is at * cpl->data[2], but OCF doesn't permit chained requests. * * Note that the hardware should always verify the GMAC hash. */ return (error); } /* * Handle a GCM request with an empty payload by performing the * operation in software. Derived from swcr_authenc(). */ static void ccr_gcm_soft(struct ccr_session *s, struct cryptop *crp, struct cryptodesc *crda, struct cryptodesc *crde) { struct aes_gmac_ctx gmac_ctx; char block[GMAC_BLOCK_LEN]; char digest[GMAC_DIGEST_LEN]; char iv[AES_BLOCK_LEN]; int i, len; /* * This assumes a 12-byte IV from the crp. See longer comment * above in ccr_gcm() for more details. */ if (crde->crd_flags & CRD_F_ENCRYPT) { if (crde->crd_flags & CRD_F_IV_EXPLICIT) memcpy(iv, crde->crd_iv, 12); else arc4rand(iv, 12, 0); } else { if (crde->crd_flags & CRD_F_IV_EXPLICIT) memcpy(iv, crde->crd_iv, 12); else crypto_copydata(crp->crp_flags, crp->crp_buf, crde->crd_inject, 12, iv); } *(uint32_t *)&iv[12] = htobe32(1); /* Initialize the MAC. */ AES_GMAC_Init(&gmac_ctx); AES_GMAC_Setkey(&gmac_ctx, s->blkcipher.enckey, s->blkcipher.key_len); AES_GMAC_Reinit(&gmac_ctx, iv, sizeof(iv)); /* MAC the AAD. */ for (i = 0; i < crda->crd_len; i += sizeof(block)) { len = imin(crda->crd_len - i, sizeof(block)); crypto_copydata(crp->crp_flags, crp->crp_buf, crda->crd_skip + i, len, block); bzero(block + len, sizeof(block) - len); AES_GMAC_Update(&gmac_ctx, block, sizeof(block)); } /* Length block. 
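Since the payload is empty in this path, only the AAD bit length has to be filled in below. For reference, the general GCM length block carries two big-endian 64-bit fields, the AAD length in bits followed by the ciphertext length in bits; this is a sketch derived from the GCM specification, not driver code:

#include <stdint.h>
#include <string.h>

static void
gcm_length_block(uint8_t block[16], uint64_t aad_bytes, uint64_t ct_bytes)
{
	uint64_t bits;
	int i;

	memset(block, 0, 16);
	bits = aad_bytes * 8;
	for (i = 0; i < 8; i++)		/* big-endian AAD bit count */
		block[7 - i] = bits >> (8 * i);
	bits = ct_bytes * 8;
	for (i = 0; i < 8; i++)		/* big-endian ciphertext bit count */
		block[15 - i] = bits >> (8 * i);
}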
*/ bzero(block, sizeof(block)); ((uint32_t *)block)[1] = htobe32(crda->crd_len * 8); AES_GMAC_Update(&gmac_ctx, block, sizeof(block)); AES_GMAC_Final(digest, &gmac_ctx); if (crde->crd_flags & CRD_F_ENCRYPT) { crypto_copyback(crp->crp_flags, crp->crp_buf, crda->crd_inject, sizeof(digest), digest); crp->crp_etype = 0; } else { char digest2[GMAC_DIGEST_LEN]; crypto_copydata(crp->crp_flags, crp->crp_buf, crda->crd_inject, sizeof(digest2), digest2); if (timingsafe_bcmp(digest, digest2, sizeof(digest)) == 0) crp->crp_etype = 0; else crp->crp_etype = EBADMSG; } crypto_done(crp); } static void ccr_identify(driver_t *driver, device_t parent) { struct adapter *sc; sc = device_get_softc(parent); if (sc->cryptocaps & FW_CAPS_CONFIG_CRYPTO_LOOKASIDE && device_find_child(parent, "ccr", -1) == NULL) device_add_child(parent, "ccr", -1); } static int ccr_probe(device_t dev) { device_set_desc(dev, "Chelsio Crypto Accelerator"); return (BUS_PROBE_DEFAULT); } static void ccr_sysctls(struct ccr_softc *sc) { struct sysctl_ctx_list *ctx; struct sysctl_oid *oid; struct sysctl_oid_list *children; ctx = device_get_sysctl_ctx(sc->dev); /* * dev.ccr.X. */ oid = device_get_sysctl_tree(sc->dev); children = SYSCTL_CHILDREN(oid); /* * dev.ccr.X.stats. */ oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "stats", CTLFLAG_RD, NULL, "statistics"); children = SYSCTL_CHILDREN(oid); SYSCTL_ADD_U64(ctx, children, OID_AUTO, "hmac", CTLFLAG_RD, &sc->stats_hmac, 0, "HMAC requests submitted"); SYSCTL_ADD_U64(ctx, children, OID_AUTO, "cipher_encrypt", CTLFLAG_RD, &sc->stats_blkcipher_encrypt, 0, "Cipher encryption requests submitted"); SYSCTL_ADD_U64(ctx, children, OID_AUTO, "cipher_decrypt", CTLFLAG_RD, &sc->stats_blkcipher_decrypt, 0, "Cipher decryption requests submitted"); SYSCTL_ADD_U64(ctx, children, OID_AUTO, "authenc_encrypt", CTLFLAG_RD, &sc->stats_authenc_encrypt, 0, "Combined AES+HMAC encryption requests submitted"); SYSCTL_ADD_U64(ctx, children, OID_AUTO, "authenc_decrypt", CTLFLAG_RD, &sc->stats_authenc_decrypt, 0, "Combined AES+HMAC decryption requests submitted"); SYSCTL_ADD_U64(ctx, children, OID_AUTO, "gcm_encrypt", CTLFLAG_RD, &sc->stats_gcm_encrypt, 0, "AES-GCM encryption requests submitted"); SYSCTL_ADD_U64(ctx, children, OID_AUTO, "gcm_decrypt", CTLFLAG_RD, &sc->stats_gcm_decrypt, 0, "AES-GCM decryption requests submitted"); SYSCTL_ADD_U64(ctx, children, OID_AUTO, "wr_nomem", CTLFLAG_RD, &sc->stats_wr_nomem, 0, "Work request memory allocation failures"); SYSCTL_ADD_U64(ctx, children, OID_AUTO, "inflight", CTLFLAG_RD, &sc->stats_inflight, 0, "Requests currently pending"); SYSCTL_ADD_U64(ctx, children, OID_AUTO, "mac_error", CTLFLAG_RD, &sc->stats_mac_error, 0, "MAC errors"); SYSCTL_ADD_U64(ctx, children, OID_AUTO, "pad_error", CTLFLAG_RD, &sc->stats_pad_error, 0, "Padding errors"); SYSCTL_ADD_U64(ctx, children, OID_AUTO, "bad_session", CTLFLAG_RD, &sc->stats_bad_session, 0, "Requests with invalid session ID"); SYSCTL_ADD_U64(ctx, children, OID_AUTO, "sglist_error", CTLFLAG_RD, &sc->stats_sglist_error, 0, "Requests for which DMA mapping failed"); SYSCTL_ADD_U64(ctx, children, OID_AUTO, "process_error", CTLFLAG_RD, &sc->stats_process_error, 0, "Requests failed during queueing"); } static int ccr_attach(device_t dev) { struct ccr_softc *sc; int32_t cid; /* * TODO: Crypto requests will panic if the parent device isn't * initialized so that the queues are up and running. Need to * figure out how to handle that correctly, maybe just reject * requests if the adapter isn't fully initialized? 
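The decrypt path above verifies the tag with timingsafe_bcmp() so that a mismatch cannot be detected through early-exit timing. A minimal constant-time comparison looks roughly like this (a sketch, not the libkern implementation):

#include <stddef.h>
#include <stdint.h>

static int
ct_compare(const uint8_t *a, const uint8_t *b, size_t len)
{
	uint8_t diff = 0;
	size_t i;

	for (i = 0; i < len; i++)
		diff |= a[i] ^ b[i];	/* accumulate, never return early */
	return (diff != 0);		/* 0 on match, like timingsafe_bcmp */
}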
*/ sc = device_get_softc(dev); sc->dev = dev; sc->adapter = device_get_softc(device_get_parent(dev)); sc->txq = &sc->adapter->sge.ctrlq[0]; sc->rxq = &sc->adapter->sge.rxq[0]; cid = crypto_get_driverid(dev, CRYPTOCAP_F_HARDWARE); if (cid < 0) { device_printf(dev, "could not get crypto driver id\n"); return (ENXIO); } sc->cid = cid; sc->adapter->ccr_softc = sc; /* XXX: TODO? */ sc->tx_channel_id = 0; mtx_init(&sc->lock, "ccr", NULL, MTX_DEF); sc->sg_crp = sglist_alloc(TX_SGL_SEGS, M_WAITOK); sc->sg_ulptx = sglist_alloc(TX_SGL_SEGS, M_WAITOK); sc->sg_dsgl = sglist_alloc(MAX_RX_PHYS_DSGL_SGE, M_WAITOK); ccr_sysctls(sc); crypto_register(cid, CRYPTO_SHA1_HMAC, 0, 0); crypto_register(cid, CRYPTO_SHA2_256_HMAC, 0, 0); crypto_register(cid, CRYPTO_SHA2_384_HMAC, 0, 0); crypto_register(cid, CRYPTO_SHA2_512_HMAC, 0, 0); crypto_register(cid, CRYPTO_AES_CBC, 0, 0); crypto_register(cid, CRYPTO_AES_ICM, 0, 0); crypto_register(cid, CRYPTO_AES_NIST_GCM_16, 0, 0); crypto_register(cid, CRYPTO_AES_128_NIST_GMAC, 0, 0); crypto_register(cid, CRYPTO_AES_192_NIST_GMAC, 0, 0); crypto_register(cid, CRYPTO_AES_256_NIST_GMAC, 0, 0); crypto_register(cid, CRYPTO_AES_XTS, 0, 0); return (0); } static int ccr_detach(device_t dev) { struct ccr_softc *sc; int i; sc = device_get_softc(dev); mtx_lock(&sc->lock); for (i = 0; i < sc->nsessions; i++) { if (sc->sessions[i].active || sc->sessions[i].pending != 0) { mtx_unlock(&sc->lock); return (EBUSY); } } sc->detaching = true; mtx_unlock(&sc->lock); crypto_unregister_all(sc->cid); free(sc->sessions, M_CCR); mtx_destroy(&sc->lock); sglist_free(sc->sg_dsgl); sglist_free(sc->sg_ulptx); sglist_free(sc->sg_crp); sc->adapter->ccr_softc = NULL; return (0); } static void ccr_copy_partial_hash(void *dst, int cri_alg, union authctx *auth_ctx) { uint32_t *u32; uint64_t *u64; u_int i; u32 = (uint32_t *)dst; u64 = (uint64_t *)dst; switch (cri_alg) { case CRYPTO_SHA1_HMAC: for (i = 0; i < SHA1_HASH_LEN / 4; i++) u32[i] = htobe32(auth_ctx->sha1ctx.h.b32[i]); break; case CRYPTO_SHA2_256_HMAC: for (i = 0; i < SHA2_256_HASH_LEN / 4; i++) u32[i] = htobe32(auth_ctx->sha256ctx.state[i]); break; case CRYPTO_SHA2_384_HMAC: for (i = 0; i < SHA2_512_HASH_LEN / 8; i++) u64[i] = htobe64(auth_ctx->sha384ctx.state[i]); break; case CRYPTO_SHA2_512_HMAC: for (i = 0; i < SHA2_512_HASH_LEN / 8; i++) u64[i] = htobe64(auth_ctx->sha512ctx.state[i]); break; } } static void ccr_init_hmac_digest(struct ccr_session *s, int cri_alg, char *key, int klen) { union authctx auth_ctx; struct auth_hash *axf; u_int i; /* * If the key is larger than the block size, use the digest of * the key as the key instead. */ axf = s->hmac.auth_hash; klen /= 8; if (klen > axf->blocksize) { axf->Init(&auth_ctx); axf->Update(&auth_ctx, key, klen); axf->Final(s->hmac.ipad, &auth_ctx); klen = axf->hashsize; } else memcpy(s->hmac.ipad, key, klen); memset(s->hmac.ipad + klen, 0, axf->blocksize); memcpy(s->hmac.opad, s->hmac.ipad, axf->blocksize); for (i = 0; i < axf->blocksize; i++) { s->hmac.ipad[i] ^= HMAC_IPAD_VAL; s->hmac.opad[i] ^= HMAC_OPAD_VAL; } /* * Hash the raw ipad and opad and store the partial result in * the same buffer. */ axf->Init(&auth_ctx); axf->Update(&auth_ctx, s->hmac.ipad, axf->blocksize); ccr_copy_partial_hash(s->hmac.ipad, cri_alg, &auth_ctx); axf->Init(&auth_ctx); axf->Update(&auth_ctx, s->hmac.opad, axf->blocksize); ccr_copy_partial_hash(s->hmac.opad, cri_alg, &auth_ctx); } /* * Borrowed from AES_GMAC_Setkey(). 
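The ipad/opad precomputation in ccr_init_hmac_digest() above follows the standard HMAC construction: pad the key to the block size, XOR with the two pad constants, and hash one block of each so only the partial digests need to be kept. A simplified standalone sketch, assuming a byte-count key no longer than a 64-byte block (not driver code):

#include <stddef.h>
#include <stdint.h>
#include <string.h>

#define HMAC_BLOCK	64		/* e.g. SHA-256 block size */
#define IPAD_VAL	0x36
#define OPAD_VAL	0x5c

static void
hmac_prepare_pads(uint8_t ipad[HMAC_BLOCK], uint8_t opad[HMAC_BLOCK],
    const uint8_t *key, size_t klen)
{
	size_t i;

	memset(ipad, 0, HMAC_BLOCK);	/* zero-pad short keys */
	memcpy(ipad, key, klen);
	memcpy(opad, ipad, HMAC_BLOCK);
	for (i = 0; i < HMAC_BLOCK; i++) {
		ipad[i] ^= IPAD_VAL;
		opad[i] ^= OPAD_VAL;
	}
	/*
	 * The driver then hashes one block of each pad and keeps only the
	 * partial state via ccr_copy_partial_hash().
	 */
}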
*/ static void ccr_init_gmac_hash(struct ccr_session *s, char *key, int klen) { static char zeroes[GMAC_BLOCK_LEN]; uint32_t keysched[4 * (RIJNDAEL_MAXNR + 1)]; int rounds; rounds = rijndaelKeySetupEnc(keysched, key, klen); rijndaelEncrypt(keysched, rounds, zeroes, s->gmac.ghash_h); } static int ccr_aes_check_keylen(int alg, int klen) { switch (klen) { case 128: case 192: if (alg == CRYPTO_AES_XTS) return (EINVAL); break; case 256: break; case 512: if (alg != CRYPTO_AES_XTS) return (EINVAL); break; default: return (EINVAL); } return (0); } /* * Borrowed from cesa_prep_aes_key(). We should perhaps have a public * function to generate this instead. * * NB: The crypto engine wants the words in the decryption key in reverse * order. */ static void ccr_aes_getdeckey(void *dec_key, const void *enc_key, unsigned int kbits) { uint32_t ek[4 * (RIJNDAEL_MAXNR + 1)]; uint32_t *dkey; int i; rijndaelKeySetupEnc(ek, enc_key, kbits); dkey = dec_key; dkey += (kbits / 8) / 4; switch (kbits) { case 128: for (i = 0; i < 4; i++) *--dkey = htobe32(ek[4 * 10 + i]); break; case 192: for (i = 0; i < 2; i++) *--dkey = htobe32(ek[4 * 11 + 2 + i]); for (i = 0; i < 4; i++) *--dkey = htobe32(ek[4 * 12 + i]); break; case 256: for (i = 0; i < 4; i++) *--dkey = htobe32(ek[4 * 13 + i]); for (i = 0; i < 4; i++) *--dkey = htobe32(ek[4 * 14 + i]); break; } MPASS(dkey == dec_key); } static void ccr_aes_setkey(struct ccr_session *s, int alg, const void *key, int klen) { unsigned int ck_size, iopad_size, kctx_flits, kctx_len, kbits, mk_size; unsigned int opad_present; if (alg == CRYPTO_AES_XTS) kbits = klen / 2; else kbits = klen; switch (kbits) { case 128: ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128; break; case 192: ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_192; break; case 256: ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_256; break; default: panic("should not get here"); } s->blkcipher.key_len = klen / 8; memcpy(s->blkcipher.enckey, key, s->blkcipher.key_len); switch (alg) { case CRYPTO_AES_CBC: case CRYPTO_AES_XTS: ccr_aes_getdeckey(s->blkcipher.deckey, key, kbits); break; } kctx_len = roundup2(s->blkcipher.key_len, 16); switch (s->mode) { case AUTHENC: mk_size = s->hmac.mk_size; opad_present = 1; iopad_size = roundup2(s->hmac.partial_digest_len, 16); kctx_len += iopad_size * 2; break; case GCM: mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_128; opad_present = 0; kctx_len += GMAC_BLOCK_LEN; break; default: mk_size = CHCR_KEYCTX_NO_KEY; opad_present = 0; break; } kctx_flits = (sizeof(struct _key_ctx) + kctx_len) / 16; s->blkcipher.key_ctx_hdr = htobe32(V_KEY_CONTEXT_CTX_LEN(kctx_flits) | V_KEY_CONTEXT_DUAL_CK(alg == CRYPTO_AES_XTS) | V_KEY_CONTEXT_OPAD_PRESENT(opad_present) | V_KEY_CONTEXT_SALT_PRESENT(1) | V_KEY_CONTEXT_CK_SIZE(ck_size) | V_KEY_CONTEXT_MK_SIZE(mk_size) | V_KEY_CONTEXT_VALID(1)); } static int ccr_newsession(device_t dev, uint32_t *sidp, struct cryptoini *cri) { struct ccr_softc *sc; struct ccr_session *s; struct auth_hash *auth_hash; struct cryptoini *c, *hash, *cipher; unsigned int auth_mode, cipher_mode, iv_len, mk_size; unsigned int partial_digest_len; int error, i, sess; bool gcm_hash; if (sidp == NULL || cri == NULL) return (EINVAL); gcm_hash = false; cipher = NULL; hash = NULL; auth_hash = NULL; auth_mode = CHCR_SCMD_AUTH_MODE_NOP; cipher_mode = CHCR_SCMD_CIPHER_MODE_NOP; iv_len = 0; mk_size = 0; partial_digest_len = 0; for (c = cri; c != NULL; c = c->cri_next) { switch (c->cri_alg) { case CRYPTO_SHA1_HMAC: case CRYPTO_SHA2_256_HMAC: case CRYPTO_SHA2_384_HMAC: case CRYPTO_SHA2_512_HMAC: case CRYPTO_AES_128_NIST_GMAC: case 
CRYPTO_AES_192_NIST_GMAC: case CRYPTO_AES_256_NIST_GMAC: if (hash) return (EINVAL); hash = c; switch (c->cri_alg) { case CRYPTO_SHA1_HMAC: auth_hash = &auth_hash_hmac_sha1; auth_mode = CHCR_SCMD_AUTH_MODE_SHA1; mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_160; partial_digest_len = SHA1_HASH_LEN; break; case CRYPTO_SHA2_256_HMAC: auth_hash = &auth_hash_hmac_sha2_256; auth_mode = CHCR_SCMD_AUTH_MODE_SHA256; mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_256; partial_digest_len = SHA2_256_HASH_LEN; break; case CRYPTO_SHA2_384_HMAC: auth_hash = &auth_hash_hmac_sha2_384; auth_mode = CHCR_SCMD_AUTH_MODE_SHA512_384; mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_512; partial_digest_len = SHA2_512_HASH_LEN; break; case CRYPTO_SHA2_512_HMAC: auth_hash = &auth_hash_hmac_sha2_512; auth_mode = CHCR_SCMD_AUTH_MODE_SHA512_512; mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_512; partial_digest_len = SHA2_512_HASH_LEN; break; case CRYPTO_AES_128_NIST_GMAC: case CRYPTO_AES_192_NIST_GMAC: case CRYPTO_AES_256_NIST_GMAC: gcm_hash = true; auth_mode = CHCR_SCMD_AUTH_MODE_GHASH; mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_128; break; } break; case CRYPTO_AES_CBC: case CRYPTO_AES_ICM: case CRYPTO_AES_NIST_GCM_16: case CRYPTO_AES_XTS: if (cipher) return (EINVAL); cipher = c; switch (c->cri_alg) { case CRYPTO_AES_CBC: cipher_mode = CHCR_SCMD_CIPHER_MODE_AES_CBC; iv_len = AES_BLOCK_LEN; break; case CRYPTO_AES_ICM: cipher_mode = CHCR_SCMD_CIPHER_MODE_AES_CTR; iv_len = AES_BLOCK_LEN; break; case CRYPTO_AES_NIST_GCM_16: cipher_mode = CHCR_SCMD_CIPHER_MODE_AES_GCM; iv_len = AES_GCM_IV_LEN; break; case CRYPTO_AES_XTS: cipher_mode = CHCR_SCMD_CIPHER_MODE_AES_XTS; iv_len = AES_BLOCK_LEN; break; } if (c->cri_key != NULL) { error = ccr_aes_check_keylen(c->cri_alg, c->cri_klen); if (error) return (error); } break; default: return (EINVAL); } } if (gcm_hash != (cipher_mode == CHCR_SCMD_CIPHER_MODE_AES_GCM)) return (EINVAL); if (hash == NULL && cipher == NULL) return (EINVAL); if (hash != NULL && hash->cri_key == NULL) return (EINVAL); sc = device_get_softc(dev); mtx_lock(&sc->lock); if (sc->detaching) { mtx_unlock(&sc->lock); return (ENXIO); } sess = -1; for (i = 0; i < sc->nsessions; i++) { if (!sc->sessions[i].active && sc->sessions[i].pending == 0) { sess = i; break; } } if (sess == -1) { - s = malloc(sizeof(*s) * (sc->nsessions + 1), M_CCR, + s = mallocarray(sc->nsessions + 1, sizeof(*s), M_CCR, M_NOWAIT | M_ZERO); if (s == NULL) { mtx_unlock(&sc->lock); return (ENOMEM); } if (sc->sessions != NULL) memcpy(s, sc->sessions, sizeof(*s) * sc->nsessions); sess = sc->nsessions; free(sc->sessions, M_CCR); sc->sessions = s; sc->nsessions++; } s = &sc->sessions[sess]; if (gcm_hash) s->mode = GCM; else if (hash != NULL && cipher != NULL) s->mode = AUTHENC; else if (hash != NULL) s->mode = HMAC; else { MPASS(cipher != NULL); s->mode = BLKCIPHER; } if (gcm_hash) { if (hash->cri_mlen == 0) s->gmac.hash_len = AES_GMAC_HASH_LEN; else s->gmac.hash_len = hash->cri_mlen; ccr_init_gmac_hash(s, hash->cri_key, hash->cri_klen); } else if (hash != NULL) { s->hmac.auth_hash = auth_hash; s->hmac.auth_mode = auth_mode; s->hmac.mk_size = mk_size; s->hmac.partial_digest_len = partial_digest_len; if (hash->cri_mlen == 0) s->hmac.hash_len = auth_hash->hashsize; else s->hmac.hash_len = hash->cri_mlen; ccr_init_hmac_digest(s, hash->cri_alg, hash->cri_key, hash->cri_klen); } if (cipher != NULL) { s->blkcipher.cipher_mode = cipher_mode; s->blkcipher.iv_len = iv_len; if (cipher->cri_key != NULL) ccr_aes_setkey(s, cipher->cri_alg, cipher->cri_key, cipher->cri_klen); } s->active = true; mtx_unlock(&sc->lock); 
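The one-line change above is the point of this commit: the session array growth previously computed sizeof(*s) * (sc->nsessions + 1) as an unchecked multiplication, while mallocarray(9) checks that product for overflow rather than letting it wrap. A userland illustration of the kind of check involved (not the kernel implementation; this sketch simply returns NULL on overflow):

#include <stdint.h>
#include <stdlib.h>

/*
 * Overflow-checked array allocation: refuse the request when
 * nmemb * size would wrap, then let calloc() zero the memory
 * (playing the role of M_ZERO above).
 */
static void *
checked_array_alloc(size_t nmemb, size_t size)
{

	if (size != 0 && nmemb > SIZE_MAX / size)
		return (NULL);
	return (calloc(nmemb, size));
}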
*sidp = sess; return (0); } static int ccr_freesession(device_t dev, uint64_t tid) { struct ccr_softc *sc; uint32_t sid; int error; sc = device_get_softc(dev); sid = CRYPTO_SESID2LID(tid); mtx_lock(&sc->lock); if (sid >= sc->nsessions || !sc->sessions[sid].active) error = EINVAL; else { if (sc->sessions[sid].pending != 0) device_printf(dev, "session %d freed with %d pending requests\n", sid, sc->sessions[sid].pending); sc->sessions[sid].active = false; error = 0; } mtx_unlock(&sc->lock); return (error); } static int ccr_process(device_t dev, struct cryptop *crp, int hint) { struct ccr_softc *sc; struct ccr_session *s; struct cryptodesc *crd, *crda, *crde; uint32_t sid; int error; if (crp == NULL) return (EINVAL); crd = crp->crp_desc; sid = CRYPTO_SESID2LID(crp->crp_sid); sc = device_get_softc(dev); mtx_lock(&sc->lock); if (sid >= sc->nsessions || !sc->sessions[sid].active) { sc->stats_bad_session++; error = EINVAL; goto out; } error = ccr_populate_sglist(sc->sg_crp, crp); if (error) { sc->stats_sglist_error++; goto out; } s = &sc->sessions[sid]; switch (s->mode) { case HMAC: if (crd->crd_flags & CRD_F_KEY_EXPLICIT) ccr_init_hmac_digest(s, crd->crd_alg, crd->crd_key, crd->crd_klen); error = ccr_hmac(sc, sid, s, crp); if (error == 0) sc->stats_hmac++; break; case BLKCIPHER: if (crd->crd_flags & CRD_F_KEY_EXPLICIT) { error = ccr_aes_check_keylen(crd->crd_alg, crd->crd_klen); if (error) break; ccr_aes_setkey(s, crd->crd_alg, crd->crd_key, crd->crd_klen); } error = ccr_blkcipher(sc, sid, s, crp); if (error == 0) { if (crd->crd_flags & CRD_F_ENCRYPT) sc->stats_blkcipher_encrypt++; else sc->stats_blkcipher_decrypt++; } break; case AUTHENC: error = 0; switch (crd->crd_alg) { case CRYPTO_AES_CBC: case CRYPTO_AES_ICM: case CRYPTO_AES_XTS: /* Only encrypt-then-authenticate supported. 
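For AUTHENC requests the dispatch that follows only accepts encrypt-then-authenticate ordering: a chain that lists the cipher descriptor first must be an encrypt operation, and a chain that lists the auth descriptor first must be a decrypt. Condensed (the booleans stand in for the CRD_F_ENCRYPT test below):

#include <stdbool.h>

static bool
authenc_order_ok(bool cipher_first, bool encrypt)
{

	/* cipher-first chains must encrypt; auth-first chains must decrypt */
	return (cipher_first == encrypt);
}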
*/ crde = crd; crda = crd->crd_next; if (!(crde->crd_flags & CRD_F_ENCRYPT)) { error = EINVAL; break; } break; default: crda = crd; crde = crd->crd_next; if (crde->crd_flags & CRD_F_ENCRYPT) { error = EINVAL; break; } break; } if (error) break; if (crda->crd_flags & CRD_F_KEY_EXPLICIT) ccr_init_hmac_digest(s, crda->crd_alg, crda->crd_key, crda->crd_klen); if (crde->crd_flags & CRD_F_KEY_EXPLICIT) { error = ccr_aes_check_keylen(crde->crd_alg, crde->crd_klen); if (error) break; ccr_aes_setkey(s, crde->crd_alg, crde->crd_key, crde->crd_klen); } error = ccr_authenc(sc, sid, s, crp, crda, crde); if (error == 0) { if (crde->crd_flags & CRD_F_ENCRYPT) sc->stats_authenc_encrypt++; else sc->stats_authenc_decrypt++; } break; case GCM: error = 0; if (crd->crd_alg == CRYPTO_AES_NIST_GCM_16) { crde = crd; crda = crd->crd_next; } else { crda = crd; crde = crd->crd_next; } if (crda->crd_flags & CRD_F_KEY_EXPLICIT) ccr_init_gmac_hash(s, crda->crd_key, crda->crd_klen); if (crde->crd_flags & CRD_F_KEY_EXPLICIT) { error = ccr_aes_check_keylen(crde->crd_alg, crde->crd_klen); if (error) break; ccr_aes_setkey(s, crde->crd_alg, crde->crd_key, crde->crd_klen); } if (crde->crd_len == 0) { mtx_unlock(&sc->lock); ccr_gcm_soft(s, crp, crda, crde); return (0); } error = ccr_gcm(sc, sid, s, crp, crda, crde); if (error == 0) { if (crde->crd_flags & CRD_F_ENCRYPT) sc->stats_gcm_encrypt++; else sc->stats_gcm_decrypt++; } break; } if (error == 0) { s->pending++; sc->stats_inflight++; } else sc->stats_process_error++; out: mtx_unlock(&sc->lock); if (error) { crp->crp_etype = error; crypto_done(crp); } return (0); } static int do_cpl6_fw_pld(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m) { struct ccr_softc *sc = iq->adapter->ccr_softc; struct ccr_session *s; const struct cpl_fw6_pld *cpl; struct cryptop *crp; uint32_t sid, status; int error; if (m != NULL) cpl = mtod(m, const void *); else cpl = (const void *)(rss + 1); crp = (struct cryptop *)(uintptr_t)be64toh(cpl->data[1]); sid = CRYPTO_SESID2LID(crp->crp_sid); status = be64toh(cpl->data[0]); if (CHK_MAC_ERR_BIT(status) || CHK_PAD_ERR_BIT(status)) error = EBADMSG; else error = 0; mtx_lock(&sc->lock); MPASS(sid < sc->nsessions); s = &sc->sessions[sid]; s->pending--; sc->stats_inflight--; switch (s->mode) { case HMAC: error = ccr_hmac_done(sc, s, crp, cpl, error); break; case BLKCIPHER: error = ccr_blkcipher_done(sc, s, crp, cpl, error); break; case AUTHENC: error = ccr_authenc_done(sc, s, crp, cpl, error); break; case GCM: error = ccr_gcm_done(sc, s, crp, cpl, error); break; } if (error == EBADMSG) { if (CHK_MAC_ERR_BIT(status)) sc->stats_mac_error++; if (CHK_PAD_ERR_BIT(status)) sc->stats_pad_error++; } mtx_unlock(&sc->lock); crp->crp_etype = error; crypto_done(crp); m_freem(m); return (0); } static int ccr_modevent(module_t mod, int cmd, void *arg) { switch (cmd) { case MOD_LOAD: t4_register_cpl_handler(CPL_FW6_PLD, do_cpl6_fw_pld); return (0); case MOD_UNLOAD: t4_register_cpl_handler(CPL_FW6_PLD, NULL); return (0); default: return (EOPNOTSUPP); } } static device_method_t ccr_methods[] = { DEVMETHOD(device_identify, ccr_identify), DEVMETHOD(device_probe, ccr_probe), DEVMETHOD(device_attach, ccr_attach), DEVMETHOD(device_detach, ccr_detach), DEVMETHOD(cryptodev_newsession, ccr_newsession), DEVMETHOD(cryptodev_freesession, ccr_freesession), DEVMETHOD(cryptodev_process, ccr_process), DEVMETHOD_END }; static driver_t ccr_driver = { "ccr", ccr_methods, sizeof(struct ccr_softc) }; static devclass_t ccr_devclass; DRIVER_MODULE(ccr, t6nex, ccr_driver, 
ccr_devclass, ccr_modevent, NULL); MODULE_VERSION(ccr, 1); MODULE_DEPEND(ccr, crypto, 1, 1, 1); MODULE_DEPEND(ccr, t6nex, 1, 1, 1); Index: head/sys/dev/esp/ncr53c9x.c =================================================================== --- head/sys/dev/esp/ncr53c9x.c (revision 327948) +++ head/sys/dev/esp/ncr53c9x.c (revision 327949) @@ -1,3259 +1,3259 @@ /*- * SPDX-License-Identifier: BSD-2-Clause-FreeBSD AND BSD-2-Clause NetBSD * * Copyright (c) 2004 Scott Long * Copyright (c) 2005, 2008 Marius Strobl * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * */ /* $NetBSD: ncr53c9x.c,v 1.145 2012/06/18 21:23:56 martin Exp $ */ /*- * Copyright (c) 1998, 2002 The NetBSD Foundation, Inc. * All rights reserved. * * This code is derived from software contributed to The NetBSD Foundation * by Charles M. Hannum. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. */ /*- * Copyright (c) 1994 Peter Galbavy * Copyright (c) 1995 Paul Kranenburg * All rights reserved. 
* * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed by Peter Galbavy * 4. The name of the author may not be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. */ /* * Based on aic6360 by Jarle Greipsland * * Acknowledgements: Many of the algorithms used in this driver are * inspired by the work of Julian Elischer (julian@FreeBSD.org) and * Charles Hannum (mycroft@duality.gnu.ai.mit.edu). Thanks a million! 
*/ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include devclass_t esp_devclass; MODULE_DEPEND(esp, cam, 1, 1, 1); #ifdef NCR53C9X_DEBUG int ncr53c9x_debug = NCR_SHOWMISC /* | NCR_SHOWPHASE | NCR_SHOWTRAC | NCR_SHOWCMDS */; #endif static void ncr53c9x_abort(struct ncr53c9x_softc *sc, struct ncr53c9x_ecb *ecb); static void ncr53c9x_action(struct cam_sim *sim, union ccb *ccb); static void ncr53c9x_async(void *cbarg, uint32_t code, struct cam_path *path, void *arg); static void ncr53c9x_callout(void *arg); static void ncr53c9x_clear(struct ncr53c9x_softc *sc, cam_status result); static void ncr53c9x_clear_target(struct ncr53c9x_softc *sc, int target, cam_status result); static void ncr53c9x_dequeue(struct ncr53c9x_softc *sc, struct ncr53c9x_ecb *ecb); static void ncr53c9x_done(struct ncr53c9x_softc *sc, struct ncr53c9x_ecb *ecb); static void ncr53c9x_free_ecb(struct ncr53c9x_softc *sc, struct ncr53c9x_ecb *ecb); static void ncr53c9x_msgin(struct ncr53c9x_softc *sc); static void ncr53c9x_msgout(struct ncr53c9x_softc *sc); static void ncr53c9x_init(struct ncr53c9x_softc *sc, int doreset); static void ncr53c9x_intr1(struct ncr53c9x_softc *sc); static void ncr53c9x_poll(struct cam_sim *sim); static int ncr53c9x_rdfifo(struct ncr53c9x_softc *sc, int how); static int ncr53c9x_reselect(struct ncr53c9x_softc *sc, int message, int tagtype, int tagid); static void ncr53c9x_reset(struct ncr53c9x_softc *sc); static void ncr53c9x_sense(struct ncr53c9x_softc *sc, struct ncr53c9x_ecb *ecb); static void ncr53c9x_sched(struct ncr53c9x_softc *sc); static void ncr53c9x_select(struct ncr53c9x_softc *sc, struct ncr53c9x_ecb *ecb); static void ncr53c9x_watch(void *arg); static void ncr53c9x_wrfifo(struct ncr53c9x_softc *sc, uint8_t *p, int len); static struct ncr53c9x_ecb *ncr53c9x_get_ecb(struct ncr53c9x_softc *sc); static struct ncr53c9x_linfo *ncr53c9x_lunsearch(struct ncr53c9x_tinfo *sc, int64_t lun); static inline void ncr53c9x_readregs(struct ncr53c9x_softc *sc); static inline void ncr53c9x_setsync(struct ncr53c9x_softc *sc, struct ncr53c9x_tinfo *ti); static inline int ncr53c9x_stp2cpb(struct ncr53c9x_softc *sc, int period); #define NCR_RDFIFO_START 0 #define NCR_RDFIFO_CONTINUE 1 #define NCR_SET_COUNT(sc, size) do { \ NCR_WRITE_REG((sc), NCR_TCL, (size)); \ NCR_WRITE_REG((sc), NCR_TCM, (size) >> 8); \ if ((sc->sc_features & NCR_F_LARGEXFER) != 0) \ NCR_WRITE_REG((sc), NCR_TCH, (size) >> 16); \ if (sc->sc_rev == NCR_VARIANT_FAS366) \ NCR_WRITE_REG(sc, NCR_RCH, 0); \ } while (/* CONSTCOND */0) #ifndef mstohz #define mstohz(ms) \ (((ms) < 0x20000) ? \ ((ms +0u) / 1000u) * hz : \ ((ms +0u) * hz) /1000u) #endif /* * Names for the NCR53c9x variants, corresponding to the variant tags * in ncr53c9xvar.h. */ static const char *ncr53c9x_variant_names[] = { "ESP100", "ESP100A", "ESP200", "NCR53C94", "NCR53C96", "ESP406", "FAS408", "FAS216", "AM53C974", "FAS366/HME", "NCR53C90 (86C01)", "FAS100A", "FAS236", }; /* * Search linked list for LUN info by LUN id. */ static struct ncr53c9x_linfo * ncr53c9x_lunsearch(struct ncr53c9x_tinfo *ti, int64_t lun) { struct ncr53c9x_linfo *li; LIST_FOREACH(li, &ti->luns, link) if (li->lun == lun) return (li); return (NULL); } /* * Attach this instance, and then all the sub-devices. 
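The NCR_SET_COUNT() macro above programs a DMA transfer length by splitting it across the chip's low, middle and, on large-transfer-capable parts, high counter registers. A standalone sketch of that split (regs[] is a hypothetical stand-in for the real register writes):

#include <stdint.h>

enum { TCL, TCM, TCH };

static void
set_transfer_count(uint8_t regs[3], uint32_t size, int large_xfer)
{

	regs[TCL] = size & 0xff;
	regs[TCM] = (size >> 8) & 0xff;
	if (large_xfer)			/* NCR_F_LARGEXFER chips only */
		regs[TCH] = (size >> 16) & 0xff;
}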
*/ int ncr53c9x_attach(struct ncr53c9x_softc *sc) { struct cam_devq *devq; struct cam_sim *sim; struct cam_path *path; struct ncr53c9x_ecb *ecb; int error, i; if (NCR_LOCK_INITIALIZED(sc) == 0) { device_printf(sc->sc_dev, "mutex not initialized\n"); return (ENXIO); } callout_init_mtx(&sc->sc_watchdog, &sc->sc_lock, 0); /* * Note, the front-end has set us up to print the chip variation. */ if (sc->sc_rev >= NCR_VARIANT_MAX) { device_printf(sc->sc_dev, "unknown variant %d, devices not " "attached\n", sc->sc_rev); return (EINVAL); } device_printf(sc->sc_dev, "%s, %d MHz, SCSI ID %d\n", ncr53c9x_variant_names[sc->sc_rev], sc->sc_freq, sc->sc_id); sc->sc_ntarg = (sc->sc_rev == NCR_VARIANT_FAS366) ? 16 : 8; /* * Allocate SCSI message buffers. * Front-ends can override allocation to avoid alignment * handling in the DMA engines. Note that ncr53c9x_msgout() * can request a 1 byte DMA transfer. */ if (sc->sc_omess == NULL) { sc->sc_omess_self = 1; sc->sc_omess = malloc(NCR_MAX_MSG_LEN, M_DEVBUF, M_NOWAIT); if (sc->sc_omess == NULL) { device_printf(sc->sc_dev, "cannot allocate MSGOUT buffer\n"); return (ENOMEM); } } else sc->sc_omess_self = 0; if (sc->sc_imess == NULL) { sc->sc_imess_self = 1; sc->sc_imess = malloc(NCR_MAX_MSG_LEN + 1, M_DEVBUF, M_NOWAIT); if (sc->sc_imess == NULL) { device_printf(sc->sc_dev, "cannot allocate MSGIN buffer\n"); error = ENOMEM; goto fail_omess; } } else sc->sc_imess_self = 0; - sc->sc_tinfo = malloc(sc->sc_ntarg * sizeof(sc->sc_tinfo[0]), + sc->sc_tinfo = mallocarray(sc->sc_ntarg, sizeof(sc->sc_tinfo[0]), M_DEVBUF, M_NOWAIT | M_ZERO); if (sc->sc_tinfo == NULL) { device_printf(sc->sc_dev, "cannot allocate target info buffer\n"); error = ENOMEM; goto fail_imess; } /* * Treat NCR53C90 with the 86C01 DMA chip exactly as ESP100 * from now on. */ if (sc->sc_rev == NCR_VARIANT_NCR53C90_86C01) sc->sc_rev = NCR_VARIANT_ESP100; sc->sc_ccf = FREQTOCCF(sc->sc_freq); /* The value *must not* be == 1. Make it 2. */ if (sc->sc_ccf == 1) sc->sc_ccf = 2; /* * The recommended timeout is 250ms. This register is loaded * with a value calculated as follows, from the docs: * * (timeout period) x (CLK frequency) * reg = ------------------------------------- * 8192 x (Clock Conversion Factor) * * Since CCF has a linear relation to CLK, this generally computes * to the constant of 153. */ sc->sc_timeout = ((250 * 1000) * sc->sc_freq) / (8192 * sc->sc_ccf); /* The CCF register only has 3 bits; 0 is actually 8. */ sc->sc_ccf &= 7; /* * Register with CAM. */ devq = cam_simq_alloc(sc->sc_ntarg); if (devq == NULL) { device_printf(sc->sc_dev, "cannot allocate device queue\n"); error = ENOMEM; goto fail_tinfo; } sim = cam_sim_alloc(ncr53c9x_action, ncr53c9x_poll, "esp", sc, device_get_unit(sc->sc_dev), &sc->sc_lock, 1, NCR_TAG_DEPTH, devq); if (sim == NULL) { device_printf(sc->sc_dev, "cannot allocate SIM entry\n"); error = ENOMEM; goto fail_devq; } NCR_LOCK(sc); if (xpt_bus_register(sim, sc->sc_dev, 0) != CAM_SUCCESS) { device_printf(sc->sc_dev, "cannot register bus\n"); error = EIO; goto fail_lock; } if (xpt_create_path(&path, NULL, cam_sim_path(sim), CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) { device_printf(sc->sc_dev, "cannot create path\n"); error = EIO; goto fail_bus; } if (xpt_register_async(AC_LOST_DEVICE, ncr53c9x_async, sim, path) != CAM_REQ_CMP) { device_printf(sc->sc_dev, "cannot register async handler\n"); error = EIO; goto fail_path; } sc->sc_sim = sim; sc->sc_path = path; /* Reset state and bus. 
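The select-timeout register value computed above follows the documented formula reg = (timeout * CLK) / (8192 * CCF), with a 250 ms timeout and the clock expressed in MHz. A worked version (the sample values are examples only):

static unsigned int
esp_timeout_reg(unsigned int freq_mhz, unsigned int ccf)
{

	return ((250 * 1000 * freq_mhz) / (8192 * ccf));
}

/*
 * e.g. esp_timeout_reg(25, 5) and esp_timeout_reg(40, 8) both truncate
 * to 152, close to the constant of 153 noted in the comment above.
 */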
*/ #if 0 sc->sc_cfflags = sc->sc_dev.dv_cfdata->cf_flags; #else sc->sc_cfflags = 0; #endif sc->sc_state = 0; ncr53c9x_init(sc, 1); TAILQ_INIT(&sc->free_list); if ((sc->ecb_array = malloc(sizeof(struct ncr53c9x_ecb) * NCR_TAG_DEPTH, M_DEVBUF, M_NOWAIT | M_ZERO)) == NULL) { device_printf(sc->sc_dev, "cannot allocate ECB array\n"); error = ENOMEM; goto fail_async; } for (i = 0; i < NCR_TAG_DEPTH; i++) { ecb = &sc->ecb_array[i]; ecb->sc = sc; ecb->tag_id = i; callout_init_mtx(&ecb->ch, &sc->sc_lock, 0); TAILQ_INSERT_HEAD(&sc->free_list, ecb, free_links); } callout_reset(&sc->sc_watchdog, 60 * hz, ncr53c9x_watch, sc); NCR_UNLOCK(sc); return (0); fail_async: xpt_register_async(0, ncr53c9x_async, sim, path); fail_path: xpt_free_path(path); fail_bus: xpt_bus_deregister(cam_sim_path(sim)); fail_lock: NCR_UNLOCK(sc); cam_sim_free(sim, TRUE); fail_devq: cam_simq_free(devq); fail_tinfo: free(sc->sc_tinfo, M_DEVBUF); fail_imess: if (sc->sc_imess_self) free(sc->sc_imess, M_DEVBUF); fail_omess: if (sc->sc_omess_self) free(sc->sc_omess, M_DEVBUF); return (error); } int ncr53c9x_detach(struct ncr53c9x_softc *sc) { struct ncr53c9x_linfo *li, *nextli; int t; callout_drain(&sc->sc_watchdog); NCR_LOCK(sc); if (sc->sc_tinfo) { /* Cancel all commands. */ ncr53c9x_clear(sc, CAM_REQ_ABORTED); /* Free logical units. */ for (t = 0; t < sc->sc_ntarg; t++) { for (li = LIST_FIRST(&sc->sc_tinfo[t].luns); li; li = nextli) { nextli = LIST_NEXT(li, link); free(li, M_DEVBUF); } } } xpt_register_async(0, ncr53c9x_async, sc->sc_sim, sc->sc_path); xpt_free_path(sc->sc_path); xpt_bus_deregister(cam_sim_path(sc->sc_sim)); cam_sim_free(sc->sc_sim, TRUE); NCR_UNLOCK(sc); free(sc->ecb_array, M_DEVBUF); free(sc->sc_tinfo, M_DEVBUF); if (sc->sc_imess_self) free(sc->sc_imess, M_DEVBUF); if (sc->sc_omess_self) free(sc->sc_omess, M_DEVBUF); return (0); } /* * This is the generic ncr53c9x reset function. It does not reset the SCSI * bus, only this controller, but kills any on-going commands, and also stops * and resets the DMA. * * After reset, registers are loaded with the defaults from the attach * routine above. */ static void ncr53c9x_reset(struct ncr53c9x_softc *sc) { NCR_LOCK_ASSERT(sc, MA_OWNED); /* Reset DMA first. */ NCRDMA_RESET(sc); /* Reset SCSI chip. */ NCRCMD(sc, NCRCMD_RSTCHIP); NCRCMD(sc, NCRCMD_NOP); DELAY(500); /* Do these backwards, and fall through. 
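The variant switch that follows relies on deliberate fallthrough: the newest chips program their extra registers first and then fall into the cases for the older chips they are compatible with, so each variant ends up with the union of the applicable settings. A tiny model of the idiom (the feature bits are made up for illustration):

enum variant { OLD, NEWER, NEWEST };

static unsigned int
features_for(enum variant v)
{
	unsigned int f = 0;

	switch (v) {
	case NEWEST:
		f |= 0x4;		/* NEWEST-only setting */
		/* FALLTHROUGH */
	case NEWER:
		f |= 0x2;		/* shared with NEWER */
		/* FALLTHROUGH */
	case OLD:
		f |= 0x1;		/* baseline */
		break;
	}
	return (f);
}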
*/ switch (sc->sc_rev) { case NCR_VARIANT_ESP406: case NCR_VARIANT_FAS408: NCR_WRITE_REG(sc, NCR_CFG5, sc->sc_cfg5 | NCRCFG5_SINT); NCR_WRITE_REG(sc, NCR_CFG4, sc->sc_cfg4); /* FALLTHROUGH */ case NCR_VARIANT_AM53C974: case NCR_VARIANT_FAS100A: case NCR_VARIANT_FAS216: case NCR_VARIANT_FAS236: case NCR_VARIANT_NCR53C94: case NCR_VARIANT_NCR53C96: case NCR_VARIANT_ESP200: sc->sc_features |= NCR_F_HASCFG3; NCR_WRITE_REG(sc, NCR_CFG3, sc->sc_cfg3); /* FALLTHROUGH */ case NCR_VARIANT_ESP100A: sc->sc_features |= NCR_F_SELATN3; if ((sc->sc_cfg2 & NCRCFG2_FE) != 0) sc->sc_features |= NCR_F_LARGEXFER; NCR_WRITE_REG(sc, NCR_CFG2, sc->sc_cfg2); /* FALLTHROUGH */ case NCR_VARIANT_ESP100: NCR_WRITE_REG(sc, NCR_CFG1, sc->sc_cfg1); NCR_WRITE_REG(sc, NCR_CCF, sc->sc_ccf); NCR_WRITE_REG(sc, NCR_SYNCOFF, 0); NCR_WRITE_REG(sc, NCR_TIMEOUT, sc->sc_timeout); break; case NCR_VARIANT_FAS366: sc->sc_features |= NCR_F_HASCFG3 | NCR_F_FASTSCSI | NCR_F_SELATN3 | NCR_F_LARGEXFER; sc->sc_cfg3 = NCRFASCFG3_FASTCLK | NCRFASCFG3_OBAUTO; if (sc->sc_id > 7) sc->sc_cfg3 |= NCRFASCFG3_IDBIT3; sc->sc_cfg3_fscsi = NCRFASCFG3_FASTSCSI; NCR_WRITE_REG(sc, NCR_CFG3, sc->sc_cfg3); sc->sc_cfg2 = NCRCFG2_HMEFE | NCRCFG2_HME32; NCR_WRITE_REG(sc, NCR_CFG2, sc->sc_cfg2); NCR_WRITE_REG(sc, NCR_CFG1, sc->sc_cfg1); NCR_WRITE_REG(sc, NCR_CCF, sc->sc_ccf); NCR_WRITE_REG(sc, NCR_SYNCOFF, 0); NCR_WRITE_REG(sc, NCR_TIMEOUT, sc->sc_timeout); break; default: device_printf(sc->sc_dev, "unknown revision code, assuming ESP100\n"); NCR_WRITE_REG(sc, NCR_CFG1, sc->sc_cfg1); NCR_WRITE_REG(sc, NCR_CCF, sc->sc_ccf); NCR_WRITE_REG(sc, NCR_SYNCOFF, 0); NCR_WRITE_REG(sc, NCR_TIMEOUT, sc->sc_timeout); } if (sc->sc_rev == NCR_VARIANT_AM53C974) NCR_WRITE_REG(sc, NCR_AMDCFG4, sc->sc_cfg4); #if 0 device_printf(sc->sc_dev, "%s: revision %d\n", __func__, sc->sc_rev); device_printf(sc->sc_dev, "%s: cfg1 0x%x, cfg2 0x%x, cfg3 0x%x, ccf " "0x%x, timeout 0x%x\n", __func__, sc->sc_cfg1, sc->sc_cfg2, sc->sc_cfg3, sc->sc_ccf, sc->sc_timeout); #endif } /* * Clear all commands. */ static void ncr53c9x_clear(struct ncr53c9x_softc *sc, cam_status result) { struct ncr53c9x_ecb *ecb; int r; NCR_LOCK_ASSERT(sc, MA_OWNED); /* Cancel any active commands. */ sc->sc_state = NCR_CLEANING; sc->sc_msgify = 0; ecb = sc->sc_nexus; if (ecb != NULL) { ecb->ccb->ccb_h.status = result; ncr53c9x_done(sc, ecb); } /* Cancel outstanding disconnected commands. */ for (r = 0; r < sc->sc_ntarg; r++) ncr53c9x_clear_target(sc, r, result); } /* * Clear all commands for a specific target. */ static void ncr53c9x_clear_target(struct ncr53c9x_softc *sc, int target, cam_status result) { struct ncr53c9x_ecb *ecb; struct ncr53c9x_linfo *li; int i; NCR_LOCK_ASSERT(sc, MA_OWNED); /* Cancel outstanding disconnected commands on each LUN. */ LIST_FOREACH(li, &sc->sc_tinfo[target].luns, link) { ecb = li->untagged; if (ecb != NULL) { li->untagged = NULL; /* * XXX should we terminate a command * that never reached the disk? */ li->busy = 0; ecb->ccb->ccb_h.status = result; ncr53c9x_done(sc, ecb); } for (i = 0; i < NCR_TAG_DEPTH; i++) { ecb = li->queued[i]; if (ecb != NULL) { li->queued[i] = NULL; ecb->ccb->ccb_h.status = result; ncr53c9x_done(sc, ecb); } } li->used = 0; } } /* * Initialize ncr53c9x state machine. */ static void ncr53c9x_init(struct ncr53c9x_softc *sc, int doreset) { struct ncr53c9x_tinfo *ti; int r; NCR_LOCK_ASSERT(sc, MA_OWNED); NCR_MISC(("[NCR_INIT(%d) %d] ", doreset, sc->sc_state)); if (sc->sc_state == 0) { /* First time through; initialize. 
*/ TAILQ_INIT(&sc->ready_list); sc->sc_nexus = NULL; memset(sc->sc_tinfo, 0, sizeof(*sc->sc_tinfo)); for (r = 0; r < sc->sc_ntarg; r++) { LIST_INIT(&sc->sc_tinfo[r].luns); } } else ncr53c9x_clear(sc, CAM_CMD_TIMEOUT); /* * Reset the chip to a known state. */ ncr53c9x_reset(sc); sc->sc_flags = 0; sc->sc_msgpriq = sc->sc_msgout = sc->sc_msgoutq = 0; sc->sc_phase = sc->sc_prevphase = INVALID_PHASE; /* * If we're the first time through, set the default parameters * for all targets. Otherwise we only clear their current transfer * settings so we'll renegotiate their goal settings with the next * command. */ if (sc->sc_state == 0) { for (r = 0; r < sc->sc_ntarg; r++) { ti = &sc->sc_tinfo[r]; /* XXX - config flags per target: low bits: no reselect; high bits: no synch */ ti->flags = ((sc->sc_minsync != 0 && (sc->sc_cfflags & (1 << ((r & 7) + 8))) == 0) ? 0 : T_SYNCHOFF) | ((sc->sc_cfflags & (1 << (r & 7))) == 0 ? 0 : T_RSELECTOFF); ti->curr.period = ti->goal.period = 0; ti->curr.offset = ti->goal.offset = 0; ti->curr.width = ti->goal.width = MSG_EXT_WDTR_BUS_8_BIT; } } else { for (r = 0; r < sc->sc_ntarg; r++) { ti = &sc->sc_tinfo[r]; ti->flags &= ~(T_SDTRSENT | T_WDTRSENT); ti->curr.period = 0; ti->curr.offset = 0; ti->curr.width = MSG_EXT_WDTR_BUS_8_BIT; } } if (doreset) { sc->sc_state = NCR_SBR; NCRCMD(sc, NCRCMD_RSTSCSI); /* Give the bus a fighting chance to settle. */ DELAY(250000); } else { sc->sc_state = NCR_IDLE; ncr53c9x_sched(sc); } } /* * Read the NCR registers, and save their contents for later use. * NCR_STAT, NCR_STEP & NCR_INTR are mostly zeroed out when reading * NCR_INTR - so make sure it is the last read. * * I think that (from reading the docs) most bits in these registers * only make sense when the DMA CSR has an interrupt showing. Call only * if an interrupt is pending. */ static inline void ncr53c9x_readregs(struct ncr53c9x_softc *sc) { NCR_LOCK_ASSERT(sc, MA_OWNED); sc->sc_espstat = NCR_READ_REG(sc, NCR_STAT); /* Only the step bits are of interest. */ sc->sc_espstep = NCR_READ_REG(sc, NCR_STEP) & NCRSTEP_MASK; if (sc->sc_rev == NCR_VARIANT_FAS366) sc->sc_espstat2 = NCR_READ_REG(sc, NCR_STAT2); sc->sc_espintr = NCR_READ_REG(sc, NCR_INTR); /* * Determine the SCSI bus phase, return either a real SCSI bus phase * or some pseudo phase we use to detect certain exceptions. */ sc->sc_phase = (sc->sc_espintr & NCRINTR_DIS) ? BUSFREE_PHASE : sc->sc_espstat & NCRSTAT_PHASE; NCR_INTS(("regs[intr=%02x,stat=%02x,step=%02x,stat2=%02x] ", sc->sc_espintr, sc->sc_espstat, sc->sc_espstep, sc->sc_espstat2)); } /* * Convert Synchronous Transfer Period to chip register Clock Per Byte value. */ static inline int ncr53c9x_stp2cpb(struct ncr53c9x_softc *sc, int period) { int v; NCR_LOCK_ASSERT(sc, MA_OWNED); v = (sc->sc_freq * period) / 250; if (ncr53c9x_cpb2stp(sc, v) < period) /* Correct round-down error. */ v++; return (v); } static inline void ncr53c9x_setsync(struct ncr53c9x_softc *sc, struct ncr53c9x_tinfo *ti) { uint8_t cfg3, syncoff, synctp; NCR_LOCK_ASSERT(sc, MA_OWNED); cfg3 = sc->sc_cfg3; if (ti->curr.offset != 0) { syncoff = ti->curr.offset; synctp = ncr53c9x_stp2cpb(sc, ti->curr.period); if (sc->sc_features & NCR_F_FASTSCSI) { /* * If the period is 200ns or less (ti->period <= 50), * put the chip in Fast SCSI mode. */ if (ti->curr.period <= 50) /* * There are (at least) 4 variations of the * configuration 3 register. The drive attach * routine sets the appropriate bit to put the * chip into Fast SCSI mode so that it doesn't * have to be figured out here each time. 
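ncr53c9x_stp2cpb() above converts a SCSI synchronous transfer period (in 4 ns units) into the chip's clock-per-byte register value and corrects the integer round-down so the programmed period never undershoots the negotiated one. A standalone sketch, where cpb_to_stp() is an assumed inverse rather than a copy of the driver macro:

static int
cpb_to_stp(int freq_mhz, int cpb)
{

	return ((250 * cpb) / freq_mhz);
}

static int
stp_to_cpb(int freq_mhz, int period)
{
	int v;

	v = (freq_mhz * period) / 250;
	if (cpb_to_stp(freq_mhz, v) < period)
		v++;		/* round up after integer truncation */
	return (v);
}

/* e.g. a 200 ns period (50 in 4 ns units) on a 25 MHz chip gives 5. */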
*/ cfg3 |= sc->sc_cfg3_fscsi; } /* * Am53c974 requires different SYNCTP values when the * FSCSI bit is off. */ if (sc->sc_rev == NCR_VARIANT_AM53C974 && (cfg3 & NCRAMDCFG3_FSCSI) == 0) synctp--; } else { syncoff = 0; synctp = 0; } if (ti->curr.width != MSG_EXT_WDTR_BUS_8_BIT) { if (sc->sc_rev == NCR_VARIANT_FAS366) cfg3 |= NCRFASCFG3_EWIDE; } if (sc->sc_features & NCR_F_HASCFG3) NCR_WRITE_REG(sc, NCR_CFG3, cfg3); NCR_WRITE_REG(sc, NCR_SYNCOFF, syncoff); NCR_WRITE_REG(sc, NCR_SYNCTP, synctp); } /* * Send a command to a target, set the driver state to NCR_SELECTING * and let the caller take care of the rest. * * Keeping this as a function allows me to say that this may be done * by DMA instead of programmed I/O soon. */ static void ncr53c9x_select(struct ncr53c9x_softc *sc, struct ncr53c9x_ecb *ecb) { struct ncr53c9x_tinfo *ti; uint8_t *cmd; size_t dmasize; int clen, error, selatn3, selatns; int lun = ecb->ccb->ccb_h.target_lun; int target = ecb->ccb->ccb_h.target_id; NCR_LOCK_ASSERT(sc, MA_OWNED); NCR_TRACE(("[%s(t%d,l%d,cmd:%x,tag:%x,%x)] ", __func__, target, lun, ecb->cmd.cmd.opcode, ecb->tag[0], ecb->tag[1])); ti = &sc->sc_tinfo[target]; sc->sc_state = NCR_SELECTING; /* * Schedule the callout now, the first time we will go away * expecting to come back due to an interrupt, because it is * always possible that the interrupt may never happen. */ callout_reset(&ecb->ch, mstohz(ecb->timeout), ncr53c9x_callout, ecb); /* * The docs say the target register is never reset, and I * can't think of a better place to set it. */ if (sc->sc_rev == NCR_VARIANT_FAS366) { NCRCMD(sc, NCRCMD_FLUSH); NCR_WRITE_REG(sc, NCR_SELID, target | NCR_BUSID_HMEXC32 | NCR_BUSID_HMEENCID); } else NCR_WRITE_REG(sc, NCR_SELID, target); /* * If we are requesting sense, force a renegotiation if we are * currently using anything different from asynchronous at 8 bit * as the target might have lost our transfer negotiations. */ if ((ecb->flags & ECB_SENSE) != 0 && (ti->curr.offset != 0 || ti->curr.width != MSG_EXT_WDTR_BUS_8_BIT)) { ti->curr.period = 0; ti->curr.offset = 0; ti->curr.width = MSG_EXT_WDTR_BUS_8_BIT; } ncr53c9x_setsync(sc, ti); selatn3 = selatns = 0; if (ecb->tag[0] != 0) { if (sc->sc_features & NCR_F_SELATN3) /* Use SELATN3 to send tag messages. */ selatn3 = 1; else /* We don't have SELATN3; use SELATNS to send tags. */ selatns = 1; } if (ti->curr.period != ti->goal.period || ti->curr.offset != ti->goal.offset || ti->curr.width != ti->goal.width) { /* We have to use SELATNS to send sync/wide messages. */ selatn3 = 0; selatns = 1; } cmd = (uint8_t *)&ecb->cmd.cmd; if (selatn3) { /* We'll use tags with SELATN3. */ clen = ecb->clen + 3; cmd -= 3; cmd[0] = MSG_IDENTIFY(lun, 1); /* msg[0] */ cmd[1] = ecb->tag[0]; /* msg[1] */ cmd[2] = ecb->tag[1]; /* msg[2] */ } else { /* We don't have tags, or will send messages with SELATNS. */ clen = ecb->clen + 1; cmd -= 1; cmd[0] = MSG_IDENTIFY(lun, (ti->flags & T_RSELECTOFF) == 0); } if ((sc->sc_features & NCR_F_DMASELECT) && !selatns) { /* Setup DMA transfer for command. */ dmasize = clen; sc->sc_cmdlen = clen; sc->sc_cmdp = cmd; error = NCRDMA_SETUP(sc, &sc->sc_cmdp, &sc->sc_cmdlen, 0, &dmasize); if (error != 0) goto cmd; /* Program the SCSI counter. */ NCR_SET_COUNT(sc, dmasize); /* Load the count in. */ NCRCMD(sc, NCRCMD_NOP | NCRCMD_DMA); /* And get the target's attention. */ if (selatn3) { sc->sc_msgout = SEND_TAG; sc->sc_flags |= NCR_ATN; NCRCMD(sc, NCRCMD_SELATN3 | NCRCMD_DMA); } else NCRCMD(sc, NCRCMD_SELATN | NCRCMD_DMA); NCRDMA_GO(sc); return; } cmd: /* * Who am I? 
This is where we tell the target that we are * happy for it to disconnect etc. */ /* Now get the command into the FIFO. */ sc->sc_cmdlen = 0; ncr53c9x_wrfifo(sc, cmd, clen); /* And get the target's attention. */ if (selatns) { NCR_MSGS(("SELATNS \n")); /* Arbitrate, select and stop after IDENTIFY message. */ NCRCMD(sc, NCRCMD_SELATNS); } else if (selatn3) { sc->sc_msgout = SEND_TAG; sc->sc_flags |= NCR_ATN; NCRCMD(sc, NCRCMD_SELATN3); } else NCRCMD(sc, NCRCMD_SELATN); } static void ncr53c9x_free_ecb(struct ncr53c9x_softc *sc, struct ncr53c9x_ecb *ecb) { NCR_LOCK_ASSERT(sc, MA_OWNED); ecb->flags = 0; TAILQ_INSERT_TAIL(&sc->free_list, ecb, free_links); } static struct ncr53c9x_ecb * ncr53c9x_get_ecb(struct ncr53c9x_softc *sc) { struct ncr53c9x_ecb *ecb; NCR_LOCK_ASSERT(sc, MA_OWNED); ecb = TAILQ_FIRST(&sc->free_list); if (ecb) { if (ecb->flags != 0) panic("%s: ecb flags not cleared", __func__); TAILQ_REMOVE(&sc->free_list, ecb, free_links); ecb->flags = ECB_ALLOC; bzero(&ecb->ccb, sizeof(struct ncr53c9x_ecb) - offsetof(struct ncr53c9x_ecb, ccb)); } return (ecb); } /* * DRIVER FUNCTIONS CALLABLE FROM HIGHER LEVEL DRIVERS: */ /* * Start a SCSI-command. * This function is called by the higher level SCSI-driver to queue/run * SCSI-commands. */ static void ncr53c9x_action(struct cam_sim *sim, union ccb *ccb) { struct ccb_pathinq *cpi; struct ccb_scsiio *csio; struct ccb_trans_settings *cts; struct ccb_trans_settings_scsi *scsi; struct ccb_trans_settings_spi *spi; struct ncr53c9x_ecb *ecb; struct ncr53c9x_softc *sc; struct ncr53c9x_tinfo *ti; int target; sc = cam_sim_softc(sim); NCR_LOCK_ASSERT(sc, MA_OWNED); NCR_TRACE(("[%s %d]", __func__, ccb->ccb_h.func_code)); switch (ccb->ccb_h.func_code) { case XPT_RESET_BUS: ncr53c9x_init(sc, 1); ccb->ccb_h.status = CAM_REQ_CMP; break; case XPT_CALC_GEOMETRY: cam_calc_geometry(&ccb->ccg, sc->sc_extended_geom); break; case XPT_PATH_INQ: cpi = &ccb->cpi; cpi->version_num = 1; cpi->hba_inquiry = PI_SDTR_ABLE | PI_TAG_ABLE; cpi->hba_inquiry |= (sc->sc_rev == NCR_VARIANT_FAS366) ? 
PI_WIDE_16 : 0; cpi->target_sprt = 0; cpi->hba_misc = 0; cpi->hba_eng_cnt = 0; cpi->max_target = sc->sc_ntarg - 1; cpi->max_lun = 7; cpi->initiator_id = sc->sc_id; strlcpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN); strlcpy(cpi->hba_vid, "NCR", HBA_IDLEN); strlcpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN); cpi->unit_number = cam_sim_unit(sim); cpi->bus_id = 0; cpi->base_transfer_speed = 3300; cpi->protocol = PROTO_SCSI; cpi->protocol_version = SCSI_REV_2; cpi->transport = XPORT_SPI; cpi->transport_version = 2; cpi->maxio = sc->sc_maxxfer; ccb->ccb_h.status = CAM_REQ_CMP; break; case XPT_GET_TRAN_SETTINGS: cts = &ccb->cts; ti = &sc->sc_tinfo[ccb->ccb_h.target_id]; scsi = &cts->proto_specific.scsi; spi = &cts->xport_specific.spi; cts->protocol = PROTO_SCSI; cts->protocol_version = SCSI_REV_2; cts->transport = XPORT_SPI; cts->transport_version = 2; if (cts->type == CTS_TYPE_CURRENT_SETTINGS) { spi->sync_period = ti->curr.period; spi->sync_offset = ti->curr.offset; spi->bus_width = ti->curr.width; if ((ti->flags & T_TAG) != 0) { spi->flags |= CTS_SPI_FLAGS_DISC_ENB; scsi->flags |= CTS_SCSI_FLAGS_TAG_ENB; } else { spi->flags &= ~CTS_SPI_FLAGS_DISC_ENB; scsi->flags &= ~CTS_SCSI_FLAGS_TAG_ENB; } } else { if ((ti->flags & T_SYNCHOFF) != 0) { spi->sync_period = 0; spi->sync_offset = 0; } else { spi->sync_period = sc->sc_minsync; spi->sync_offset = sc->sc_maxoffset; } spi->bus_width = sc->sc_maxwidth; spi->flags |= CTS_SPI_FLAGS_DISC_ENB; scsi->flags |= CTS_SCSI_FLAGS_TAG_ENB; } spi->valid = CTS_SPI_VALID_BUS_WIDTH | CTS_SPI_VALID_SYNC_RATE | CTS_SPI_VALID_SYNC_OFFSET | CTS_SPI_VALID_DISC; scsi->valid = CTS_SCSI_VALID_TQ; ccb->ccb_h.status = CAM_REQ_CMP; break; case XPT_ABORT: device_printf(sc->sc_dev, "XPT_ABORT called\n"); ccb->ccb_h.status = CAM_FUNC_NOTAVAIL; break; case XPT_TERM_IO: device_printf(sc->sc_dev, "XPT_TERM_IO called\n"); ccb->ccb_h.status = CAM_FUNC_NOTAVAIL; break; case XPT_RESET_DEV: case XPT_SCSI_IO: if (ccb->ccb_h.target_id >= sc->sc_ntarg) { ccb->ccb_h.status = CAM_PATH_INVALID; goto done; } /* Get an ECB to use. */ ecb = ncr53c9x_get_ecb(sc); /* * This should never happen as we track resources * in the mid-layer. */ if (ecb == NULL) { xpt_freeze_simq(sim, 1); ccb->ccb_h.status = CAM_REQUEUE_REQ; device_printf(sc->sc_dev, "unable to allocate ecb\n"); goto done; } /* Initialize ecb. 
*/ ecb->ccb = ccb; ecb->timeout = ccb->ccb_h.timeout; if (ccb->ccb_h.func_code == XPT_RESET_DEV) { ecb->flags |= ECB_RESET; ecb->clen = 0; ecb->dleft = 0; } else { csio = &ccb->csio; if ((ccb->ccb_h.flags & CAM_CDB_POINTER) != 0) bcopy(csio->cdb_io.cdb_ptr, &ecb->cmd.cmd, csio->cdb_len); else bcopy(csio->cdb_io.cdb_bytes, &ecb->cmd.cmd, csio->cdb_len); ecb->clen = csio->cdb_len; ecb->daddr = csio->data_ptr; ecb->dleft = csio->dxfer_len; } ecb->stat = 0; TAILQ_INSERT_TAIL(&sc->ready_list, ecb, chain); ecb->flags |= ECB_READY; if (sc->sc_state == NCR_IDLE) ncr53c9x_sched(sc); return; case XPT_SET_TRAN_SETTINGS: cts = &ccb->cts; target = ccb->ccb_h.target_id; ti = &sc->sc_tinfo[target]; scsi = &cts->proto_specific.scsi; spi = &cts->xport_specific.spi; if ((scsi->valid & CTS_SCSI_VALID_TQ) != 0) { if ((sc->sc_cfflags & (1<<((target & 7) + 16))) == 0 && (scsi->flags & CTS_SCSI_FLAGS_TAG_ENB)) { NCR_MISC(("%s: target %d: tagged queuing\n", device_get_nameunit(sc->sc_dev), target)); ti->flags |= T_TAG; } else ti->flags &= ~T_TAG; } if ((spi->valid & CTS_SPI_VALID_BUS_WIDTH) != 0) { NCR_MISC(("%s: target %d: wide negotiation\n", device_get_nameunit(sc->sc_dev), target)); ti->goal.width = spi->bus_width; } if ((spi->valid & CTS_SPI_VALID_SYNC_RATE) != 0) { NCR_MISC(("%s: target %d: sync period negotiation\n", device_get_nameunit(sc->sc_dev), target)); ti->goal.period = spi->sync_period; } if ((spi->valid & CTS_SPI_VALID_SYNC_OFFSET) != 0) { NCR_MISC(("%s: target %d: sync offset negotiation\n", device_get_nameunit(sc->sc_dev), target)); ti->goal.offset = spi->sync_offset; } ccb->ccb_h.status = CAM_REQ_CMP; break; default: device_printf(sc->sc_dev, "Unhandled function code %d\n", ccb->ccb_h.func_code); ccb->ccb_h.status = CAM_PROVIDE_FAIL; } done: xpt_done(ccb); } /* * Used when interrupt driven I/O is not allowed, e.g. during boot. */ static void ncr53c9x_poll(struct cam_sim *sim) { struct ncr53c9x_softc *sc; sc = cam_sim_softc(sim); NCR_LOCK_ASSERT(sc, MA_OWNED); NCR_TRACE(("[%s] ", __func__)); if (NCRDMA_ISINTR(sc)) ncr53c9x_intr1(sc); } /* * Asynchronous notification handler */ static void ncr53c9x_async(void *cbarg, uint32_t code, struct cam_path *path, void *arg) { struct ncr53c9x_softc *sc; struct ncr53c9x_tinfo *ti; int target; sc = cam_sim_softc(cbarg); NCR_LOCK_ASSERT(sc, MA_OWNED); switch (code) { case AC_LOST_DEVICE: target = xpt_path_target_id(path); if (target < 0 || target >= sc->sc_ntarg) break; /* Cancel outstanding disconnected commands. */ ncr53c9x_clear_target(sc, target, CAM_REQ_ABORTED); /* Set the default parameters for the target. */ ti = &sc->sc_tinfo[target]; /* XXX - config flags per target: low bits: no reselect; high bits: no synch */ ti->flags = ((sc->sc_minsync != 0 && (sc->sc_cfflags & (1 << ((target & 7) + 8))) == 0) ? 0 : T_SYNCHOFF) | ((sc->sc_cfflags & (1 << (target & 7))) == 0 ? 0 : T_RSELECTOFF); ti->curr.period = ti->goal.period = 0; ti->curr.offset = ti->goal.offset = 0; ti->curr.width = ti->goal.width = MSG_EXT_WDTR_BUS_8_BIT; break; } } /* * LOW LEVEL SCSI UTILITIES */ /* * Schedule a SCSI operation. This has now been pulled out of the interrupt * handler so that we may call it from ncr53c9x_action and ncr53c9x_done. * This may save us an unnecessary interrupt just to get things going. * Should only be called when state == NCR_IDLE and with sc_lock held. 
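The scheduler that follows decides per command whether to use tagged queuing; condensed, a command goes out untagged when the target has tagging (or reselection) disabled, when it is an autosense request, or when CAM did not supply a usable tag action. A compact restatement (booleans stand in for the flag tests in the real code):

#include <stdbool.h>

static int
choose_tag(bool tags_enabled, bool is_sense, bool tag_action_valid,
    int tag_action)
{

	if (!tags_enabled || is_sense || !tag_action_valid)
		return (0);		/* untagged */
	return (tag_action);		/* e.g. MSG_SIMPLE_Q_TAG */
}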
*/ static void ncr53c9x_sched(struct ncr53c9x_softc *sc) { struct ncr53c9x_ecb *ecb; struct ncr53c9x_linfo *li; struct ncr53c9x_tinfo *ti; int lun, tag; NCR_LOCK_ASSERT(sc, MA_OWNED); NCR_TRACE(("[%s] ", __func__)); if (sc->sc_state != NCR_IDLE) panic("%s: not IDLE (state=%d)", __func__, sc->sc_state); /* * Find first ecb in ready queue that is for a target/lunit * combinations that is not busy. */ TAILQ_FOREACH(ecb, &sc->ready_list, chain) { ti = &sc->sc_tinfo[ecb->ccb->ccb_h.target_id]; lun = ecb->ccb->ccb_h.target_lun; /* Select type of tag for this command */ if ((ti->flags & (T_RSELECTOFF | T_TAG)) != T_TAG) tag = 0; else if ((ecb->flags & ECB_SENSE) != 0) tag = 0; else if ((ecb->ccb->ccb_h.flags & CAM_TAG_ACTION_VALID) == 0) tag = 0; else if (ecb->ccb->csio.tag_action == CAM_TAG_ACTION_NONE) tag = 0; else tag = ecb->ccb->csio.tag_action; li = TINFO_LUN(ti, lun); if (li == NULL) { /* Initialize LUN info and add to list. */ li = malloc(sizeof(*li), M_DEVBUF, M_NOWAIT | M_ZERO); if (li == NULL) continue; li->lun = lun; LIST_INSERT_HEAD(&ti->luns, li, link); if (lun < NCR_NLUN) ti->lun[lun] = li; } li->last_used = time_second; if (tag == 0) { /* Try to issue this as an untagged command. */ if (li->untagged == NULL) li->untagged = ecb; } if (li->untagged != NULL) { tag = 0; if ((li->busy != 1) && li->used == 0) { /* * We need to issue this untagged command * now. */ ecb = li->untagged; } else { /* not ready, yet */ continue; } } ecb->tag[0] = tag; if (tag != 0) { li->queued[ecb->tag_id] = ecb; ecb->tag[1] = ecb->tag_id; li->used++; } if (li->untagged != NULL && (li->busy != 1)) { li->busy = 1; TAILQ_REMOVE(&sc->ready_list, ecb, chain); ecb->flags &= ~ECB_READY; sc->sc_nexus = ecb; ncr53c9x_select(sc, ecb); break; } if (li->untagged == NULL && tag != 0) { TAILQ_REMOVE(&sc->ready_list, ecb, chain); ecb->flags &= ~ECB_READY; sc->sc_nexus = ecb; ncr53c9x_select(sc, ecb); break; } else NCR_TRACE(("[%s %d:%d busy] \n", __func__, ecb->ccb->ccb_h.target_id, ecb->ccb->ccb_h.target_lun)); } } static void ncr53c9x_sense(struct ncr53c9x_softc *sc, struct ncr53c9x_ecb *ecb) { union ccb *ccb = ecb->ccb; struct ncr53c9x_linfo *li; struct ncr53c9x_tinfo *ti; struct scsi_request_sense *ss = (void *)&ecb->cmd.cmd; int lun; NCR_LOCK_ASSERT(sc, MA_OWNED); NCR_TRACE(("[%s] ", __func__)); lun = ccb->ccb_h.target_lun; ti = &sc->sc_tinfo[ccb->ccb_h.target_id]; /* Next, setup a REQUEST SENSE command block. */ memset(ss, 0, sizeof(*ss)); ss->opcode = REQUEST_SENSE; ss->byte2 = ccb->ccb_h.target_lun << SCSI_CMD_LUN_SHIFT; ss->length = sizeof(struct scsi_sense_data); ecb->clen = sizeof(*ss); memset(&ccb->csio.sense_data, 0, sizeof(ccb->csio.sense_data)); ecb->daddr = (uint8_t *)&ccb->csio.sense_data; ecb->dleft = sizeof(struct scsi_sense_data); ecb->flags |= ECB_SENSE; ecb->timeout = NCR_SENSE_TIMEOUT; ti->senses++; li = TINFO_LUN(ti, lun); if (li->busy) li->busy = 0; ncr53c9x_dequeue(sc, ecb); li->untagged = ecb; /* Must be executed first to fix C/A. 
*/ li->busy = 2; if (ecb == sc->sc_nexus) ncr53c9x_select(sc, ecb); else { TAILQ_INSERT_HEAD(&sc->ready_list, ecb, chain); ecb->flags |= ECB_READY; if (sc->sc_state == NCR_IDLE) ncr53c9x_sched(sc); } } /* * POST PROCESSING OF SCSI_CMD (usually current) */ static void ncr53c9x_done(struct ncr53c9x_softc *sc, struct ncr53c9x_ecb *ecb) { union ccb *ccb = ecb->ccb; struct ncr53c9x_linfo *li; struct ncr53c9x_tinfo *ti; int lun, sense_returned; NCR_LOCK_ASSERT(sc, MA_OWNED); NCR_TRACE(("[%s(status:%x)] ", __func__, ccb->ccb_h.status)); ti = &sc->sc_tinfo[ccb->ccb_h.target_id]; lun = ccb->ccb_h.target_lun; li = TINFO_LUN(ti, lun); callout_stop(&ecb->ch); /* * Now, if we've come here with no error code, i.e. we've kept the * initial CAM_REQ_CMP, and the status code signals that we should * check sense, we'll need to set up a request sense cmd block and * push the command back into the ready queue *before* any other * commands for this target/lunit, else we lose the sense info. * We don't support chk sense conditions for the request sense cmd. */ if (ccb->ccb_h.status == CAM_REQ_CMP) { ccb->csio.scsi_status = ecb->stat; if ((ecb->flags & ECB_ABORT) != 0) ccb->ccb_h.status = CAM_CMD_TIMEOUT; else if ((ecb->flags & ECB_SENSE) != 0 && (ecb->stat != SCSI_STATUS_CHECK_COND)) { ccb->csio.scsi_status = SCSI_STATUS_CHECK_COND; ccb->ccb_h.status = CAM_SCSI_STATUS_ERROR | CAM_AUTOSNS_VALID; sense_returned = sizeof(ccb->csio.sense_data) - ecb->dleft; if (sense_returned < ccb->csio.sense_len) ccb->csio.sense_resid = ccb->csio.sense_len - sense_returned; else ccb->csio.sense_resid = 0; } else if (ecb->stat == SCSI_STATUS_CHECK_COND) { if ((ecb->flags & ECB_SENSE) != 0) ccb->ccb_h.status = CAM_AUTOSENSE_FAIL; else { /* First, save the return values. */ ccb->csio.resid = ecb->dleft; if ((ccb->ccb_h.flags & CAM_DIS_AUTOSENSE) == 0) { ncr53c9x_sense(sc, ecb); return; } ccb->ccb_h.status = CAM_SCSI_STATUS_ERROR; } } else ccb->csio.resid = ecb->dleft; if (ecb->stat == SCSI_STATUS_QUEUE_FULL) ccb->ccb_h.status = CAM_SCSI_STATUS_ERROR; else if (ecb->stat == SCSI_STATUS_BUSY) ccb->ccb_h.status = CAM_SCSI_BUSY; } else if ((ccb->ccb_h.status & CAM_DEV_QFRZN) == 0) { ccb->ccb_h.status |= CAM_DEV_QFRZN; xpt_freeze_devq(ccb->ccb_h.path, 1); } #ifdef NCR53C9X_DEBUG if ((ncr53c9x_debug & NCR_SHOWTRAC) != 0) { if (ccb->csio.resid != 0) printf("resid=%d ", ccb->csio.resid); if ((ccb->ccb_h.status & CAM_AUTOSNS_VALID) != 0) printf("sense=0x%02x\n", ccb->csio.sense_data.error_code); else printf("status SCSI=0x%x CAM=0x%x\n", ccb->csio.scsi_status, ccb->ccb_h.status); } #endif /* * Remove the ECB from whatever queue it's on. */ ncr53c9x_dequeue(sc, ecb); if (ecb == sc->sc_nexus) { sc->sc_nexus = NULL; if (sc->sc_state != NCR_CLEANING) { sc->sc_state = NCR_IDLE; ncr53c9x_sched(sc); } } if ((ccb->ccb_h.status & CAM_SEL_TIMEOUT) != 0) { /* Selection timeout -- discard this LUN if empty. 
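 * (ncr53c9x_watch() performs the same kind of cleanup periodically for
 * LUN structures that have gone unused for ten minutes.)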
*/ if (li->untagged == NULL && li->used == 0) { if (lun < NCR_NLUN) ti->lun[lun] = NULL; LIST_REMOVE(li, link); free(li, M_DEVBUF); } } ncr53c9x_free_ecb(sc, ecb); ti->cmds++; xpt_done(ccb); } static void ncr53c9x_dequeue(struct ncr53c9x_softc *sc, struct ncr53c9x_ecb *ecb) { struct ncr53c9x_linfo *li; struct ncr53c9x_tinfo *ti; int64_t lun; NCR_LOCK_ASSERT(sc, MA_OWNED); ti = &sc->sc_tinfo[ecb->ccb->ccb_h.target_id]; lun = ecb->ccb->ccb_h.target_lun; li = TINFO_LUN(ti, lun); #ifdef DIAGNOSTIC if (li == NULL || li->lun != lun) panic("%s: lun %llx for ecb %p does not exist", __func__, (long long)lun, ecb); #endif if (li->untagged == ecb) { li->busy = 0; li->untagged = NULL; } if (ecb->tag[0] && li->queued[ecb->tag[1]] != NULL) { #ifdef DIAGNOSTIC if (li->queued[ecb->tag[1]] != NULL && (li->queued[ecb->tag[1]] != ecb)) panic("%s: slot %d for lun %llx has %p instead of ecb " "%p", __func__, ecb->tag[1], (long long)lun, li->queued[ecb->tag[1]], ecb); #endif li->queued[ecb->tag[1]] = NULL; li->used--; } ecb->tag[0] = ecb->tag[1] = 0; if ((ecb->flags & ECB_READY) != 0) { ecb->flags &= ~ECB_READY; TAILQ_REMOVE(&sc->ready_list, ecb, chain); } } /* * INTERRUPT/PROTOCOL ENGINE */ /* * Schedule an outgoing message by prioritizing it, and asserting * attention on the bus. We can only do this when we are the initiator * else there will be an illegal command interrupt. */ #define ncr53c9x_sched_msgout(m) do { \ NCR_MSGS(("ncr53c9x_sched_msgout %x %d", m, __LINE__)); \ NCRCMD(sc, NCRCMD_SETATN); \ sc->sc_flags |= NCR_ATN; \ sc->sc_msgpriq |= (m); \ } while (/* CONSTCOND */0) static void ncr53c9x_flushfifo(struct ncr53c9x_softc *sc) { NCR_LOCK_ASSERT(sc, MA_OWNED); NCR_TRACE(("[%s] ", __func__)); NCRCMD(sc, NCRCMD_FLUSH); if (sc->sc_phase == COMMAND_PHASE || sc->sc_phase == MESSAGE_OUT_PHASE) DELAY(2); } static int ncr53c9x_rdfifo(struct ncr53c9x_softc *sc, int how) { int i, n; uint8_t *ibuf; NCR_LOCK_ASSERT(sc, MA_OWNED); switch (how) { case NCR_RDFIFO_START: ibuf = sc->sc_imess; sc->sc_imlen = 0; break; case NCR_RDFIFO_CONTINUE: ibuf = sc->sc_imess + sc->sc_imlen; break; default: panic("%s: bad flag", __func__); /* NOTREACHED */ } /* * XXX buffer (sc_imess) size for message */ n = NCR_READ_REG(sc, NCR_FFLAG) & NCRFIFO_FF; if (sc->sc_rev == NCR_VARIANT_FAS366) { n *= 2; for (i = 0; i < n; i++) ibuf[i] = NCR_READ_REG(sc, NCR_FIFO); if (sc->sc_espstat2 & NCRFAS_STAT2_ISHUTTLE) { NCR_WRITE_REG(sc, NCR_FIFO, 0); ibuf[i++] = NCR_READ_REG(sc, NCR_FIFO); NCR_READ_REG(sc, NCR_FIFO); ncr53c9x_flushfifo(sc); } } else for (i = 0; i < n; i++) ibuf[i] = NCR_READ_REG(sc, NCR_FIFO); sc->sc_imlen += i; #if 0 #ifdef NCR53C9X_DEBUG NCR_TRACE(("\n[rdfifo %s (%d):", (how == NCR_RDFIFO_START) ? 
"start" : "cont", (int)sc->sc_imlen)); if ((ncr53c9x_debug & NCR_SHOWTRAC) != 0) { for (i = 0; i < sc->sc_imlen; i++) printf(" %02x", sc->sc_imess[i]); printf("]\n"); } #endif #endif return (sc->sc_imlen); } static void ncr53c9x_wrfifo(struct ncr53c9x_softc *sc, uint8_t *p, int len) { int i; NCR_LOCK_ASSERT(sc, MA_OWNED); #ifdef NCR53C9X_DEBUG NCR_MSGS(("[wrfifo(%d):", len)); if ((ncr53c9x_debug & NCR_SHOWMSGS) != 0) { for (i = 0; i < len; i++) printf(" %02x", p[i]); printf("]\n"); } #endif for (i = 0; i < len; i++) { NCR_WRITE_REG(sc, NCR_FIFO, p[i]); if (sc->sc_rev == NCR_VARIANT_FAS366) NCR_WRITE_REG(sc, NCR_FIFO, 0); } } static int ncr53c9x_reselect(struct ncr53c9x_softc *sc, int message, int tagtype, int tagid) { struct ncr53c9x_ecb *ecb = NULL; struct ncr53c9x_linfo *li; struct ncr53c9x_tinfo *ti; uint8_t lun, selid, target; NCR_LOCK_ASSERT(sc, MA_OWNED); if (sc->sc_rev == NCR_VARIANT_FAS366) target = sc->sc_selid; else { /* * The SCSI chip made a snapshot of the data bus * while the reselection was being negotiated. * This enables us to determine which target did * the reselect. */ selid = sc->sc_selid & ~(1 << sc->sc_id); if (selid & (selid - 1)) { device_printf(sc->sc_dev, "reselect with invalid " "selid %02x; sending DEVICE RESET\n", selid); goto reset; } target = ffs(selid) - 1; } lun = message & 0x07; /* * Search wait queue for disconnected command. * The list should be short, so I haven't bothered with * any more sophisticated structures than a simple * singly linked list. */ ti = &sc->sc_tinfo[target]; li = TINFO_LUN(ti, lun); /* * We can get as far as the LUN with the IDENTIFY * message. Check to see if we're running an * untagged command. Otherwise ack the IDENTIFY * and wait for a tag message. */ if (li != NULL) { if (li->untagged != NULL && li->busy) ecb = li->untagged; else if (tagtype != MSG_SIMPLE_Q_TAG) { /* Wait for tag to come by. */ sc->sc_state = NCR_IDENTIFIED; return (0); } else if (tagtype) ecb = li->queued[tagid]; } if (ecb == NULL) { device_printf(sc->sc_dev, "reselect from target %d lun %d " "tag %x:%x with no nexus; sending ABORT\n", target, lun, tagtype, tagid); goto abort; } /* Make this nexus active again. */ sc->sc_state = NCR_CONNECTED; sc->sc_nexus = ecb; ncr53c9x_setsync(sc, ti); if (ecb->flags & ECB_RESET) ncr53c9x_sched_msgout(SEND_DEV_RESET); else if (ecb->flags & ECB_ABORT) ncr53c9x_sched_msgout(SEND_ABORT); /* Do an implicit RESTORE POINTERS. */ sc->sc_dp = ecb->daddr; sc->sc_dleft = ecb->dleft; return (0); reset: ncr53c9x_sched_msgout(SEND_DEV_RESET); return (1); abort: ncr53c9x_sched_msgout(SEND_ABORT); return (1); } /* From NetBSD; these should go into CAM at some point. */ #define MSG_ISEXTENDED(m) ((m) == MSG_EXTENDED) #define MSG_IS1BYTE(m) \ ((!MSG_ISEXTENDED(m) && (m) < 0x20) || MSG_ISIDENTIFY(m)) #define MSG_IS2BYTE(m) (((m) & 0xf0) == 0x20) static inline int __verify_msg_format(uint8_t *p, int len) { if (len == 1 && MSG_IS1BYTE(p[0])) return (1); if (len == 2 && MSG_IS2BYTE(p[0])) return (1); if (len >= 3 && MSG_ISEXTENDED(p[0]) && len == p[1] + 2) return (1); return (0); } /* * Get an incoming message as initiator. * * The SCSI bus must already be in MESSAGE_IN_PHASE and there is a * byte in the FIFO. 
*/ static void ncr53c9x_msgin(struct ncr53c9x_softc *sc) { struct ncr53c9x_ecb *ecb; struct ncr53c9x_linfo *li; struct ncr53c9x_tinfo *ti; uint8_t *pb; int len, lun; NCR_LOCK_ASSERT(sc, MA_OWNED); NCR_TRACE(("[%s(curmsglen:%ld)] ", __func__, (long)sc->sc_imlen)); if (sc->sc_imlen == 0) { device_printf(sc->sc_dev, "msgin: no msg byte available\n"); return; } /* * Prepare for a new message. A message should (according * to the SCSI standard) be transmitted in one single * MESSAGE_IN_PHASE. If we have been in some other phase, * then this is a new message. */ if (sc->sc_prevphase != MESSAGE_IN_PHASE && sc->sc_state != NCR_RESELECTED) { device_printf(sc->sc_dev, "phase change, dropping message, " "prev %d, state %d\n", sc->sc_prevphase, sc->sc_state); sc->sc_flags &= ~NCR_DROP_MSGI; sc->sc_imlen = 0; } /* * If we're going to reject the message, don't bother storing * the incoming bytes. But still, we need to ACK them. */ if ((sc->sc_flags & NCR_DROP_MSGI) != 0) { NCRCMD(sc, NCRCMD_MSGOK); device_printf(sc->sc_dev, "", sc->sc_imess[sc->sc_imlen]); return; } if (sc->sc_imlen >= NCR_MAX_MSG_LEN) { ncr53c9x_sched_msgout(SEND_REJECT); sc->sc_flags |= NCR_DROP_MSGI; } else { switch (sc->sc_state) { /* * if received message is the first of reselection * then first byte is selid, and then message */ case NCR_RESELECTED: pb = sc->sc_imess + 1; len = sc->sc_imlen - 1; break; default: pb = sc->sc_imess; len = sc->sc_imlen; } if (__verify_msg_format(pb, len)) goto gotit; } /* Acknowledge what we have so far. */ NCRCMD(sc, NCRCMD_MSGOK); return; gotit: NCR_MSGS(("gotmsg(%x) state %d", sc->sc_imess[0], sc->sc_state)); /* * We got a complete message, flush the imess. * XXX nobody uses imlen below. */ sc->sc_imlen = 0; /* * Now we should have a complete message (1 byte, 2 byte * and moderately long extended messages). We only handle * extended messages which total length is shorter than * NCR_MAX_MSG_LEN. Longer messages will be amputated. */ switch (sc->sc_state) { case NCR_CONNECTED: ecb = sc->sc_nexus; ti = &sc->sc_tinfo[ecb->ccb->ccb_h.target_id]; switch (sc->sc_imess[0]) { case MSG_CMDCOMPLETE: NCR_MSGS(("cmdcomplete ")); if (sc->sc_dleft < 0) { xpt_print_path(ecb->ccb->ccb_h.path); printf("got %ld extra bytes\n", -(long)sc->sc_dleft); sc->sc_dleft = 0; } ecb->dleft = (ecb->flags & ECB_TENTATIVE_DONE) ? 0 : sc->sc_dleft; if ((ecb->flags & ECB_SENSE) == 0) ecb->ccb->csio.resid = ecb->dleft; sc->sc_state = NCR_CMDCOMPLETE; break; case MSG_MESSAGE_REJECT: NCR_MSGS(("msg reject (msgout=%x) ", sc->sc_msgout)); switch (sc->sc_msgout) { case SEND_TAG: /* * Target does not like tagged queuing. * - Flush the command queue * - Disable tagged queuing for the target * - Dequeue ecb from the queued array. 
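 *   The rejected command itself is not failed; it is simply reissued as
 *   the LUN's untagged command (see li->untagged/li->busy below).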
*/ device_printf(sc->sc_dev, "tagged queuing " "rejected: target %d\n", ecb->ccb->ccb_h.target_id); NCR_MSGS(("(rejected sent tag)")); NCRCMD(sc, NCRCMD_FLUSH); DELAY(1); ti->flags &= ~T_TAG; lun = ecb->ccb->ccb_h.target_lun; li = TINFO_LUN(ti, lun); if (ecb->tag[0] && li->queued[ecb->tag[1]] != NULL) { li->queued[ecb->tag[1]] = NULL; li->used--; } ecb->tag[0] = ecb->tag[1] = 0; li->untagged = ecb; li->busy = 1; break; case SEND_SDTR: device_printf(sc->sc_dev, "sync transfer " "rejected: target %d\n", ecb->ccb->ccb_h.target_id); ti->flags &= ~T_SDTRSENT; ti->curr.period = ti->goal.period = 0; ti->curr.offset = ti->goal.offset = 0; ncr53c9x_setsync(sc, ti); break; case SEND_WDTR: device_printf(sc->sc_dev, "wide transfer " "rejected: target %d\n", ecb->ccb->ccb_h.target_id); ti->flags &= ~T_WDTRSENT; ti->curr.width = ti->goal.width = MSG_EXT_WDTR_BUS_8_BIT; ncr53c9x_setsync(sc, ti); break; case SEND_INIT_DET_ERR: goto abort; } break; case MSG_NOOP: NCR_MSGS(("noop ")); break; case MSG_HEAD_OF_Q_TAG: case MSG_SIMPLE_Q_TAG: case MSG_ORDERED_Q_TAG: NCR_MSGS(("TAG %x:%x", sc->sc_imess[0], sc->sc_imess[1])); break; case MSG_DISCONNECT: NCR_MSGS(("disconnect ")); ti->dconns++; sc->sc_state = NCR_DISCONNECT; /* * Mark the fact that all bytes have moved. The * target may not bother to do a SAVE POINTERS * at this stage. This flag will set the residual * count to zero on MSG COMPLETE. */ if (sc->sc_dleft == 0) ecb->flags |= ECB_TENTATIVE_DONE; break; case MSG_SAVEDATAPOINTER: NCR_MSGS(("save datapointer ")); ecb->daddr = sc->sc_dp; ecb->dleft = sc->sc_dleft; break; case MSG_RESTOREPOINTERS: NCR_MSGS(("restore datapointer ")); sc->sc_dp = ecb->daddr; sc->sc_dleft = ecb->dleft; break; case MSG_IGN_WIDE_RESIDUE: NCR_MSGS(("ignore wide residue (%d bytes)", sc->sc_imess[1])); if (sc->sc_imess[1] != 1) { xpt_print_path(ecb->ccb->ccb_h.path); printf("unexpected MESSAGE IGNORE WIDE " "RESIDUE (%d bytes); sending REJECT\n", sc->sc_imess[1]); goto reject; } /* * If there was a last transfer of an even number of * bytes, wipe the "done" memory and adjust by one * byte (sc->sc_imess[1]). 
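 * Example: a target on a 16-bit wide bus with an odd number of bytes
 * left still ends the data phase on a 2-byte boundary and then sends
 * IGNORE WIDE RESIDUE with a residue count of 1; the adjustment below
 * keeps the data pointer and residual count consistent with what the
 * target actually transferred.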
*/ len = sc->sc_dleft - ecb->dleft; if (len != 0 && (len & 1) == 0) { ecb->flags &= ~ECB_TENTATIVE_DONE; sc->sc_dp = (char *)sc->sc_dp - 1; sc->sc_dleft--; } break; case MSG_EXTENDED: NCR_MSGS(("extended(%x) ", sc->sc_imess[2])); switch (sc->sc_imess[2]) { case MSG_EXT_SDTR: NCR_MSGS(("SDTR period %d, offset %d ", sc->sc_imess[3], sc->sc_imess[4])); if (sc->sc_imess[1] != 3) goto reject; ti->curr.period = sc->sc_imess[3]; ti->curr.offset = sc->sc_imess[4]; if (sc->sc_minsync == 0 || ti->curr.offset == 0 || ti->curr.period > 124) { #if 0 #ifdef NCR53C9X_DEBUG xpt_print_path(ecb->ccb->ccb_h.path); printf("async mode\n"); #endif #endif if ((ti->flags & T_SDTRSENT) == 0) { /* * target initiated negotiation */ ti->curr.offset = 0; ncr53c9x_sched_msgout( SEND_SDTR); } } else { ti->curr.period = ncr53c9x_cpb2stp(sc, ncr53c9x_stp2cpb(sc, ti->curr.period)); if ((ti->flags & T_SDTRSENT) == 0) { /* * target initiated negotiation */ if (ti->curr.period < sc->sc_minsync) ti->curr.period = sc->sc_minsync; if (ti->curr.offset > sc->sc_maxoffset) ti->curr.offset = sc->sc_maxoffset; ncr53c9x_sched_msgout( SEND_SDTR); } } ti->flags &= ~T_SDTRSENT; ti->goal.period = ti->curr.period; ti->goal.offset = ti->curr.offset; ncr53c9x_setsync(sc, ti); break; case MSG_EXT_WDTR: NCR_MSGS(("wide mode %d ", sc->sc_imess[3])); ti->curr.width = sc->sc_imess[3]; if (!(ti->flags & T_WDTRSENT)) /* * target initiated negotiation */ ncr53c9x_sched_msgout(SEND_WDTR); ti->flags &= ~T_WDTRSENT; ti->goal.width = ti->curr.width; ncr53c9x_setsync(sc, ti); break; default: xpt_print_path(ecb->ccb->ccb_h.path); printf("unrecognized MESSAGE EXTENDED 0x%x;" " sending REJECT\n", sc->sc_imess[2]); goto reject; } break; default: NCR_MSGS(("ident ")); xpt_print_path(ecb->ccb->ccb_h.path); printf("unrecognized MESSAGE 0x%x; sending REJECT\n", sc->sc_imess[0]); /* FALLTHROUGH */ reject: ncr53c9x_sched_msgout(SEND_REJECT); break; } break; case NCR_IDENTIFIED: /* * IDENTIFY message was received and queue tag is expected * now. */ if ((sc->sc_imess[0] != MSG_SIMPLE_Q_TAG) || (sc->sc_msgify == 0)) { device_printf(sc->sc_dev, "TAG reselect without " "IDENTIFY; MSG %x; sending DEVICE RESET\n", sc->sc_imess[0]); goto reset; } (void)ncr53c9x_reselect(sc, sc->sc_msgify, sc->sc_imess[0], sc->sc_imess[1]); break; case NCR_RESELECTED: if (MSG_ISIDENTIFY(sc->sc_imess[1])) sc->sc_msgify = sc->sc_imess[1]; else { device_printf(sc->sc_dev, "reselect without IDENTIFY;" " MSG %x; sending DEVICE RESET\n", sc->sc_imess[1]); goto reset; } (void)ncr53c9x_reselect(sc, sc->sc_msgify, 0, 0); break; default: device_printf(sc->sc_dev, "unexpected MESSAGE IN; " "sending DEVICE RESET\n"); /* FALLTHROUGH */ reset: ncr53c9x_sched_msgout(SEND_DEV_RESET); break; abort: ncr53c9x_sched_msgout(SEND_ABORT); } /* If we have more messages to send set ATN. */ if (sc->sc_msgpriq) { NCRCMD(sc, NCRCMD_SETATN); sc->sc_flags |= NCR_ATN; } /* Acknowledge last message byte. */ NCRCMD(sc, NCRCMD_MSGOK); /* Done, reset message pointer. */ sc->sc_flags &= ~NCR_DROP_MSGI; sc->sc_imlen = 0; } /* * Send the highest priority, scheduled message. */ static void ncr53c9x_msgout(struct ncr53c9x_softc *sc) { struct ncr53c9x_tinfo *ti; struct ncr53c9x_ecb *ecb; size_t size; int error; #ifdef NCR53C9X_DEBUG int i; #endif NCR_LOCK_ASSERT(sc, MA_OWNED); NCR_TRACE(("[%s(priq:%x, prevphase:%x)]", __func__, sc->sc_msgpriq, sc->sc_prevphase)); /* * XXX - the NCR_ATN flag is not in sync with the actual ATN * condition on the SCSI bus. 
The 53c9x chip * automatically turns off ATN before sending the * message byte. (See also the comment below in the * default case when picking out a message to send.) */ if (sc->sc_flags & NCR_ATN) { if (sc->sc_prevphase != MESSAGE_OUT_PHASE) { new: NCRCMD(sc, NCRCMD_FLUSH); #if 0 DELAY(1); #endif sc->sc_msgoutq = 0; sc->sc_omlen = 0; } } else { if (sc->sc_prevphase == MESSAGE_OUT_PHASE) { ncr53c9x_sched_msgout(sc->sc_msgoutq); goto new; } else device_printf(sc->sc_dev, "at line %d: unexpected " "MESSAGE OUT phase\n", __LINE__); } if (sc->sc_omlen == 0) { /* Pick up highest priority message. */ sc->sc_msgout = sc->sc_msgpriq & -sc->sc_msgpriq; sc->sc_msgoutq |= sc->sc_msgout; sc->sc_msgpriq &= ~sc->sc_msgout; sc->sc_omlen = 1; /* "Default" message len */ switch (sc->sc_msgout) { case SEND_SDTR: ecb = sc->sc_nexus; ti = &sc->sc_tinfo[ecb->ccb->ccb_h.target_id]; sc->sc_omess[0] = MSG_EXTENDED; sc->sc_omess[1] = MSG_EXT_SDTR_LEN; sc->sc_omess[2] = MSG_EXT_SDTR; sc->sc_omess[3] = ti->goal.period; sc->sc_omess[4] = ti->goal.offset; sc->sc_omlen = 5; break; case SEND_WDTR: ecb = sc->sc_nexus; ti = &sc->sc_tinfo[ecb->ccb->ccb_h.target_id]; sc->sc_omess[0] = MSG_EXTENDED; sc->sc_omess[1] = MSG_EXT_WDTR_LEN; sc->sc_omess[2] = MSG_EXT_WDTR; sc->sc_omess[3] = ti->goal.width; sc->sc_omlen = 4; break; case SEND_IDENTIFY: if (sc->sc_state != NCR_CONNECTED) device_printf(sc->sc_dev, "at line %d: no " "nexus\n", __LINE__); ecb = sc->sc_nexus; sc->sc_omess[0] = MSG_IDENTIFY(ecb->ccb->ccb_h.target_lun, 0); break; case SEND_TAG: if (sc->sc_state != NCR_CONNECTED) device_printf(sc->sc_dev, "at line %d: no " "nexus\n", __LINE__); ecb = sc->sc_nexus; sc->sc_omess[0] = ecb->tag[0]; sc->sc_omess[1] = ecb->tag[1]; sc->sc_omlen = 2; break; case SEND_DEV_RESET: sc->sc_flags |= NCR_ABORTING; sc->sc_omess[0] = MSG_BUS_DEV_RESET; ecb = sc->sc_nexus; ti = &sc->sc_tinfo[ecb->ccb->ccb_h.target_id]; ti->curr.period = 0; ti->curr.offset = 0; ti->curr.width = MSG_EXT_WDTR_BUS_8_BIT; break; case SEND_PARITY_ERROR: sc->sc_omess[0] = MSG_PARITY_ERROR; break; case SEND_ABORT: sc->sc_flags |= NCR_ABORTING; sc->sc_omess[0] = MSG_ABORT; break; case SEND_INIT_DET_ERR: sc->sc_omess[0] = MSG_INITIATOR_DET_ERR; break; case SEND_REJECT: sc->sc_omess[0] = MSG_MESSAGE_REJECT; break; default: /* * We normally do not get here, since the chip * automatically turns off ATN before the last * byte of a message is sent to the target. * However, if the target rejects our (multi-byte) * message early by switching to MSG IN phase * ATN remains on, so the target may return to * MSG OUT phase. If there are no scheduled messages * left we send a NO-OP. * * XXX - Note that this leaves no useful purpose for * the NCR_ATN flag. */ sc->sc_flags &= ~NCR_ATN; sc->sc_omess[0] = MSG_NOOP; } sc->sc_omp = sc->sc_omess; } #ifdef NCR53C9X_DEBUG if ((ncr53c9x_debug & NCR_SHOWMSGS) != 0) { NCR_MSGS(("sc_omlen; i++) NCR_MSGS((" %02x", sc->sc_omess[i])); NCR_MSGS(("> ")); } #endif if (sc->sc_rev != NCR_VARIANT_FAS366) { /* (Re)send the message. */ size = ulmin(sc->sc_omlen, sc->sc_maxxfer); error = NCRDMA_SETUP(sc, &sc->sc_omp, &sc->sc_omlen, 0, &size); if (error != 0) goto cmd; /* Program the SCSI counter. */ NCR_SET_COUNT(sc, size); /* Load the count in and start the message-out transfer. 
*/ NCRCMD(sc, NCRCMD_NOP | NCRCMD_DMA); NCRCMD(sc, NCRCMD_TRANS | NCRCMD_DMA); NCRDMA_GO(sc); return; } cmd: /* * XXX FIFO size */ sc->sc_cmdlen = 0; ncr53c9x_flushfifo(sc); ncr53c9x_wrfifo(sc, sc->sc_omp, sc->sc_omlen); NCRCMD(sc, NCRCMD_TRANS); } void ncr53c9x_intr(void *arg) { struct ncr53c9x_softc *sc = arg; if (!NCRDMA_ISINTR(sc)) return; NCR_LOCK(sc); ncr53c9x_intr1(sc); NCR_UNLOCK(sc); } /* * This is the most critical part of the driver, and has to know * how to deal with *all* error conditions and phases from the SCSI * bus. If there are no errors and the DMA was active, then call the * DMA pseudo-interrupt handler. If this returns 1, then that was it * and we can return from here without further processing. * * Most of this needs verifying. */ static void ncr53c9x_intr1(struct ncr53c9x_softc *sc) { struct ncr53c9x_ecb *ecb; struct ncr53c9x_linfo *li; struct ncr53c9x_tinfo *ti; struct timeval cur, wait; size_t size; int error, i, nfifo; uint8_t msg; NCR_LOCK_ASSERT(sc, MA_OWNED); NCR_INTS(("[ncr53c9x_intr: state %d]", sc->sc_state)); again: /* and what do the registers say... */ ncr53c9x_readregs(sc); /* * At the moment, only a SCSI Bus Reset or Illegal * Command are classed as errors. A disconnect is a * valid condition, and we let the code check is the * "NCR_BUSFREE_OK" flag was set before declaring it * and error. * * Also, the status register tells us about "Gross * Errors" and "Parity errors". Only the Gross Error * is really bad, and the parity errors are dealt * with later. * * TODO * If there are too many parity error, go to slow * cable mode? */ if ((sc->sc_espintr & NCRINTR_SBR) != 0) { if ((NCR_READ_REG(sc, NCR_FFLAG) & NCRFIFO_FF) != 0) { NCRCMD(sc, NCRCMD_FLUSH); DELAY(1); } if (sc->sc_state != NCR_SBR) { device_printf(sc->sc_dev, "SCSI bus reset\n"); ncr53c9x_init(sc, 0); /* Restart everything. */ return; } #if 0 /*XXX*/ device_printf(sc->sc_dev, "\n", sc->sc_espintr, sc->sc_espstat, sc->sc_espstep); #endif if (sc->sc_nexus != NULL) panic("%s: nexus in reset state", device_get_nameunit(sc->sc_dev)); goto sched; } ecb = sc->sc_nexus; #define NCRINTR_ERR (NCRINTR_SBR | NCRINTR_ILL) if (sc->sc_espintr & NCRINTR_ERR || sc->sc_espstat & NCRSTAT_GE) { if ((sc->sc_espstat & NCRSTAT_GE) != 0) { /* Gross Error; no target? */ if (NCR_READ_REG(sc, NCR_FFLAG) & NCRFIFO_FF) { NCRCMD(sc, NCRCMD_FLUSH); DELAY(1); } if (sc->sc_state == NCR_CONNECTED || sc->sc_state == NCR_SELECTING) { ecb->ccb->ccb_h.status = CAM_SEL_TIMEOUT; ncr53c9x_done(sc, ecb); } return; } if ((sc->sc_espintr & NCRINTR_ILL) != 0) { if ((sc->sc_flags & NCR_EXPECT_ILLCMD) != 0) { /* * Eat away "Illegal command" interrupt * on a ESP100 caused by a re-selection * while we were trying to select * another target. */ #ifdef NCR53C9X_DEBUG device_printf(sc->sc_dev, "ESP100 work-around " "activated\n"); #endif sc->sc_flags &= ~NCR_EXPECT_ILLCMD; return; } /* Illegal command, out of sync? */ device_printf(sc->sc_dev, "illegal command: 0x%x " "(state %d, phase %x, prevphase %x)\n", sc->sc_lastcmd, sc->sc_state, sc->sc_phase, sc->sc_prevphase); if (NCR_READ_REG(sc, NCR_FFLAG) & NCRFIFO_FF) { NCRCMD(sc, NCRCMD_FLUSH); DELAY(1); } goto reset; } } sc->sc_flags &= ~NCR_EXPECT_ILLCMD; /* * Call if DMA is active. * * If DMA_INTR returns true, then maybe go 'round the loop * again in case there is no more DMA queued, but a phase * change is expected. */ if (NCRDMA_ISACTIVE(sc)) { if (NCRDMA_INTR(sc) == -1) { device_printf(sc->sc_dev, "DMA error; resetting\n"); goto reset; } /* If DMA active here, then go back to work... 
*/ if (NCRDMA_ISACTIVE(sc)) return; if ((sc->sc_espstat & NCRSTAT_TC) == 0) { /* * DMA not completed. If we can not find a * acceptable explanation, print a diagnostic. */ if (sc->sc_state == NCR_SELECTING) /* * This can happen if we are reselected * while using DMA to select a target. */ /*void*/; else if (sc->sc_prevphase == MESSAGE_OUT_PHASE) { /* * Our (multi-byte) message (eg SDTR) was * interrupted by the target to send * a MSG REJECT. * Print diagnostic if current phase * is not MESSAGE IN. */ if (sc->sc_phase != MESSAGE_IN_PHASE) device_printf(sc->sc_dev,"!TC on MSGOUT" " [intr %x, stat %x, step %d]" " prevphase %x, resid %lx\n", sc->sc_espintr, sc->sc_espstat, sc->sc_espstep, sc->sc_prevphase, (u_long)sc->sc_omlen); } else if (sc->sc_dleft == 0) { /* * The DMA operation was started for * a DATA transfer. Print a diagnostic * if the DMA counter and TC bit * appear to be out of sync. * * XXX This is fatal and usually means that * the DMA engine is hopelessly out of * sync with reality. A disk is likely * getting spammed at this point. */ device_printf(sc->sc_dev, "!TC on DATA XFER" " [intr %x, stat %x, step %d]" " prevphase %x, resid %x\n", sc->sc_espintr, sc->sc_espstat, sc->sc_espstep, sc->sc_prevphase, ecb ? ecb->dleft : -1); goto reset; } } } /* * Check for less serious errors. */ if ((sc->sc_espstat & NCRSTAT_PE) != 0) { device_printf(sc->sc_dev, "SCSI bus parity error\n"); if (sc->sc_prevphase == MESSAGE_IN_PHASE) ncr53c9x_sched_msgout(SEND_PARITY_ERROR); else ncr53c9x_sched_msgout(SEND_INIT_DET_ERR); } if ((sc->sc_espintr & NCRINTR_DIS) != 0) { sc->sc_msgify = 0; NCR_INTS(("", sc->sc_espintr,sc->sc_espstat,sc->sc_espstep)); if (NCR_READ_REG(sc, NCR_FFLAG) & NCRFIFO_FF) { NCRCMD(sc, NCRCMD_FLUSH); #if 0 DELAY(1); #endif } /* * This command must (apparently) be issued within * 250mS of a disconnect. So here you are... */ NCRCMD(sc, NCRCMD_ENSEL); switch (sc->sc_state) { case NCR_RESELECTED: goto sched; case NCR_SELECTING: ecb->ccb->ccb_h.status = CAM_SEL_TIMEOUT; /* Selection timeout -- discard all LUNs if empty. */ ti = &sc->sc_tinfo[ecb->ccb->ccb_h.target_id]; li = LIST_FIRST(&ti->luns); while (li != NULL) { if (li->untagged == NULL && li->used == 0) { if (li->lun < NCR_NLUN) ti->lun[li->lun] = NULL; LIST_REMOVE(li, link); free(li, M_DEVBUF); /* * Restart the search at the beginning. */ li = LIST_FIRST(&ti->luns); continue; } li = LIST_NEXT(li, link); } goto finish; case NCR_CONNECTED: if (ecb != NULL) { ti = &sc->sc_tinfo[ecb->ccb->ccb_h.target_id]; if ((ti->flags & T_SDTRSENT) != 0) { xpt_print_path(ecb->ccb->ccb_h.path); printf("sync nego not completed!\n"); ti->flags &= ~T_SDTRSENT; ti->curr.period = ti->goal.period = 0; ti->curr.offset = ti->goal.offset = 0; ncr53c9x_setsync(sc, ti); } if ((ti->flags & T_WDTRSENT) != 0) { xpt_print_path(ecb->ccb->ccb_h.path); printf("wide nego not completed!\n"); ti->flags &= ~T_WDTRSENT; ti->curr.width = ti->goal.width = MSG_EXT_WDTR_BUS_8_BIT; ncr53c9x_setsync(sc, ti); } } /* It may be OK to disconnect. */ if ((sc->sc_flags & NCR_ABORTING) == 0) { /* * Section 5.1.1 of the SCSI 2 spec * suggests issuing a REQUEST SENSE * following an unexpected disconnect. * Some devices go into a contingent * allegiance condition when * disconnecting, and this is necessary * to clean up their state. 
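 * (In practice the code below always takes the reset branch: note the
 * "if (1 || ...)" a few lines down, left in on purpose until the
 * in-flight DMA can be cancelled safely.)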
*/ device_printf(sc->sc_dev, "unexpected " "disconnect [state %d, intr %x, stat %x, " "phase(c %x, p %x)]; ", sc->sc_state, sc->sc_espintr, sc->sc_espstat, sc->sc_phase, sc->sc_prevphase); /* * XXX This will cause a chip reset and will * prevent us from finding out the real * problem with the device. However, it's * necessary until a way can be found to * safely cancel the DMA that is in * progress. */ if (1 || (ecb->flags & ECB_SENSE) != 0) { printf("resetting\n"); goto reset; } printf("sending REQUEST SENSE\n"); callout_stop(&ecb->ch); ncr53c9x_sense(sc, ecb); return; } else if (ecb != NULL && (ecb->flags & ECB_RESET) != 0) { ecb->ccb->ccb_h.status = CAM_REQ_CMP; goto finish; } ecb->ccb->ccb_h.status = CAM_CMD_TIMEOUT; goto finish; case NCR_DISCONNECT: sc->sc_nexus = NULL; goto sched; case NCR_CMDCOMPLETE: ecb->ccb->ccb_h.status = CAM_REQ_CMP; goto finish; } } switch (sc->sc_state) { case NCR_SBR: device_printf(sc->sc_dev, "waiting for Bus Reset to happen\n"); return; case NCR_RESELECTED: /* * We must be continuing a message? */ device_printf(sc->sc_dev, "unhandled reselect continuation, " "state %d, intr %02x\n", sc->sc_state, sc->sc_espintr); goto reset; break; case NCR_IDENTIFIED: ecb = sc->sc_nexus; if (sc->sc_phase != MESSAGE_IN_PHASE) { i = NCR_READ_REG(sc, NCR_FFLAG) & NCRFIFO_FF; /* * Things are seriously screwed up. * Pull the brakes, i.e. reset. */ device_printf(sc->sc_dev, "target didn't send tag: %d " "bytes in FIFO\n", i); /* Drain and display FIFO. */ while (i-- > 0) printf("[%d] ", NCR_READ_REG(sc, NCR_FIFO)); goto reset; } else goto msgin; case NCR_IDLE: case NCR_SELECTING: ecb = sc->sc_nexus; if (sc->sc_espintr & NCRINTR_RESEL) { sc->sc_msgpriq = sc->sc_msgout = sc->sc_msgoutq = 0; sc->sc_flags = 0; /* * If we're trying to select a * target ourselves, push our command * back into the ready list. */ if (sc->sc_state == NCR_SELECTING) { NCR_INTS(("backoff selector ")); callout_stop(&ecb->ch); ncr53c9x_dequeue(sc, ecb); TAILQ_INSERT_HEAD(&sc->ready_list, ecb, chain); ecb->flags |= ECB_READY; ecb = sc->sc_nexus = NULL; } sc->sc_state = NCR_RESELECTED; if (sc->sc_phase != MESSAGE_IN_PHASE) { /* * Things are seriously screwed up. * Pull the brakes, i.e. reset */ device_printf(sc->sc_dev, "target didn't " "identify\n"); goto reset; } /* * The C90 only inhibits FIFO writes until reselection * is complete instead of waiting until the interrupt * status register has been read. So, if the reselect * happens while we were entering command bytes (for * another target) some of those bytes can appear in * the FIFO here, after the interrupt is taken. * * To remedy this situation, pull the Selection ID * and Identify message from the FIFO directly, and * ignore any extraneous FIFO contents. Also, set * a flag that allows one Illegal Command Interrupt * to occur which the chip also generates as a result * of writing to the FIFO during a reselect. */ if (sc->sc_rev == NCR_VARIANT_ESP100) { nfifo = NCR_READ_REG(sc, NCR_FFLAG) & NCRFIFO_FF; sc->sc_imess[0] = NCR_READ_REG(sc, NCR_FIFO); sc->sc_imess[1] = NCR_READ_REG(sc, NCR_FIFO); sc->sc_imlen = 2; if (nfifo != 2) { /* Flush the rest. */ NCRCMD(sc, NCRCMD_FLUSH); } sc->sc_flags |= NCR_EXPECT_ILLCMD; if (nfifo > 2) nfifo = 2; /* We fixed it... */ } else nfifo = ncr53c9x_rdfifo(sc, NCR_RDFIFO_START); if (nfifo != 2) { device_printf(sc->sc_dev, "RESELECT: %d bytes " "in FIFO! 
[intr %x, stat %x, step %d, " "prevphase %x]\n", nfifo, sc->sc_espintr, sc->sc_espstat, sc->sc_espstep, sc->sc_prevphase); goto reset; } sc->sc_selid = sc->sc_imess[0]; NCR_INTS(("selid=%02x ", sc->sc_selid)); /* Handle IDENTIFY message. */ ncr53c9x_msgin(sc); if (sc->sc_state != NCR_CONNECTED && sc->sc_state != NCR_IDENTIFIED) { /* IDENTIFY fail?! */ device_printf(sc->sc_dev, "identify failed, " "state %d, intr %02x\n", sc->sc_state, sc->sc_espintr); goto reset; } goto shortcut; /* i.e. next phase expected soon */ } #define NCRINTR_DONE (NCRINTR_FC | NCRINTR_BS) if ((sc->sc_espintr & NCRINTR_DONE) == NCRINTR_DONE) { /* * Arbitration won; examine the `step' register * to determine how far the selection could progress. */ if (ecb == NULL) { /* * When doing path inquiry during boot * FAS100A trigger a stray interrupt which * we just ignore instead of panicing. */ if (sc->sc_state == NCR_IDLE && sc->sc_espstep == 0) return; panic("%s: no nexus", __func__); } ti = &sc->sc_tinfo[ecb->ccb->ccb_h.target_id]; switch (sc->sc_espstep) { case 0: /* * The target did not respond with a * message out phase - probably an old * device that doesn't recognize ATN. * Clear ATN and just continue, the * target should be in the command * phase. * XXX check for command phase? */ NCRCMD(sc, NCRCMD_RSTATN); break; case 1: if (ti->curr.period == ti->goal.period && ti->curr.offset == ti->goal.offset && ti->curr.width == ti->goal.width && ecb->tag[0] == 0) { device_printf(sc->sc_dev, "step 1 " "and no negotiation to perform " "or tag to send\n"); goto reset; } if (sc->sc_phase != MESSAGE_OUT_PHASE) { device_printf(sc->sc_dev, "step 1 " "but not in MESSAGE_OUT_PHASE\n"); goto reset; } sc->sc_prevphase = MESSAGE_OUT_PHASE; /* XXX */ if (ecb->flags & ECB_RESET) { /* * A DEVICE RESET was scheduled and * ATNS used. As SEND_DEV_RESET has * the highest priority, the target * will reset and disconnect and we * will end up in ncr53c9x_done w/o * negotiating or sending a TAG. So * we just break here in order to * avoid warnings about negotiation * not having completed. */ ncr53c9x_sched_msgout(SEND_DEV_RESET); break; } if (ti->curr.width != ti->goal.width) { ti->flags |= T_WDTRSENT | T_SDTRSENT; ncr53c9x_sched_msgout(SEND_WDTR | SEND_SDTR); } if (ti->curr.period != ti->goal.period || ti->curr.offset != ti->goal.offset) { ti->flags |= T_SDTRSENT; ncr53c9x_sched_msgout(SEND_SDTR); } if (ecb->tag[0] != 0) /* Could not do ATN3 so send TAG. */ ncr53c9x_sched_msgout(SEND_TAG); break; case 3: /* * Grr, this is supposed to mean * "target left command phase prematurely". * It seems to happen regularly when * sync mode is on. * Look at FIFO to see if command went out. * (Timing problems?) */ if (sc->sc_features & NCR_F_DMASELECT) { if (sc->sc_cmdlen == 0) { /* Hope for the best... */ break; } } else if ((NCR_READ_REG(sc, NCR_FFLAG) & NCRFIFO_FF) == 0) { /* Hope for the best... */ break; } xpt_print_path(ecb->ccb->ccb_h.path); printf("selection failed; %d left in FIFO " "[intr %x, stat %x, step %d]\n", NCR_READ_REG(sc, NCR_FFLAG) & NCRFIFO_FF, sc->sc_espintr, sc->sc_espstat, sc->sc_espstep); NCRCMD(sc, NCRCMD_FLUSH); ncr53c9x_sched_msgout(SEND_ABORT); return; case 2: /* Select stuck at Command Phase. */ NCRCMD(sc, NCRCMD_FLUSH); break; case 4: if (sc->sc_features & NCR_F_DMASELECT && sc->sc_cmdlen != 0) { xpt_print_path(ecb->ccb->ccb_h.path); printf("select; %lu left in DMA buffer " "[intr %x, stat %x, step %d]\n", (u_long)sc->sc_cmdlen, sc->sc_espintr, sc->sc_espstat, sc->sc_espstep); } /* So far, everything went fine. 
*/ break; } sc->sc_prevphase = INVALID_PHASE; /* ??? */ /* Do an implicit RESTORE POINTERS. */ sc->sc_dp = ecb->daddr; sc->sc_dleft = ecb->dleft; sc->sc_state = NCR_CONNECTED; break; } else { device_printf(sc->sc_dev, "unexpected status after " "select: [intr %x, stat %x, step %x]\n", sc->sc_espintr, sc->sc_espstat, sc->sc_espstep); NCRCMD(sc, NCRCMD_FLUSH); DELAY(1); goto reset; } if (sc->sc_state == NCR_IDLE) { device_printf(sc->sc_dev, "stray interrupt\n"); return; } break; case NCR_CONNECTED: if ((sc->sc_flags & NCR_ICCS) != 0) { /* "Initiate Command Complete Steps" in progress */ sc->sc_flags &= ~NCR_ICCS; if ((sc->sc_espintr & NCRINTR_DONE) == 0) { device_printf(sc->sc_dev, "ICCS: " ": [intr %x, stat %x, step %x]\n", sc->sc_espintr, sc->sc_espstat, sc->sc_espstep); } ncr53c9x_rdfifo(sc, NCR_RDFIFO_START); if (sc->sc_imlen < 2) device_printf(sc->sc_dev, "can't get status, " "only %d bytes\n", (int)sc->sc_imlen); ecb->stat = sc->sc_imess[sc->sc_imlen - 2]; msg = sc->sc_imess[sc->sc_imlen - 1]; NCR_PHASE(("", ecb->stat, msg)); if (msg == MSG_CMDCOMPLETE) { ecb->dleft = (ecb->flags & ECB_TENTATIVE_DONE) ? 0 : sc->sc_dleft; if ((ecb->flags & ECB_SENSE) == 0) ecb->ccb->csio.resid = ecb->dleft; sc->sc_state = NCR_CMDCOMPLETE; } else device_printf(sc->sc_dev, "STATUS_PHASE: " "msg %d\n", msg); sc->sc_imlen = 0; NCRCMD(sc, NCRCMD_MSGOK); goto shortcut; /* i.e. wait for disconnect */ } break; default: device_printf(sc->sc_dev, "invalid state: %d [intr %x, " "phase(c %x, p %x)]\n", sc->sc_state, sc->sc_espintr, sc->sc_phase, sc->sc_prevphase); goto reset; } /* * Driver is now in state NCR_CONNECTED, i.e. we * have a current command working the SCSI bus. */ if (sc->sc_state != NCR_CONNECTED || ecb == NULL) panic("%s: no nexus", __func__); switch (sc->sc_phase) { case MESSAGE_OUT_PHASE: NCR_PHASE(("MESSAGE_OUT_PHASE ")); ncr53c9x_msgout(sc); sc->sc_prevphase = MESSAGE_OUT_PHASE; break; case MESSAGE_IN_PHASE: msgin: NCR_PHASE(("MESSAGE_IN_PHASE ")); if ((sc->sc_espintr & NCRINTR_BS) != 0) { if ((sc->sc_rev != NCR_VARIANT_FAS366) || (sc->sc_espstat2 & NCRFAS_STAT2_EMPTY) == 0) { NCRCMD(sc, NCRCMD_FLUSH); } sc->sc_flags |= NCR_WAITI; NCRCMD(sc, NCRCMD_TRANS); } else if ((sc->sc_espintr & NCRINTR_FC) != 0) { if ((sc->sc_flags & NCR_WAITI) == 0) { device_printf(sc->sc_dev, "MSGIN: unexpected " "FC bit: [intr %x, stat %x, step %x]\n", sc->sc_espintr, sc->sc_espstat, sc->sc_espstep); } sc->sc_flags &= ~NCR_WAITI; ncr53c9x_rdfifo(sc, (sc->sc_prevphase == sc->sc_phase) ? NCR_RDFIFO_CONTINUE : NCR_RDFIFO_START); ncr53c9x_msgin(sc); } else device_printf(sc->sc_dev, "MSGIN: weird bits: " "[intr %x, stat %x, step %x]\n", sc->sc_espintr, sc->sc_espstat, sc->sc_espstep); sc->sc_prevphase = MESSAGE_IN_PHASE; goto shortcut; /* i.e. expect data to be ready */ case COMMAND_PHASE: /* * Send the command block. Normally we don't see this * phase because the SEL_ATN command takes care of * all this. However, we end up here if either the * target or we wanted to exchange some more messages * first (e.g. to start negotiations). */ NCR_PHASE(("COMMAND_PHASE 0x%02x (%d) ", ecb->cmd.cmd.opcode, ecb->clen)); if (NCR_READ_REG(sc, NCR_FFLAG) & NCRFIFO_FF) { NCRCMD(sc, NCRCMD_FLUSH); #if 0 DELAY(1); #endif } /* * If we have more messages to send, e.g. WDTR or SDTR * after we've sent a TAG, set ATN so we'll go back to * MESSAGE_OUT_PHASE. */ if (sc->sc_msgpriq) { NCRCMD(sc, NCRCMD_SETATN); sc->sc_flags |= NCR_ATN; } if (sc->sc_features & NCR_F_DMASELECT) { /* Setup DMA transfer for command. 
*/ size = ecb->clen; sc->sc_cmdlen = size; sc->sc_cmdp = (void *)&ecb->cmd.cmd; error = NCRDMA_SETUP(sc, &sc->sc_cmdp, &sc->sc_cmdlen, 0, &size); if (error != 0) goto cmd; /* Program the SCSI counter. */ NCR_SET_COUNT(sc, size); /* Load the count in. */ NCRCMD(sc, NCRCMD_NOP | NCRCMD_DMA); /* Start the command transfer. */ NCRCMD(sc, NCRCMD_TRANS | NCRCMD_DMA); NCRDMA_GO(sc); sc->sc_prevphase = COMMAND_PHASE; break; } cmd: sc->sc_cmdlen = 0; ncr53c9x_wrfifo(sc, (uint8_t *)&ecb->cmd.cmd, ecb->clen); NCRCMD(sc, NCRCMD_TRANS); sc->sc_prevphase = COMMAND_PHASE; break; case DATA_OUT_PHASE: NCR_PHASE(("DATA_OUT_PHASE [%ld] ", (long)sc->sc_dleft)); sc->sc_prevphase = DATA_OUT_PHASE; NCRCMD(sc, NCRCMD_FLUSH); size = ulmin(sc->sc_dleft, sc->sc_maxxfer); error = NCRDMA_SETUP(sc, &sc->sc_dp, &sc->sc_dleft, 0, &size); goto setup_xfer; case DATA_IN_PHASE: NCR_PHASE(("DATA_IN_PHASE ")); sc->sc_prevphase = DATA_IN_PHASE; if (sc->sc_rev == NCR_VARIANT_ESP100) NCRCMD(sc, NCRCMD_FLUSH); size = ulmin(sc->sc_dleft, sc->sc_maxxfer); error = NCRDMA_SETUP(sc, &sc->sc_dp, &sc->sc_dleft, 1, &size); setup_xfer: if (error != 0) { switch (error) { case EFBIG: ecb->ccb->ccb_h.status |= CAM_REQ_TOO_BIG; break; case EINPROGRESS: panic("%s: cannot deal with deferred DMA", __func__); case EINVAL: ecb->ccb->ccb_h.status |= CAM_REQ_INVALID; break; case ENOMEM: ecb->ccb->ccb_h.status |= CAM_REQUEUE_REQ; break; default: ecb->ccb->ccb_h.status |= CAM_REQ_CMP_ERR; } goto finish; } /* Target returned to data phase: wipe "done" memory. */ ecb->flags &= ~ECB_TENTATIVE_DONE; /* Program the SCSI counter. */ NCR_SET_COUNT(sc, size); /* Load the count in. */ NCRCMD(sc, NCRCMD_NOP | NCRCMD_DMA); /* * Note that if `size' is 0, we've already transceived * all the bytes we want but we're still in DATA PHASE. * Apparently, the device needs padding. Also, a * transfer size of 0 means "maximum" to the chip * DMA logic. */ NCRCMD(sc, (size == 0 ? NCRCMD_TRPAD : NCRCMD_TRANS) | NCRCMD_DMA); NCRDMA_GO(sc); return; case STATUS_PHASE: NCR_PHASE(("STATUS_PHASE ")); sc->sc_flags |= NCR_ICCS; NCRCMD(sc, NCRCMD_ICCS); sc->sc_prevphase = STATUS_PHASE; goto shortcut; /* i.e. expect status results soon */ case INVALID_PHASE: break; default: device_printf(sc->sc_dev, "unexpected bus phase; resetting\n"); goto reset; } return; reset: ncr53c9x_init(sc, 1); return; finish: ncr53c9x_done(sc, ecb); return; sched: sc->sc_state = NCR_IDLE; ncr53c9x_sched(sc); return; shortcut: /* * The idea is that many of the SCSI operations take very little * time, and going away and getting interrupted is too high an * overhead to pay. For example, selecting, sending a message * and command and then doing some work can be done in one "pass". * * The delay is a heuristic. It is 2 when at 20 MHz, 2 at 25 MHz and * 1 at 40 MHz. This needs testing. */ microtime(&wait); wait.tv_usec += 50 / sc->sc_freq; if (wait.tv_usec > 1000000) { wait.tv_sec++; wait.tv_usec -= 1000000; } do { if (NCRDMA_ISINTR(sc)) goto again; microtime(&cur); } while (cur.tv_sec <= wait.tv_sec && cur.tv_usec <= wait.tv_usec); } static void ncr53c9x_abort(struct ncr53c9x_softc *sc, struct ncr53c9x_ecb *ecb) { NCR_LOCK_ASSERT(sc, MA_OWNED); /* 2 secs for the abort */ ecb->timeout = NCR_ABORT_TIMEOUT; ecb->flags |= ECB_ABORT; if (ecb == sc->sc_nexus) { /* * If we're still selecting, the message will be scheduled * after selection is complete. */ if (sc->sc_state == NCR_CONNECTED) ncr53c9x_sched_msgout(SEND_ABORT); /* * Reschedule callout. 
*/ callout_reset(&ecb->ch, mstohz(ecb->timeout), ncr53c9x_callout, ecb); } else { /* * Just leave the command where it is. * XXX - what choice do we have but to reset the SCSI * eventually? */ if (sc->sc_state == NCR_IDLE) ncr53c9x_sched(sc); } } static void ncr53c9x_callout(void *arg) { struct ncr53c9x_ecb *ecb = arg; union ccb *ccb = ecb->ccb; struct ncr53c9x_softc *sc = ecb->sc; struct ncr53c9x_tinfo *ti; NCR_LOCK_ASSERT(sc, MA_OWNED); ti = &sc->sc_tinfo[ccb->ccb_h.target_id]; xpt_print_path(ccb->ccb_h.path); device_printf(sc->sc_dev, "timed out [ecb %p (flags 0x%x, dleft %x, " "stat %x)], ", ecb, ecb->flags, ecb->dleft, ecb->stat, sc->sc_state, sc->sc_nexus, NCR_READ_REG(sc, NCR_STAT), sc->sc_phase, sc->sc_prevphase, (long)sc->sc_dleft, sc->sc_msgpriq, sc->sc_msgout, NCRDMA_ISACTIVE(sc) ? "DMA active" : ""); #if defined(NCR53C9X_DEBUG) && NCR53C9X_DEBUG > 1 printf("TRACE: %s.", ecb->trace); #endif if (ecb->flags & ECB_ABORT) { /* Abort timed out. */ printf(" AGAIN\n"); ncr53c9x_init(sc, 1); } else { /* Abort the operation that has timed out. */ printf("\n"); ccb->ccb_h.status = CAM_CMD_TIMEOUT; ncr53c9x_abort(sc, ecb); /* Disable sync mode if stuck in a data phase. */ if (ecb == sc->sc_nexus && ti->curr.offset != 0 && (sc->sc_phase & (MSGI | CDI)) == 0) { /* XXX ASYNC CALLBACK! */ ti->goal.offset = 0; xpt_print_path(ccb->ccb_h.path); printf("sync negotiation disabled\n"); } } } static void ncr53c9x_watch(void *arg) { struct ncr53c9x_softc *sc = arg; struct ncr53c9x_linfo *li; struct ncr53c9x_tinfo *ti; time_t old; int t; NCR_LOCK_ASSERT(sc, MA_OWNED); /* Delete any structures that have not been used in 10min. */ old = time_second - (10 * 60); for (t = 0; t < sc->sc_ntarg; t++) { ti = &sc->sc_tinfo[t]; li = LIST_FIRST(&ti->luns); while (li) { if (li->last_used < old && li->untagged == NULL && li->used == 0) { if (li->lun < NCR_NLUN) ti->lun[li->lun] = NULL; LIST_REMOVE(li, link); free(li, M_DEVBUF); /* Restart the search at the beginning. */ li = LIST_FIRST(&ti->luns); continue; } li = LIST_NEXT(li, link); } } callout_reset(&sc->sc_watchdog, 60 * hz, ncr53c9x_watch, sc); } Index: head/sys/dev/fb/splash.c =================================================================== --- head/sys/dev/fb/splash.c (revision 327948) +++ head/sys/dev/fb/splash.c (revision 327949) @@ -1,217 +1,217 @@ /*- * SPDX-License-Identifier: BSD-2-Clause-FreeBSD * * Copyright (c) 1999 Kazutaka YOKOTA * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer as * the first lines of this file unmodified. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * */ #include __FBSDID("$FreeBSD$"); #include "opt_splash.h" #include #include #include #include #include #include #include #include #include MODULE_VERSION(splash, 1); /* video adapter and image decoder */ static video_adapter_t *splash_adp; static splash_decoder_t *splash_decoder; /* decoder candidates */ static int decoders; static splash_decoder_t **decoder_set; #define DECODER_ARRAY_DELTA 4 /* console driver callback */ static int (*splash_callback)(int, void *); static void *splash_arg; static int splash_find_data(splash_decoder_t *decoder) { caddr_t image_module; void *ptr; size_t sz; if (decoder->data_type == NULL) return (0); image_module = preload_search_by_type(decoder->data_type); if (image_module == NULL) return (ENOENT); ptr = preload_fetch_addr(image_module); sz = preload_fetch_size(image_module); if (ptr == NULL || sz == 0) return (ENOENT); if (bootverbose) printf("splash: image@%p, size:%zu\n", ptr, sz); decoder->data = ptr; decoder->data_size = sz; return (0); } static int splash_test(splash_decoder_t *decoder) { if (splash_find_data(decoder)) return ENOENT; /* XXX */ if (*decoder->init && (*decoder->init)(splash_adp)) { decoder->data = NULL; decoder->data_size = 0; return ENODEV; /* XXX */ } if (bootverbose) printf("splash: image decoder found: %s\n", decoder->name); return 0; } static void splash_new(splash_decoder_t *decoder) { splash_decoder = decoder; if (splash_callback != NULL) (*splash_callback)(SPLASH_INIT, splash_arg); } int splash_register(splash_decoder_t *decoder) { splash_decoder_t **p; int error; int i; if (splash_adp != NULL) { /* * If the video card has already been initialized, test * this decoder immediately. 
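	 * (A decoder that registers after the adapter is up is probed
	 * right away and, on success, replaces the current decoder; one
	 * that registers earlier is parked in decoder_set[] and probed
	 * later from splash_init().)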
*/ error = splash_test(decoder); if (error == 0) { /* replace the current decoder with new one */ if (splash_decoder != NULL) error = splash_term(splash_adp); if (error == 0) splash_new(decoder); } return error; } else { /* register the decoder for later use */ for (i = 0; i < decoders; ++i) { if (decoder_set[i] == NULL) break; } if ((i >= decoders) && (decoders % DECODER_ARRAY_DELTA) == 0) { - p = malloc(sizeof(*p)*(decoders + DECODER_ARRAY_DELTA), - M_DEVBUF, M_NOWAIT); + p = mallocarray(decoders + DECODER_ARRAY_DELTA, + sizeof(*p), M_DEVBUF, M_NOWAIT); if (p == NULL) return ENOMEM; if (decoder_set != NULL) { bcopy(decoder_set, p, sizeof(*p)*decoders); free(decoder_set, M_DEVBUF); } decoder_set = p; i = decoders++; } decoder_set[i] = decoder; } return 0; } int splash_unregister(splash_decoder_t *decoder) { int error; if (splash_decoder == decoder) { if ((error = splash_term(splash_adp)) != 0) return error; } return 0; } int splash_init(video_adapter_t *adp, int (*callback)(int, void *), void *arg) { int i; splash_adp = adp; splash_callback = callback; splash_arg = arg; splash_decoder = NULL; for (i = 0; i < decoders; ++i) { if (decoder_set[i] == NULL) continue; if (splash_test(decoder_set[i]) == 0) { splash_new(decoder_set[i]); break; } decoder_set[i] = NULL; } for (++i; i < decoders; ++i) { decoder_set[i] = NULL; } return 0; } int splash_term(video_adapter_t *adp) { int error = 0; if (splash_adp != adp) return EINVAL; if (splash_decoder != NULL) { if (splash_callback != NULL) error = (*splash_callback)(SPLASH_TERM, splash_arg); if (error == 0 && splash_decoder->term) error = (*splash_decoder->term)(adp); if (error == 0) splash_decoder = NULL; } return error; } int splash(video_adapter_t *adp, int on) { if (splash_decoder != NULL) return (*splash_decoder->splash)(adp, on); return ENODEV; } Index: head/sys/dev/gpio/gpiobus.c =================================================================== --- head/sys/dev/gpio/gpiobus.c (revision 327948) +++ head/sys/dev/gpio/gpiobus.c (revision 327949) @@ -1,862 +1,862 @@ /*- * SPDX-License-Identifier: BSD-2-Clause-FreeBSD * * Copyright (c) 2009 Oleksandr Tymoshenko * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
*/ #include __FBSDID("$FreeBSD$"); #include #include #include #include #ifdef INTRNG #include #endif #include #include #include #include #include "gpiobus_if.h" #undef GPIOBUS_DEBUG #ifdef GPIOBUS_DEBUG #define dprintf printf #else #define dprintf(x, arg...) #endif static void gpiobus_print_pins(struct gpiobus_ivar *, char *, size_t); static int gpiobus_parse_pins(struct gpiobus_softc *, device_t, int); static int gpiobus_probe(device_t); static int gpiobus_attach(device_t); static int gpiobus_detach(device_t); static int gpiobus_suspend(device_t); static int gpiobus_resume(device_t); static void gpiobus_probe_nomatch(device_t, device_t); static int gpiobus_print_child(device_t, device_t); static int gpiobus_child_location_str(device_t, device_t, char *, size_t); static int gpiobus_child_pnpinfo_str(device_t, device_t, char *, size_t); static device_t gpiobus_add_child(device_t, u_int, const char *, int); static void gpiobus_hinted_child(device_t, const char *, int); /* * GPIOBUS interface */ static int gpiobus_acquire_bus(device_t, device_t, int); static void gpiobus_release_bus(device_t, device_t); static int gpiobus_pin_setflags(device_t, device_t, uint32_t, uint32_t); static int gpiobus_pin_getflags(device_t, device_t, uint32_t, uint32_t*); static int gpiobus_pin_getcaps(device_t, device_t, uint32_t, uint32_t*); static int gpiobus_pin_set(device_t, device_t, uint32_t, unsigned int); static int gpiobus_pin_get(device_t, device_t, uint32_t, unsigned int*); static int gpiobus_pin_toggle(device_t, device_t, uint32_t); /* * XXX -> Move me to better place - gpio_subr.c? * Also, this function must be changed when interrupt configuration * data will be moved into struct resource. */ #ifdef INTRNG struct resource * gpio_alloc_intr_resource(device_t consumer_dev, int *rid, u_int alloc_flags, gpio_pin_t pin, uint32_t intr_mode) { u_int irq; struct intr_map_data_gpio *gpio_data; struct resource *res; gpio_data = (struct intr_map_data_gpio *)intr_alloc_map_data( INTR_MAP_DATA_GPIO, sizeof(*gpio_data), M_WAITOK | M_ZERO); gpio_data->gpio_pin_num = pin->pin; gpio_data->gpio_pin_flags = pin->flags; gpio_data->gpio_intr_mode = intr_mode; irq = intr_map_irq(pin->dev, 0, (struct intr_map_data *)gpio_data); res = bus_alloc_resource(consumer_dev, SYS_RES_IRQ, rid, irq, irq, 1, alloc_flags); if (res == NULL) { intr_free_intr_map_data((struct intr_map_data *)gpio_data); return (NULL); } rman_set_virtual(res, gpio_data); return (res); } #else struct resource * gpio_alloc_intr_resource(device_t consumer_dev, int *rid, u_int alloc_flags, gpio_pin_t pin, uint32_t intr_mode) { return (NULL); } #endif int gpio_check_flags(uint32_t caps, uint32_t flags) { /* Filter unwanted flags. */ flags &= caps; /* Cannot mix input/output together. */ if (flags & GPIO_PIN_INPUT && flags & GPIO_PIN_OUTPUT) return (EINVAL); /* Cannot mix pull-up/pull-down together. 
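 * Example: asking for (GPIO_PIN_PULLUP | GPIO_PIN_PULLDOWN) fails with
 * EINVAL, while a requested flag the controller does not advertise in
 * caps (say, GPIO_PIN_OPENDRAIN) is silently masked off above rather
 * than rejected.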
*/ if (flags & GPIO_PIN_PULLUP && flags & GPIO_PIN_PULLDOWN) return (EINVAL); return (0); } static void gpiobus_print_pins(struct gpiobus_ivar *devi, char *buf, size_t buflen) { char tmp[128]; int i, range_start, range_stop, need_coma; if (devi->npins == 0) return; need_coma = 0; range_start = range_stop = devi->pins[0]; for (i = 1; i < devi->npins; i++) { if (devi->pins[i] != (range_stop + 1)) { if (need_coma) strlcat(buf, ",", buflen); memset(tmp, 0, sizeof(tmp)); if (range_start != range_stop) snprintf(tmp, sizeof(tmp) - 1, "%d-%d", range_start, range_stop); else snprintf(tmp, sizeof(tmp) - 1, "%d", range_start); strlcat(buf, tmp, buflen); range_start = range_stop = devi->pins[i]; need_coma = 1; } else range_stop++; } if (need_coma) strlcat(buf, ",", buflen); memset(tmp, 0, sizeof(tmp)); if (range_start != range_stop) snprintf(tmp, sizeof(tmp) - 1, "%d-%d", range_start, range_stop); else snprintf(tmp, sizeof(tmp) - 1, "%d", range_start); strlcat(buf, tmp, buflen); } device_t gpiobus_attach_bus(device_t dev) { device_t busdev; busdev = device_add_child(dev, "gpiobus", -1); if (busdev == NULL) return (NULL); if (device_add_child(dev, "gpioc", -1) == NULL) { device_delete_child(dev, busdev); return (NULL); } #ifdef FDT ofw_gpiobus_register_provider(dev); #endif bus_generic_attach(dev); return (busdev); } int gpiobus_detach_bus(device_t dev) { int err; #ifdef FDT ofw_gpiobus_unregister_provider(dev); #endif err = bus_generic_detach(dev); if (err != 0) return (err); return (device_delete_children(dev)); } int gpiobus_init_softc(device_t dev) { struct gpiobus_softc *sc; sc = GPIOBUS_SOFTC(dev); sc->sc_busdev = dev; sc->sc_dev = device_get_parent(dev); sc->sc_intr_rman.rm_type = RMAN_ARRAY; sc->sc_intr_rman.rm_descr = "GPIO Interrupts"; if (rman_init(&sc->sc_intr_rman) != 0 || rman_manage_region(&sc->sc_intr_rman, 0, ~0) != 0) panic("%s: failed to set up rman.", __func__); if (GPIO_PIN_MAX(sc->sc_dev, &sc->sc_npins) != 0) return (ENXIO); KASSERT(sc->sc_npins >= 0, ("GPIO device with no pins")); /* Pins = GPIO_PIN_MAX() + 1 */ sc->sc_npins++; - sc->sc_pins = malloc(sizeof(*sc->sc_pins) * sc->sc_npins, M_DEVBUF, + sc->sc_pins = mallocarray(sc->sc_npins, sizeof(*sc->sc_pins), M_DEVBUF, M_NOWAIT | M_ZERO); if (sc->sc_pins == NULL) return (ENOMEM); /* Initialize the bus lock. */ GPIOBUS_LOCK_INIT(sc); return (0); } int gpiobus_alloc_ivars(struct gpiobus_ivar *devi) { /* Allocate pins and flags memory. */ - devi->pins = malloc(sizeof(uint32_t) * devi->npins, M_DEVBUF, + devi->pins = mallocarray(devi->npins, sizeof(uint32_t), M_DEVBUF, M_NOWAIT | M_ZERO); if (devi->pins == NULL) return (ENOMEM); - devi->flags = malloc(sizeof(uint32_t) * devi->npins, M_DEVBUF, + devi->flags = mallocarray(devi->npins, sizeof(uint32_t), M_DEVBUF, M_NOWAIT | M_ZERO); if (devi->flags == NULL) { free(devi->pins, M_DEVBUF); return (ENOMEM); } return (0); } void gpiobus_free_ivars(struct gpiobus_ivar *devi) { if (devi->flags) { free(devi->flags, M_DEVBUF); devi->flags = NULL; } if (devi->pins) { free(devi->pins, M_DEVBUF); devi->pins = NULL; } } int gpiobus_acquire_pin(device_t bus, uint32_t pin) { struct gpiobus_softc *sc; sc = device_get_softc(bus); /* Consistency check. */ if (pin >= sc->sc_npins) { device_printf(bus, "invalid pin %d, max: %d\n", pin, sc->sc_npins - 1); return (-1); } /* Mark pin as mapped and give warning if it's already mapped. 
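 * Ownership is only tracked in sc_pins[].mapped; e.g. two hinted
 * children listing the same pin in their "pins" mask will make the
 * second gpiobus_parse_pins() fail with EINVAL.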
*/ if (sc->sc_pins[pin].mapped) { device_printf(bus, "warning: pin %d is already mapped\n", pin); return (-1); } sc->sc_pins[pin].mapped = 1; return (0); } /* Release mapped pin */ int gpiobus_release_pin(device_t bus, uint32_t pin) { struct gpiobus_softc *sc; sc = device_get_softc(bus); /* Consistency check. */ if (pin >= sc->sc_npins) { device_printf(bus, "gpiobus_acquire_pin: invalid pin %d, max=%d\n", pin, sc->sc_npins - 1); return (-1); } if (!sc->sc_pins[pin].mapped) { device_printf(bus, "gpiobus_acquire_pin: pin %d is not mapped\n", pin); return (-1); } sc->sc_pins[pin].mapped = 0; return (0); } static int gpiobus_parse_pins(struct gpiobus_softc *sc, device_t child, int mask) { struct gpiobus_ivar *devi = GPIOBUS_IVAR(child); int i, npins; npins = 0; for (i = 0; i < 32; i++) { if (mask & (1 << i)) npins++; } if (npins == 0) { device_printf(child, "empty pin mask\n"); return (EINVAL); } devi->npins = npins; if (gpiobus_alloc_ivars(devi) != 0) { device_printf(child, "cannot allocate device ivars\n"); return (EINVAL); } npins = 0; for (i = 0; i < 32; i++) { if ((mask & (1 << i)) == 0) continue; /* Reserve the GPIO pin. */ if (gpiobus_acquire_pin(sc->sc_busdev, i) != 0) { gpiobus_free_ivars(devi); return (EINVAL); } devi->pins[npins++] = i; /* Use the child name as pin name. */ GPIOBUS_PIN_SETNAME(sc->sc_busdev, i, device_get_nameunit(child)); } return (0); } static int gpiobus_probe(device_t dev) { device_set_desc(dev, "GPIO bus"); return (BUS_PROBE_GENERIC); } static int gpiobus_attach(device_t dev) { int err; err = gpiobus_init_softc(dev); if (err != 0) return (err); /* * Get parent's pins and mark them as unmapped */ bus_generic_probe(dev); bus_enumerate_hinted_children(dev); return (bus_generic_attach(dev)); } /* * Since this is not a self-enumerating bus, and since we always add * children in attach, we have to always delete children here. 
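 * Deleting a child also means releasing the per-child ivars and the
 * resource list that gpiobus_add_child()/gpiobus_parse_pins() set up,
 * which is what the loop below does before the pin array itself is
 * freed.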
*/ static int gpiobus_detach(device_t dev) { struct gpiobus_softc *sc; struct gpiobus_ivar *devi; device_t *devlist; int i, err, ndevs; sc = GPIOBUS_SOFTC(dev); KASSERT(mtx_initialized(&sc->sc_mtx), ("gpiobus mutex not initialized")); GPIOBUS_LOCK_DESTROY(sc); if ((err = bus_generic_detach(dev)) != 0) return (err); if ((err = device_get_children(dev, &devlist, &ndevs)) != 0) return (err); for (i = 0; i < ndevs; i++) { devi = GPIOBUS_IVAR(devlist[i]); gpiobus_free_ivars(devi); resource_list_free(&devi->rl); free(devi, M_DEVBUF); device_delete_child(dev, devlist[i]); } free(devlist, M_TEMP); rman_fini(&sc->sc_intr_rman); if (sc->sc_pins) { for (i = 0; i < sc->sc_npins; i++) { if (sc->sc_pins[i].name != NULL) free(sc->sc_pins[i].name, M_DEVBUF); sc->sc_pins[i].name = NULL; } free(sc->sc_pins, M_DEVBUF); sc->sc_pins = NULL; } return (0); } static int gpiobus_suspend(device_t dev) { return (bus_generic_suspend(dev)); } static int gpiobus_resume(device_t dev) { return (bus_generic_resume(dev)); } static void gpiobus_probe_nomatch(device_t dev, device_t child) { char pins[128]; struct gpiobus_ivar *devi; devi = GPIOBUS_IVAR(child); memset(pins, 0, sizeof(pins)); gpiobus_print_pins(devi, pins, sizeof(pins)); if (devi->npins > 1) device_printf(dev, " at pins %s", pins); else device_printf(dev, " at pin %s", pins); resource_list_print_type(&devi->rl, "irq", SYS_RES_IRQ, "%jd"); printf("\n"); } static int gpiobus_print_child(device_t dev, device_t child) { char pins[128]; int retval = 0; struct gpiobus_ivar *devi; devi = GPIOBUS_IVAR(child); memset(pins, 0, sizeof(pins)); retval += bus_print_child_header(dev, child); if (devi->npins > 0) { if (devi->npins > 1) retval += printf(" at pins "); else retval += printf(" at pin "); gpiobus_print_pins(devi, pins, sizeof(pins)); retval += printf("%s", pins); } resource_list_print_type(&devi->rl, "irq", SYS_RES_IRQ, "%jd"); retval += bus_print_child_footer(dev, child); return (retval); } static int gpiobus_child_location_str(device_t bus, device_t child, char *buf, size_t buflen) { struct gpiobus_ivar *devi; devi = GPIOBUS_IVAR(child); if (devi->npins > 1) strlcpy(buf, "pins=", buflen); else strlcpy(buf, "pin=", buflen); gpiobus_print_pins(devi, buf, buflen); return (0); } static int gpiobus_child_pnpinfo_str(device_t bus, device_t child, char *buf, size_t buflen) { *buf = '\0'; return (0); } static device_t gpiobus_add_child(device_t dev, u_int order, const char *name, int unit) { device_t child; struct gpiobus_ivar *devi; child = device_add_child_ordered(dev, order, name, unit); if (child == NULL) return (child); devi = malloc(sizeof(struct gpiobus_ivar), M_DEVBUF, M_NOWAIT | M_ZERO); if (devi == NULL) { device_delete_child(dev, child); return (NULL); } resource_list_init(&devi->rl); device_set_ivars(child, devi); return (child); } static void gpiobus_hinted_child(device_t bus, const char *dname, int dunit) { struct gpiobus_softc *sc = GPIOBUS_SOFTC(bus); struct gpiobus_ivar *devi; device_t child; int irq, pins; child = BUS_ADD_CHILD(bus, 0, dname, dunit); devi = GPIOBUS_IVAR(child); resource_int_value(dname, dunit, "pins", &pins); if (gpiobus_parse_pins(sc, child, pins)) { resource_list_free(&devi->rl); free(devi, M_DEVBUF); device_delete_child(bus, child); } if (resource_int_value(dname, dunit, "irq", &irq) == 0) { if (bus_set_resource(child, SYS_RES_IRQ, 0, irq, 1) != 0) device_printf(bus, "warning: bus_set_resource() failed\n"); } } static int gpiobus_set_resource(device_t dev, device_t child, int type, int rid, rman_res_t start, rman_res_t count) { 
struct gpiobus_ivar *devi; struct resource_list_entry *rle; dprintf("%s: entry (%p, %p, %d, %d, %p, %ld)\n", __func__, dev, child, type, rid, (void *)(intptr_t)start, count); devi = GPIOBUS_IVAR(child); rle = resource_list_add(&devi->rl, type, rid, start, start + count - 1, count); if (rle == NULL) return (ENXIO); return (0); } static struct resource * gpiobus_alloc_resource(device_t bus, device_t child, int type, int *rid, rman_res_t start, rman_res_t end, rman_res_t count, u_int flags) { struct gpiobus_softc *sc; struct resource *rv; struct resource_list *rl; struct resource_list_entry *rle; int isdefault; if (type != SYS_RES_IRQ) return (NULL); isdefault = (RMAN_IS_DEFAULT_RANGE(start, end) && count == 1); rle = NULL; if (isdefault) { rl = BUS_GET_RESOURCE_LIST(bus, child); if (rl == NULL) return (NULL); rle = resource_list_find(rl, type, *rid); if (rle == NULL) return (NULL); if (rle->res != NULL) panic("%s: resource entry is busy", __func__); start = rle->start; count = rle->count; end = rle->end; } sc = device_get_softc(bus); rv = rman_reserve_resource(&sc->sc_intr_rman, start, end, count, flags, child); if (rv == NULL) return (NULL); rman_set_rid(rv, *rid); if ((flags & RF_ACTIVE) != 0 && bus_activate_resource(child, type, *rid, rv) != 0) { rman_release_resource(rv); return (NULL); } return (rv); } static int gpiobus_release_resource(device_t bus __unused, device_t child, int type, int rid, struct resource *r) { int error; if (rman_get_flags(r) & RF_ACTIVE) { error = bus_deactivate_resource(child, type, rid, r); if (error) return (error); } return (rman_release_resource(r)); } static struct resource_list * gpiobus_get_resource_list(device_t bus __unused, device_t child) { struct gpiobus_ivar *ivar; ivar = GPIOBUS_IVAR(child); return (&ivar->rl); } static int gpiobus_acquire_bus(device_t busdev, device_t child, int how) { struct gpiobus_softc *sc; sc = device_get_softc(busdev); GPIOBUS_ASSERT_UNLOCKED(sc); GPIOBUS_LOCK(sc); if (sc->sc_owner != NULL) { if (sc->sc_owner == child) panic("%s: %s still owns the bus.", device_get_nameunit(busdev), device_get_nameunit(child)); if (how == GPIOBUS_DONTWAIT) { GPIOBUS_UNLOCK(sc); return (EWOULDBLOCK); } while (sc->sc_owner != NULL) mtx_sleep(sc, &sc->sc_mtx, 0, "gpiobuswait", 0); } sc->sc_owner = child; GPIOBUS_UNLOCK(sc); return (0); } static void gpiobus_release_bus(device_t busdev, device_t child) { struct gpiobus_softc *sc; sc = device_get_softc(busdev); GPIOBUS_ASSERT_UNLOCKED(sc); GPIOBUS_LOCK(sc); if (sc->sc_owner == NULL) panic("%s: %s releasing unowned bus.", device_get_nameunit(busdev), device_get_nameunit(child)); if (sc->sc_owner != child) panic("%s: %s trying to release bus owned by %s", device_get_nameunit(busdev), device_get_nameunit(child), device_get_nameunit(sc->sc_owner)); sc->sc_owner = NULL; wakeup(sc); GPIOBUS_UNLOCK(sc); } static int gpiobus_pin_setflags(device_t dev, device_t child, uint32_t pin, uint32_t flags) { struct gpiobus_softc *sc = GPIOBUS_SOFTC(dev); struct gpiobus_ivar *devi = GPIOBUS_IVAR(child); uint32_t caps; if (pin >= devi->npins) return (EINVAL); if (GPIO_PIN_GETCAPS(sc->sc_dev, devi->pins[pin], &caps) != 0) return (EINVAL); if (gpio_check_flags(caps, flags) != 0) return (EINVAL); return (GPIO_PIN_SETFLAGS(sc->sc_dev, devi->pins[pin], flags)); } static int gpiobus_pin_getflags(device_t dev, device_t child, uint32_t pin, uint32_t *flags) { struct gpiobus_softc *sc = GPIOBUS_SOFTC(dev); struct gpiobus_ivar *devi = GPIOBUS_IVAR(child); if (pin >= devi->npins) return (EINVAL); return 
GPIO_PIN_GETFLAGS(sc->sc_dev, devi->pins[pin], flags); } static int gpiobus_pin_getcaps(device_t dev, device_t child, uint32_t pin, uint32_t *caps) { struct gpiobus_softc *sc = GPIOBUS_SOFTC(dev); struct gpiobus_ivar *devi = GPIOBUS_IVAR(child); if (pin >= devi->npins) return (EINVAL); return GPIO_PIN_GETCAPS(sc->sc_dev, devi->pins[pin], caps); } static int gpiobus_pin_set(device_t dev, device_t child, uint32_t pin, unsigned int value) { struct gpiobus_softc *sc = GPIOBUS_SOFTC(dev); struct gpiobus_ivar *devi = GPIOBUS_IVAR(child); if (pin >= devi->npins) return (EINVAL); return GPIO_PIN_SET(sc->sc_dev, devi->pins[pin], value); } static int gpiobus_pin_get(device_t dev, device_t child, uint32_t pin, unsigned int *value) { struct gpiobus_softc *sc = GPIOBUS_SOFTC(dev); struct gpiobus_ivar *devi = GPIOBUS_IVAR(child); if (pin >= devi->npins) return (EINVAL); return GPIO_PIN_GET(sc->sc_dev, devi->pins[pin], value); } static int gpiobus_pin_toggle(device_t dev, device_t child, uint32_t pin) { struct gpiobus_softc *sc = GPIOBUS_SOFTC(dev); struct gpiobus_ivar *devi = GPIOBUS_IVAR(child); if (pin >= devi->npins) return (EINVAL); return GPIO_PIN_TOGGLE(sc->sc_dev, devi->pins[pin]); } static int gpiobus_pin_getname(device_t dev, uint32_t pin, char *name) { struct gpiobus_softc *sc; sc = GPIOBUS_SOFTC(dev); if (pin >= sc->sc_npins) return (EINVAL); /* Do we have a name for this pin? */ if (sc->sc_pins[pin].name != NULL) { memcpy(name, sc->sc_pins[pin].name, GPIOMAXNAME); return (0); } /* Return the default pin name. */ return (GPIO_PIN_GETNAME(device_get_parent(dev), pin, name)); } static int gpiobus_pin_setname(device_t dev, uint32_t pin, const char *name) { struct gpiobus_softc *sc; sc = GPIOBUS_SOFTC(dev); if (pin >= sc->sc_npins) return (EINVAL); if (name == NULL) return (EINVAL); /* Save the pin name.
*/ if (sc->sc_pins[pin].name == NULL) sc->sc_pins[pin].name = malloc(GPIOMAXNAME, M_DEVBUF, M_WAITOK | M_ZERO); strlcpy(sc->sc_pins[pin].name, name, GPIOMAXNAME); return (0); } static device_method_t gpiobus_methods[] = { /* Device interface */ DEVMETHOD(device_probe, gpiobus_probe), DEVMETHOD(device_attach, gpiobus_attach), DEVMETHOD(device_detach, gpiobus_detach), DEVMETHOD(device_shutdown, bus_generic_shutdown), DEVMETHOD(device_suspend, gpiobus_suspend), DEVMETHOD(device_resume, gpiobus_resume), /* Bus interface */ DEVMETHOD(bus_setup_intr, bus_generic_setup_intr), DEVMETHOD(bus_config_intr, bus_generic_config_intr), DEVMETHOD(bus_teardown_intr, bus_generic_teardown_intr), DEVMETHOD(bus_set_resource, gpiobus_set_resource), DEVMETHOD(bus_alloc_resource, gpiobus_alloc_resource), DEVMETHOD(bus_release_resource, gpiobus_release_resource), DEVMETHOD(bus_activate_resource, bus_generic_activate_resource), DEVMETHOD(bus_deactivate_resource, bus_generic_deactivate_resource), DEVMETHOD(bus_get_resource_list, gpiobus_get_resource_list), DEVMETHOD(bus_add_child, gpiobus_add_child), DEVMETHOD(bus_probe_nomatch, gpiobus_probe_nomatch), DEVMETHOD(bus_print_child, gpiobus_print_child), DEVMETHOD(bus_child_pnpinfo_str, gpiobus_child_pnpinfo_str), DEVMETHOD(bus_child_location_str, gpiobus_child_location_str), DEVMETHOD(bus_hinted_child, gpiobus_hinted_child), /* GPIO protocol */ DEVMETHOD(gpiobus_acquire_bus, gpiobus_acquire_bus), DEVMETHOD(gpiobus_release_bus, gpiobus_release_bus), DEVMETHOD(gpiobus_pin_getflags, gpiobus_pin_getflags), DEVMETHOD(gpiobus_pin_getcaps, gpiobus_pin_getcaps), DEVMETHOD(gpiobus_pin_setflags, gpiobus_pin_setflags), DEVMETHOD(gpiobus_pin_get, gpiobus_pin_get), DEVMETHOD(gpiobus_pin_set, gpiobus_pin_set), DEVMETHOD(gpiobus_pin_toggle, gpiobus_pin_toggle), DEVMETHOD(gpiobus_pin_getname, gpiobus_pin_getname), DEVMETHOD(gpiobus_pin_setname, gpiobus_pin_setname), DEVMETHOD_END }; driver_t gpiobus_driver = { "gpiobus", gpiobus_methods, sizeof(struct gpiobus_softc) }; devclass_t gpiobus_devclass; EARLY_DRIVER_MODULE(gpiobus, gpio, gpiobus_driver, gpiobus_devclass, 0, 0, BUS_PASS_BUS + BUS_PASS_ORDER_MIDDLE); MODULE_VERSION(gpiobus, 1); Index: head/sys/dev/if_ndis/if_ndis.c =================================================================== --- head/sys/dev/if_ndis/if_ndis.c (revision 327948) +++ head/sys/dev/if_ndis/if_ndis.c (revision 327949) @@ -1,3426 +1,3426 @@ /*- * SPDX-License-Identifier: BSD-4-Clause * * Copyright (c) 2003 * Bill Paul . All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed by Bill Paul. * 4. Neither the name of the author nor the names of any co-contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. 
* * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF * THE POSSIBILITY OF SUCH DAMAGE. * * WPA support originally contributed by Arvind Srinivasan * then hacked upon mercilessly by my. */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #define NDIS_DEBUG #ifdef NDIS_DEBUG #define DPRINTF(x) do { if (ndis_debug > 0) printf x; } while (0) int ndis_debug = 0; SYSCTL_INT(_debug, OID_AUTO, ndis, CTLFLAG_RW, &ndis_debug, 0, "if_ndis debug level"); #else #define DPRINTF(x) #endif SYSCTL_DECL(_hw_ndisusb); int ndisusb_halt = 1; SYSCTL_INT(_hw_ndisusb, OID_AUTO, halt, CTLFLAG_RW, &ndisusb_halt, 0, "Halt NDIS USB driver when it's attached"); /* 0 - 30 dBm to mW conversion table */ static const uint16_t dBm2mW[] = { 1, 1, 1, 1, 2, 2, 2, 2, 3, 3, 3, 4, 4, 4, 5, 6, 6, 7, 8, 9, 10, 11, 13, 14, 16, 18, 20, 22, 25, 28, 32, 35, 40, 45, 50, 56, 63, 71, 79, 89, 100, 112, 126, 141, 158, 178, 200, 224, 251, 282, 316, 355, 398, 447, 501, 562, 631, 708, 794, 891, 1000 }; MODULE_DEPEND(ndis, ether, 1, 1, 1); MODULE_DEPEND(ndis, wlan, 1, 1, 1); MODULE_DEPEND(ndis, ndisapi, 1, 1, 1); MODULE_VERSION(ndis, 1); int ndis_attach (device_t); int ndis_detach (device_t); int ndis_suspend (device_t); int ndis_resume (device_t); void ndis_shutdown (device_t); int ndisdrv_modevent (module_t, int, void *); static void ndis_txeof (ndis_handle, ndis_packet *, ndis_status); static void ndis_rxeof (ndis_handle, ndis_packet **, uint32_t); static void ndis_rxeof_eth (ndis_handle, ndis_handle, char *, void *, uint32_t, void *, uint32_t, uint32_t); static void ndis_rxeof_done (ndis_handle); static void ndis_rxeof_xfr (kdpc *, ndis_handle, void *, void *); static void ndis_rxeof_xfr_done (ndis_handle, ndis_packet *, uint32_t, uint32_t); static void ndis_linksts (ndis_handle, ndis_status, void *, uint32_t); static void ndis_linksts_done (ndis_handle); /* We need to wrap these functions for amd64. 
*/ static funcptr ndis_txeof_wrap; static funcptr ndis_rxeof_wrap; static funcptr ndis_rxeof_eth_wrap; static funcptr ndis_rxeof_done_wrap; static funcptr ndis_rxeof_xfr_wrap; static funcptr ndis_rxeof_xfr_done_wrap; static funcptr ndis_linksts_wrap; static funcptr ndis_linksts_done_wrap; static funcptr ndis_ticktask_wrap; static funcptr ndis_ifstarttask_wrap; static funcptr ndis_resettask_wrap; static funcptr ndis_inputtask_wrap; static struct ieee80211vap *ndis_vap_create(struct ieee80211com *, const char [IFNAMSIZ], int, enum ieee80211_opmode, int, const uint8_t [IEEE80211_ADDR_LEN], const uint8_t [IEEE80211_ADDR_LEN]); static void ndis_vap_delete (struct ieee80211vap *); static void ndis_tick (void *); static void ndis_ticktask (device_object *, void *); static int ndis_raw_xmit (struct ieee80211_node *, struct mbuf *, const struct ieee80211_bpf_params *); static void ndis_update_mcast (struct ieee80211com *); static void ndis_update_promisc (struct ieee80211com *); static void ndis_ifstart (struct ifnet *); static void ndis_ifstarttask (device_object *, void *); static void ndis_resettask (device_object *, void *); static void ndis_inputtask (device_object *, void *); static int ndis_ifioctl (struct ifnet *, u_long, caddr_t); static int ndis_newstate (struct ieee80211vap *, enum ieee80211_state, int); static int ndis_nettype_chan (uint32_t); static int ndis_nettype_mode (uint32_t); static void ndis_scan (void *); static void ndis_scan_results (struct ndis_softc *); static void ndis_scan_start (struct ieee80211com *); static void ndis_scan_end (struct ieee80211com *); static void ndis_set_channel (struct ieee80211com *); static void ndis_scan_curchan (struct ieee80211_scan_state *, unsigned long); static void ndis_scan_mindwell (struct ieee80211_scan_state *); static void ndis_init (void *); static void ndis_stop (struct ndis_softc *); static int ndis_ifmedia_upd (struct ifnet *); static void ndis_ifmedia_sts (struct ifnet *, struct ifmediareq *); static int ndis_get_bssid_list (struct ndis_softc *, ndis_80211_bssid_list_ex **); static int ndis_get_assoc (struct ndis_softc *, ndis_wlan_bssid_ex **); static int ndis_probe_offload (struct ndis_softc *); static int ndis_set_offload (struct ndis_softc *); static void ndis_getstate_80211 (struct ndis_softc *); static void ndis_setstate_80211 (struct ndis_softc *); static void ndis_auth_and_assoc (struct ndis_softc *, struct ieee80211vap *); static void ndis_media_status (struct ifnet *, struct ifmediareq *); static int ndis_set_cipher (struct ndis_softc *, int); static int ndis_set_wpa (struct ndis_softc *, void *, int); static int ndis_add_key (struct ieee80211vap *, const struct ieee80211_key *); static int ndis_del_key (struct ieee80211vap *, const struct ieee80211_key *); static void ndis_setmulti (struct ndis_softc *); static void ndis_map_sclist (void *, bus_dma_segment_t *, int, bus_size_t, int); static int ndis_ifattach(struct ndis_softc *); static int ndis_80211attach(struct ndis_softc *); static int ndis_80211ioctl(struct ieee80211com *, u_long , void *); static int ndis_80211transmit(struct ieee80211com *, struct mbuf *); static void ndis_80211parent(struct ieee80211com *); static int ndisdrv_loaded = 0; /* * This routine should call windrv_load() once for each driver * image. This will do the relocation and dynalinking for the * image, and create a Windows driver object which will be * saved in our driver database. 
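 *
 * For context, a module event handler of this shape is normally hooked
 * into the loader through a moduledata_t.  A minimal, hypothetical sketch
 * (names invented here; not necessarily how if_ndis actually registers
 * itself, which goes through its bus attachment glue):
 *
 *	static moduledata_t ndisdrv_example_mod = {
 *		"ndisdrv_example",	// hypothetical module name
 *		ndisdrv_modevent,	// the handler defined below
 *		NULL
 *	};
 *	DECLARE_MODULE(ndisdrv_example, ndisdrv_example_mod, SI_SUB_DRIVERS,
 *	    SI_ORDER_MIDDLE);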
*/ int ndisdrv_modevent(mod, cmd, arg) module_t mod; int cmd; void *arg; { int error = 0; switch (cmd) { case MOD_LOAD: ndisdrv_loaded++; if (ndisdrv_loaded > 1) break; windrv_wrap((funcptr)ndis_rxeof, &ndis_rxeof_wrap, 3, WINDRV_WRAP_STDCALL); windrv_wrap((funcptr)ndis_rxeof_eth, &ndis_rxeof_eth_wrap, 8, WINDRV_WRAP_STDCALL); windrv_wrap((funcptr)ndis_rxeof_done, &ndis_rxeof_done_wrap, 1, WINDRV_WRAP_STDCALL); windrv_wrap((funcptr)ndis_rxeof_xfr, &ndis_rxeof_xfr_wrap, 4, WINDRV_WRAP_STDCALL); windrv_wrap((funcptr)ndis_rxeof_xfr_done, &ndis_rxeof_xfr_done_wrap, 4, WINDRV_WRAP_STDCALL); windrv_wrap((funcptr)ndis_txeof, &ndis_txeof_wrap, 3, WINDRV_WRAP_STDCALL); windrv_wrap((funcptr)ndis_linksts, &ndis_linksts_wrap, 4, WINDRV_WRAP_STDCALL); windrv_wrap((funcptr)ndis_linksts_done, &ndis_linksts_done_wrap, 1, WINDRV_WRAP_STDCALL); windrv_wrap((funcptr)ndis_ticktask, &ndis_ticktask_wrap, 2, WINDRV_WRAP_STDCALL); windrv_wrap((funcptr)ndis_ifstarttask, &ndis_ifstarttask_wrap, 2, WINDRV_WRAP_STDCALL); windrv_wrap((funcptr)ndis_resettask, &ndis_resettask_wrap, 2, WINDRV_WRAP_STDCALL); windrv_wrap((funcptr)ndis_inputtask, &ndis_inputtask_wrap, 2, WINDRV_WRAP_STDCALL); break; case MOD_UNLOAD: ndisdrv_loaded--; if (ndisdrv_loaded > 0) break; /* fallthrough */ case MOD_SHUTDOWN: windrv_unwrap(ndis_rxeof_wrap); windrv_unwrap(ndis_rxeof_eth_wrap); windrv_unwrap(ndis_rxeof_done_wrap); windrv_unwrap(ndis_rxeof_xfr_wrap); windrv_unwrap(ndis_rxeof_xfr_done_wrap); windrv_unwrap(ndis_txeof_wrap); windrv_unwrap(ndis_linksts_wrap); windrv_unwrap(ndis_linksts_done_wrap); windrv_unwrap(ndis_ticktask_wrap); windrv_unwrap(ndis_ifstarttask_wrap); windrv_unwrap(ndis_resettask_wrap); windrv_unwrap(ndis_inputtask_wrap); break; default: error = EINVAL; break; } return (error); } /* * Program the 64-bit multicast hash filter. 
*/ static void ndis_setmulti(sc) struct ndis_softc *sc; { struct ifnet *ifp; struct ifmultiaddr *ifma; int len, mclistsz, error; uint8_t *mclist; if (!NDIS_INITIALIZED(sc)) return; if (sc->ndis_80211) return; ifp = sc->ifp; if (ifp->if_flags & IFF_ALLMULTI || ifp->if_flags & IFF_PROMISC) { sc->ndis_filter |= NDIS_PACKET_TYPE_ALL_MULTICAST; len = sizeof(sc->ndis_filter); error = ndis_set_info(sc, OID_GEN_CURRENT_PACKET_FILTER, &sc->ndis_filter, &len); if (error) device_printf(sc->ndis_dev, "set allmulti failed: %d\n", error); return; } if (TAILQ_EMPTY(&ifp->if_multiaddrs)) return; len = sizeof(mclistsz); ndis_get_info(sc, OID_802_3_MAXIMUM_LIST_SIZE, &mclistsz, &len); mclist = malloc(ETHER_ADDR_LEN * mclistsz, M_TEMP, M_NOWAIT|M_ZERO); if (mclist == NULL) { sc->ndis_filter |= NDIS_PACKET_TYPE_ALL_MULTICAST; goto out; } sc->ndis_filter |= NDIS_PACKET_TYPE_MULTICAST; len = 0; if_maddr_rlock(ifp); TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) { if (ifma->ifma_addr->sa_family != AF_LINK) continue; bcopy(LLADDR((struct sockaddr_dl *)ifma->ifma_addr), mclist + (ETHER_ADDR_LEN * len), ETHER_ADDR_LEN); len++; if (len > mclistsz) { if_maddr_runlock(ifp); sc->ndis_filter |= NDIS_PACKET_TYPE_ALL_MULTICAST; sc->ndis_filter &= ~NDIS_PACKET_TYPE_MULTICAST; goto out; } } if_maddr_runlock(ifp); len = len * ETHER_ADDR_LEN; error = ndis_set_info(sc, OID_802_3_MULTICAST_LIST, mclist, &len); if (error) { device_printf(sc->ndis_dev, "set mclist failed: %d\n", error); sc->ndis_filter |= NDIS_PACKET_TYPE_ALL_MULTICAST; sc->ndis_filter &= ~NDIS_PACKET_TYPE_MULTICAST; } out: free(mclist, M_TEMP); len = sizeof(sc->ndis_filter); error = ndis_set_info(sc, OID_GEN_CURRENT_PACKET_FILTER, &sc->ndis_filter, &len); if (error) device_printf(sc->ndis_dev, "set multi failed: %d\n", error); } static int ndis_set_offload(sc) struct ndis_softc *sc; { ndis_task_offload *nto; ndis_task_offload_hdr *ntoh; ndis_task_tcpip_csum *nttc; struct ifnet *ifp; int len, error; if (!NDIS_INITIALIZED(sc)) return (EINVAL); if (sc->ndis_80211) return (EINVAL); /* See if there's anything to set. 
*/ ifp = sc->ifp; error = ndis_probe_offload(sc); if (error) return (error); if (sc->ndis_hwassist == 0 && ifp->if_capabilities == 0) return (0); len = sizeof(ndis_task_offload_hdr) + sizeof(ndis_task_offload) + sizeof(ndis_task_tcpip_csum); ntoh = malloc(len, M_TEMP, M_NOWAIT|M_ZERO); if (ntoh == NULL) return (ENOMEM); ntoh->ntoh_vers = NDIS_TASK_OFFLOAD_VERSION; ntoh->ntoh_len = sizeof(ndis_task_offload_hdr); ntoh->ntoh_offset_firsttask = sizeof(ndis_task_offload_hdr); ntoh->ntoh_encapfmt.nef_encaphdrlen = sizeof(struct ether_header); ntoh->ntoh_encapfmt.nef_encap = NDIS_ENCAP_IEEE802_3; ntoh->ntoh_encapfmt.nef_flags = NDIS_ENCAPFLAG_FIXEDHDRLEN; nto = (ndis_task_offload *)((char *)ntoh + ntoh->ntoh_offset_firsttask); nto->nto_vers = NDIS_TASK_OFFLOAD_VERSION; nto->nto_len = sizeof(ndis_task_offload); nto->nto_task = NDIS_TASK_TCPIP_CSUM; nto->nto_offset_nexttask = 0; nto->nto_taskbuflen = sizeof(ndis_task_tcpip_csum); nttc = (ndis_task_tcpip_csum *)nto->nto_taskbuf; if (ifp->if_capenable & IFCAP_TXCSUM) nttc->nttc_v4tx = sc->ndis_v4tx; if (ifp->if_capenable & IFCAP_RXCSUM) nttc->nttc_v4rx = sc->ndis_v4rx; error = ndis_set_info(sc, OID_TCP_TASK_OFFLOAD, ntoh, &len); free(ntoh, M_TEMP); return (error); } static int ndis_probe_offload(sc) struct ndis_softc *sc; { ndis_task_offload *nto; ndis_task_offload_hdr *ntoh; ndis_task_tcpip_csum *nttc = NULL; struct ifnet *ifp; int len, error, dummy; ifp = sc->ifp; len = sizeof(dummy); error = ndis_get_info(sc, OID_TCP_TASK_OFFLOAD, &dummy, &len); if (error != ENOSPC) return (error); ntoh = malloc(len, M_TEMP, M_NOWAIT|M_ZERO); if (ntoh == NULL) return (ENOMEM); ntoh->ntoh_vers = NDIS_TASK_OFFLOAD_VERSION; ntoh->ntoh_len = sizeof(ndis_task_offload_hdr); ntoh->ntoh_encapfmt.nef_encaphdrlen = sizeof(struct ether_header); ntoh->ntoh_encapfmt.nef_encap = NDIS_ENCAP_IEEE802_3; ntoh->ntoh_encapfmt.nef_flags = NDIS_ENCAPFLAG_FIXEDHDRLEN; error = ndis_get_info(sc, OID_TCP_TASK_OFFLOAD, ntoh, &len); if (error) { free(ntoh, M_TEMP); return (error); } if (ntoh->ntoh_vers != NDIS_TASK_OFFLOAD_VERSION) { free(ntoh, M_TEMP); return (EINVAL); } nto = (ndis_task_offload *)((char *)ntoh + ntoh->ntoh_offset_firsttask); while (1) { switch (nto->nto_task) { case NDIS_TASK_TCPIP_CSUM: nttc = (ndis_task_tcpip_csum *)nto->nto_taskbuf; break; /* Don't handle these yet. 
*/ case NDIS_TASK_IPSEC: case NDIS_TASK_TCP_LARGESEND: default: break; } if (nto->nto_offset_nexttask == 0) break; nto = (ndis_task_offload *)((char *)nto + nto->nto_offset_nexttask); } if (nttc == NULL) { free(ntoh, M_TEMP); return (ENOENT); } sc->ndis_v4tx = nttc->nttc_v4tx; sc->ndis_v4rx = nttc->nttc_v4rx; if (nttc->nttc_v4tx & NDIS_TCPSUM_FLAGS_IP_CSUM) sc->ndis_hwassist |= CSUM_IP; if (nttc->nttc_v4tx & NDIS_TCPSUM_FLAGS_TCP_CSUM) sc->ndis_hwassist |= CSUM_TCP; if (nttc->nttc_v4tx & NDIS_TCPSUM_FLAGS_UDP_CSUM) sc->ndis_hwassist |= CSUM_UDP; if (sc->ndis_hwassist) ifp->if_capabilities |= IFCAP_TXCSUM; if (nttc->nttc_v4rx & NDIS_TCPSUM_FLAGS_IP_CSUM) ifp->if_capabilities |= IFCAP_RXCSUM; if (nttc->nttc_v4rx & NDIS_TCPSUM_FLAGS_TCP_CSUM) ifp->if_capabilities |= IFCAP_RXCSUM; if (nttc->nttc_v4rx & NDIS_TCPSUM_FLAGS_UDP_CSUM) ifp->if_capabilities |= IFCAP_RXCSUM; free(ntoh, M_TEMP); return (0); } static int ndis_nettype_chan(uint32_t type) { switch (type) { case NDIS_80211_NETTYPE_11FH: return (IEEE80211_CHAN_FHSS); case NDIS_80211_NETTYPE_11DS: return (IEEE80211_CHAN_B); case NDIS_80211_NETTYPE_11OFDM5: return (IEEE80211_CHAN_A); case NDIS_80211_NETTYPE_11OFDM24: return (IEEE80211_CHAN_G); } DPRINTF(("unknown channel nettype %d\n", type)); return (IEEE80211_CHAN_B); /* Default to 11B chan */ } static int ndis_nettype_mode(uint32_t type) { switch (type) { case NDIS_80211_NETTYPE_11FH: return (IEEE80211_MODE_FH); case NDIS_80211_NETTYPE_11DS: return (IEEE80211_MODE_11B); case NDIS_80211_NETTYPE_11OFDM5: return (IEEE80211_MODE_11A); case NDIS_80211_NETTYPE_11OFDM24: return (IEEE80211_MODE_11G); } DPRINTF(("unknown mode nettype %d\n", type)); return (IEEE80211_MODE_AUTO); } /* * Attach the interface. Allocate softc structures, do ifmedia * setup and ethernet/BPF attach. */ int ndis_attach(device_t dev) { struct ndis_softc *sc; driver_object *pdrv; device_object *pdo; int error = 0, len; int i; sc = device_get_softc(dev); mtx_init(&sc->ndis_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK, MTX_DEF); KeInitializeSpinLock(&sc->ndis_rxlock); KeInitializeSpinLock(&sc->ndisusb_tasklock); KeInitializeSpinLock(&sc->ndisusb_xferdonelock); InitializeListHead(&sc->ndis_shlist); InitializeListHead(&sc->ndisusb_tasklist); InitializeListHead(&sc->ndisusb_xferdonelist); callout_init(&sc->ndis_stat_callout, 1); mbufq_init(&sc->ndis_rxqueue, INT_MAX); /* XXXGL: sane maximum */ if (sc->ndis_iftype == PCMCIABus) { error = ndis_alloc_amem(sc); if (error) { device_printf(dev, "failed to allocate " "attribute memory\n"); goto fail; } } /* Create sysctl registry nodes */ ndis_create_sysctls(sc); /* Find the PDO for this device instance. */ if (sc->ndis_iftype == PCIBus) pdrv = windrv_lookup(0, "PCI Bus"); else if (sc->ndis_iftype == PCMCIABus) pdrv = windrv_lookup(0, "PCCARD Bus"); else pdrv = windrv_lookup(0, "USB Bus"); pdo = windrv_find_pdo(pdrv, dev); /* * Create a new functional device object for this * device. This is what creates the miniport block * for this device instance. */ if (NdisAddDevice(sc->ndis_dobj, pdo) != STATUS_SUCCESS) { device_printf(dev, "failed to create FDO!\n"); error = ENXIO; goto fail; } /* Tell the user what version of the API the driver is using. */ device_printf(dev, "NDIS API version: %d.%d\n", sc->ndis_chars->nmc_version_major, sc->ndis_chars->nmc_version_minor); /* Do resource conversion. */ if (sc->ndis_iftype == PCMCIABus || sc->ndis_iftype == PCIBus) ndis_convert_res(sc); else sc->ndis_block->nmb_rlist = NULL; /* Install our RX and TX interrupt handlers. 
*/ sc->ndis_block->nmb_senddone_func = ndis_txeof_wrap; sc->ndis_block->nmb_pktind_func = ndis_rxeof_wrap; sc->ndis_block->nmb_ethrxindicate_func = ndis_rxeof_eth_wrap; sc->ndis_block->nmb_ethrxdone_func = ndis_rxeof_done_wrap; sc->ndis_block->nmb_tdcond_func = ndis_rxeof_xfr_done_wrap; /* Override the status handler so we can detect link changes. */ sc->ndis_block->nmb_status_func = ndis_linksts_wrap; sc->ndis_block->nmb_statusdone_func = ndis_linksts_done_wrap; /* Set up work item handlers. */ sc->ndis_tickitem = IoAllocateWorkItem(sc->ndis_block->nmb_deviceobj); sc->ndis_startitem = IoAllocateWorkItem(sc->ndis_block->nmb_deviceobj); sc->ndis_resetitem = IoAllocateWorkItem(sc->ndis_block->nmb_deviceobj); sc->ndis_inputitem = IoAllocateWorkItem(sc->ndis_block->nmb_deviceobj); sc->ndisusb_xferdoneitem = IoAllocateWorkItem(sc->ndis_block->nmb_deviceobj); sc->ndisusb_taskitem = IoAllocateWorkItem(sc->ndis_block->nmb_deviceobj); KeInitializeDpc(&sc->ndis_rxdpc, ndis_rxeof_xfr_wrap, sc->ndis_block); /* Call driver's init routine. */ if (ndis_init_nic(sc)) { device_printf(dev, "init handler failed\n"); error = ENXIO; goto fail; } /* * Figure out how big to make the TX buffer pool. */ len = sizeof(sc->ndis_maxpkts); if (ndis_get_info(sc, OID_GEN_MAXIMUM_SEND_PACKETS, &sc->ndis_maxpkts, &len)) { device_printf(dev, "failed to get max TX packets\n"); error = ENXIO; goto fail; } /* * If this is a deserialized miniport, we don't have * to honor the OID_GEN_MAXIMUM_SEND_PACKETS result. */ if (!NDIS_SERIALIZED(sc->ndis_block)) sc->ndis_maxpkts = NDIS_TXPKTS; /* Enforce some sanity, just in case. */ if (sc->ndis_maxpkts == 0) sc->ndis_maxpkts = 10; - sc->ndis_txarray = malloc(sizeof(ndis_packet *) * - sc->ndis_maxpkts, M_DEVBUF, M_NOWAIT|M_ZERO); + sc->ndis_txarray = mallocarray(sc->ndis_maxpkts, + sizeof(ndis_packet *), M_DEVBUF, M_NOWAIT|M_ZERO); /* Allocate a pool of ndis_packets for TX encapsulation. */ NdisAllocatePacketPool(&i, &sc->ndis_txpool, sc->ndis_maxpkts, PROTOCOL_RESERVED_SIZE_IN_PACKET); if (i != NDIS_STATUS_SUCCESS) { sc->ndis_txpool = NULL; device_printf(dev, "failed to allocate TX packet pool"); error = ENOMEM; goto fail; } sc->ndis_txpending = sc->ndis_maxpkts; sc->ndis_oidcnt = 0; /* Get supported oid list. */ ndis_get_supported_oids(sc, &sc->ndis_oids, &sc->ndis_oidcnt); /* If the NDIS module requested scatter/gather, init maps. */ if (sc->ndis_sc) ndis_init_dma(sc); /* * See if the OID_802_11_CONFIGURATION OID is * supported by this driver. If it is, then this an 802.11 * wireless driver, and we should set up media for wireless. */ for (i = 0; i < sc->ndis_oidcnt; i++) if (sc->ndis_oids[i] == OID_802_11_CONFIGURATION) { sc->ndis_80211 = 1; break; } if (sc->ndis_80211) error = ndis_80211attach(sc); else error = ndis_ifattach(sc); fail: if (error) { ndis_detach(dev); return (error); } if (sc->ndis_iftype == PNPBus && ndisusb_halt == 0) return (error); DPRINTF(("attach done.\n")); /* We're done talking to the NIC for now; halt it. 
*/ ndis_halt_nic(sc); DPRINTF(("halting done.\n")); return (error); } static int ndis_80211attach(struct ndis_softc *sc) { struct ieee80211com *ic = &sc->ndis_ic; ndis_80211_rates_ex rates; struct ndis_80211_nettype_list *ntl; uint32_t arg; int mode, i, r, len, nonettypes = 1; uint8_t bands[IEEE80211_MODE_BYTES] = { 0 }; callout_init(&sc->ndis_scan_callout, 1); ic->ic_softc = sc; ic->ic_ioctl = ndis_80211ioctl; ic->ic_name = device_get_nameunit(sc->ndis_dev); ic->ic_opmode = IEEE80211_M_STA; ic->ic_phytype = IEEE80211_T_DS; ic->ic_caps = IEEE80211_C_8023ENCAP | IEEE80211_C_STA | IEEE80211_C_IBSS; setbit(ic->ic_modecaps, IEEE80211_MODE_AUTO); len = 0; r = ndis_get_info(sc, OID_802_11_NETWORK_TYPES_SUPPORTED, NULL, &len); if (r != ENOSPC) goto nonettypes; ntl = malloc(len, M_DEVBUF, M_WAITOK | M_ZERO); r = ndis_get_info(sc, OID_802_11_NETWORK_TYPES_SUPPORTED, ntl, &len); if (r != 0) { free(ntl, M_DEVBUF); goto nonettypes; } for (i = 0; i < ntl->ntl_items; i++) { mode = ndis_nettype_mode(ntl->ntl_type[i]); if (mode) { nonettypes = 0; setbit(ic->ic_modecaps, mode); setbit(bands, mode); } else device_printf(sc->ndis_dev, "Unknown nettype %d\n", ntl->ntl_type[i]); } free(ntl, M_DEVBUF); nonettypes: /* Default to 11b channels if the card did not supply any */ if (nonettypes) { setbit(ic->ic_modecaps, IEEE80211_MODE_11B); setbit(bands, IEEE80211_MODE_11B); } len = sizeof(rates); bzero((char *)&rates, len); r = ndis_get_info(sc, OID_802_11_SUPPORTED_RATES, (void *)rates, &len); if (r != 0) device_printf(sc->ndis_dev, "get rates failed: 0x%x\n", r); /* * Since the supported rates only up to 8 can be supported, * if this is not 802.11b we're just going to be faking it * all up to heck. */ #define TESTSETRATE(x, y) \ do { \ int i; \ for (i = 0; i < ic->ic_sup_rates[x].rs_nrates; i++) { \ if (ic->ic_sup_rates[x].rs_rates[i] == (y)) \ break; \ } \ if (i == ic->ic_sup_rates[x].rs_nrates) { \ ic->ic_sup_rates[x].rs_rates[i] = (y); \ ic->ic_sup_rates[x].rs_nrates++; \ } \ } while (0) #define SETRATE(x, y) \ ic->ic_sup_rates[x].rs_rates[ic->ic_sup_rates[x].rs_nrates] = (y) #define INCRATE(x) \ ic->ic_sup_rates[x].rs_nrates++ ic->ic_curmode = IEEE80211_MODE_AUTO; if (isset(ic->ic_modecaps, IEEE80211_MODE_11A)) ic->ic_sup_rates[IEEE80211_MODE_11A].rs_nrates = 0; if (isset(ic->ic_modecaps, IEEE80211_MODE_11B)) ic->ic_sup_rates[IEEE80211_MODE_11B].rs_nrates = 0; if (isset(ic->ic_modecaps, IEEE80211_MODE_11G)) ic->ic_sup_rates[IEEE80211_MODE_11G].rs_nrates = 0; for (i = 0; i < len; i++) { switch (rates[i] & IEEE80211_RATE_VAL) { case 2: case 4: case 11: case 10: case 22: if (isclr(ic->ic_modecaps, IEEE80211_MODE_11B)) { /* Lazy-init 802.11b. */ setbit(ic->ic_modecaps, IEEE80211_MODE_11B); ic->ic_sup_rates[IEEE80211_MODE_11B]. rs_nrates = 0; } SETRATE(IEEE80211_MODE_11B, rates[i]); INCRATE(IEEE80211_MODE_11B); break; default: if (isset(ic->ic_modecaps, IEEE80211_MODE_11A)) { SETRATE(IEEE80211_MODE_11A, rates[i]); INCRATE(IEEE80211_MODE_11A); } if (isset(ic->ic_modecaps, IEEE80211_MODE_11G)) { SETRATE(IEEE80211_MODE_11G, rates[i]); INCRATE(IEEE80211_MODE_11G); } break; } } /* * If the hardware supports 802.11g, it most * likely supports 802.11b and all of the * 802.11b and 802.11g speeds, so maybe we can * just cheat here. Just how in the heck do * we detect turbo modes, though? 
*/ if (isset(ic->ic_modecaps, IEEE80211_MODE_11B)) { TESTSETRATE(IEEE80211_MODE_11B, IEEE80211_RATE_BASIC|2); TESTSETRATE(IEEE80211_MODE_11B, IEEE80211_RATE_BASIC|4); TESTSETRATE(IEEE80211_MODE_11B, IEEE80211_RATE_BASIC|11); TESTSETRATE(IEEE80211_MODE_11B, IEEE80211_RATE_BASIC|22); } if (isset(ic->ic_modecaps, IEEE80211_MODE_11G)) { TESTSETRATE(IEEE80211_MODE_11G, 48); TESTSETRATE(IEEE80211_MODE_11G, 72); TESTSETRATE(IEEE80211_MODE_11G, 96); TESTSETRATE(IEEE80211_MODE_11G, 108); } if (isset(ic->ic_modecaps, IEEE80211_MODE_11A)) { TESTSETRATE(IEEE80211_MODE_11A, 48); TESTSETRATE(IEEE80211_MODE_11A, 72); TESTSETRATE(IEEE80211_MODE_11A, 96); TESTSETRATE(IEEE80211_MODE_11A, 108); } #undef SETRATE #undef INCRATE #undef TESTSETRATE ieee80211_init_channels(ic, NULL, bands); /* * To test for WPA support, we need to see if we can * set AUTHENTICATION_MODE to WPA and read it back * successfully. */ i = sizeof(arg); arg = NDIS_80211_AUTHMODE_WPA; r = ndis_set_info(sc, OID_802_11_AUTHENTICATION_MODE, &arg, &i); if (r == 0) { r = ndis_get_info(sc, OID_802_11_AUTHENTICATION_MODE, &arg, &i); if (r == 0 && arg == NDIS_80211_AUTHMODE_WPA) ic->ic_caps |= IEEE80211_C_WPA; } /* * To test for supported ciphers, we set each * available encryption type in descending order. * If ENC3 works, then we have WEP, TKIP and AES. * If only ENC2 works, then we have WEP and TKIP. * If only ENC1 works, then we have just WEP. */ i = sizeof(arg); arg = NDIS_80211_WEPSTAT_ENC3ENABLED; r = ndis_set_info(sc, OID_802_11_ENCRYPTION_STATUS, &arg, &i); if (r == 0) { ic->ic_cryptocaps |= IEEE80211_CRYPTO_WEP | IEEE80211_CRYPTO_TKIP | IEEE80211_CRYPTO_AES_CCM; goto got_crypto; } arg = NDIS_80211_WEPSTAT_ENC2ENABLED; r = ndis_set_info(sc, OID_802_11_ENCRYPTION_STATUS, &arg, &i); if (r == 0) { ic->ic_cryptocaps |= IEEE80211_CRYPTO_WEP | IEEE80211_CRYPTO_TKIP; goto got_crypto; } arg = NDIS_80211_WEPSTAT_ENC1ENABLED; r = ndis_set_info(sc, OID_802_11_ENCRYPTION_STATUS, &arg, &i); if (r == 0) ic->ic_cryptocaps |= IEEE80211_CRYPTO_WEP; got_crypto: i = sizeof(arg); r = ndis_get_info(sc, OID_802_11_POWER_MODE, &arg, &i); if (r == 0) ic->ic_caps |= IEEE80211_C_PMGT; r = ndis_get_info(sc, OID_802_11_TX_POWER_LEVEL, &arg, &i); if (r == 0) ic->ic_caps |= IEEE80211_C_TXPMGT; /* * Get station address from the driver. */ len = sizeof(ic->ic_macaddr); ndis_get_info(sc, OID_802_3_CURRENT_ADDRESS, &ic->ic_macaddr, &len); ieee80211_ifattach(ic); ic->ic_raw_xmit = ndis_raw_xmit; ic->ic_scan_start = ndis_scan_start; ic->ic_scan_end = ndis_scan_end; ic->ic_set_channel = ndis_set_channel; ic->ic_scan_curchan = ndis_scan_curchan; ic->ic_scan_mindwell = ndis_scan_mindwell; ic->ic_bsschan = IEEE80211_CHAN_ANYC; ic->ic_vap_create = ndis_vap_create; ic->ic_vap_delete = ndis_vap_delete; ic->ic_update_mcast = ndis_update_mcast; ic->ic_update_promisc = ndis_update_promisc; ic->ic_transmit = ndis_80211transmit; ic->ic_parent = ndis_80211parent; if (bootverbose) ieee80211_announce(ic); return (0); } static int ndis_ifattach(struct ndis_softc *sc) { struct ifnet *ifp; u_char eaddr[ETHER_ADDR_LEN]; int len; ifp = if_alloc(IFT_ETHER); if (ifp == NULL) return (ENOSPC); sc->ifp = ifp; ifp->if_softc = sc; /* Check for task offload support. */ ndis_probe_offload(sc); /* * Get station address from the driver. 
*/ len = sizeof(eaddr); ndis_get_info(sc, OID_802_3_CURRENT_ADDRESS, eaddr, &len); if_initname(ifp, device_get_name(sc->ndis_dev), device_get_unit(sc->ndis_dev)); ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; ifp->if_ioctl = ndis_ifioctl; ifp->if_start = ndis_ifstart; ifp->if_init = ndis_init; ifp->if_baudrate = 10000000; IFQ_SET_MAXLEN(&ifp->if_snd, 50); ifp->if_snd.ifq_drv_maxlen = 25; IFQ_SET_READY(&ifp->if_snd); ifp->if_capenable = ifp->if_capabilities; ifp->if_hwassist = sc->ndis_hwassist; ifmedia_init(&sc->ifmedia, IFM_IMASK, ndis_ifmedia_upd, ndis_ifmedia_sts); ifmedia_add(&sc->ifmedia, IFM_ETHER|IFM_10_T, 0, NULL); ifmedia_add(&sc->ifmedia, IFM_ETHER|IFM_10_T|IFM_FDX, 0, NULL); ifmedia_add(&sc->ifmedia, IFM_ETHER|IFM_100_TX, 0, NULL); ifmedia_add(&sc->ifmedia, IFM_ETHER|IFM_100_TX|IFM_FDX, 0, NULL); ifmedia_add(&sc->ifmedia, IFM_ETHER|IFM_AUTO, 0, NULL); ifmedia_set(&sc->ifmedia, IFM_ETHER|IFM_AUTO); ether_ifattach(ifp, eaddr); return (0); } static struct ieee80211vap * ndis_vap_create(struct ieee80211com *ic, const char name[IFNAMSIZ], int unit, enum ieee80211_opmode opmode, int flags, const uint8_t bssid[IEEE80211_ADDR_LEN], const uint8_t mac[IEEE80211_ADDR_LEN]) { struct ndis_vap *nvp; struct ieee80211vap *vap; if (!TAILQ_EMPTY(&ic->ic_vaps)) /* only one at a time */ return NULL; nvp = malloc(sizeof(struct ndis_vap), M_80211_VAP, M_WAITOK | M_ZERO); vap = &nvp->vap; ieee80211_vap_setup(ic, vap, name, unit, opmode, flags, bssid); /* override with driver methods */ nvp->newstate = vap->iv_newstate; vap->iv_newstate = ndis_newstate; /* complete setup */ ieee80211_vap_attach(vap, ieee80211_media_change, ndis_media_status, mac); ic->ic_opmode = opmode; /* install key handing routines */ vap->iv_key_set = ndis_add_key; vap->iv_key_delete = ndis_del_key; return vap; } static void ndis_vap_delete(struct ieee80211vap *vap) { struct ndis_vap *nvp = NDIS_VAP(vap); struct ieee80211com *ic = vap->iv_ic; struct ndis_softc *sc = ic->ic_softc; ndis_stop(sc); callout_drain(&sc->ndis_scan_callout); ieee80211_vap_detach(vap); free(nvp, M_80211_VAP); } /* * Shutdown hardware and free up resources. This can be called any * time after the mutex has been initialized. It is called in both * the error case in attach and the normal detach case so it needs * to be careful about only freeing resources that have actually been * allocated. 
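 *
 * Every teardown step below therefore follows the same guarded pattern;
 * a minimal sketch of the idiom, using one of the fields that is in fact
 * freed below (clearing the pointer afterwards keeps a repeated teardown
 * harmless):
 *
 *	if (sc->ndis_txarray != NULL) {
 *		free(sc->ndis_txarray, M_DEVBUF);
 *		sc->ndis_txarray = NULL;
 *	}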
*/ int ndis_detach(device_t dev) { struct ifnet *ifp; struct ndis_softc *sc; driver_object *drv; sc = device_get_softc(dev); NDIS_LOCK(sc); if (!sc->ndis_80211) ifp = sc->ifp; else ifp = NULL; if (ifp != NULL) ifp->if_flags &= ~IFF_UP; if (device_is_attached(dev)) { NDIS_UNLOCK(sc); ndis_stop(sc); if (sc->ndis_80211) ieee80211_ifdetach(&sc->ndis_ic); else if (ifp != NULL) ether_ifdetach(ifp); } else NDIS_UNLOCK(sc); if (sc->ndis_tickitem != NULL) IoFreeWorkItem(sc->ndis_tickitem); if (sc->ndis_startitem != NULL) IoFreeWorkItem(sc->ndis_startitem); if (sc->ndis_resetitem != NULL) IoFreeWorkItem(sc->ndis_resetitem); if (sc->ndis_inputitem != NULL) IoFreeWorkItem(sc->ndis_inputitem); if (sc->ndisusb_xferdoneitem != NULL) IoFreeWorkItem(sc->ndisusb_xferdoneitem); if (sc->ndisusb_taskitem != NULL) IoFreeWorkItem(sc->ndisusb_taskitem); bus_generic_detach(dev); ndis_unload_driver(sc); if (sc->ndis_irq) bus_release_resource(dev, SYS_RES_IRQ, 0, sc->ndis_irq); if (sc->ndis_res_io) bus_release_resource(dev, SYS_RES_IOPORT, sc->ndis_io_rid, sc->ndis_res_io); if (sc->ndis_res_mem) bus_release_resource(dev, SYS_RES_MEMORY, sc->ndis_mem_rid, sc->ndis_res_mem); if (sc->ndis_res_altmem) bus_release_resource(dev, SYS_RES_MEMORY, sc->ndis_altmem_rid, sc->ndis_res_altmem); if (ifp != NULL) if_free(ifp); if (sc->ndis_iftype == PCMCIABus) ndis_free_amem(sc); if (sc->ndis_sc) ndis_destroy_dma(sc); if (sc->ndis_txarray) free(sc->ndis_txarray, M_DEVBUF); if (!sc->ndis_80211) ifmedia_removeall(&sc->ifmedia); if (sc->ndis_txpool != NULL) NdisFreePacketPool(sc->ndis_txpool); /* Destroy the PDO for this device. */ if (sc->ndis_iftype == PCIBus) drv = windrv_lookup(0, "PCI Bus"); else if (sc->ndis_iftype == PCMCIABus) drv = windrv_lookup(0, "PCCARD Bus"); else drv = windrv_lookup(0, "USB Bus"); if (drv == NULL) panic("couldn't find driver object"); windrv_destroy_pdo(drv, dev); if (sc->ndis_iftype == PCIBus) bus_dma_tag_destroy(sc->ndis_parent_tag); return (0); } int ndis_suspend(dev) device_t dev; { struct ndis_softc *sc; struct ifnet *ifp; sc = device_get_softc(dev); ifp = sc->ifp; #ifdef notdef if (NDIS_INITIALIZED(sc)) ndis_stop(sc); #endif return (0); } int ndis_resume(dev) device_t dev; { struct ndis_softc *sc; struct ifnet *ifp; sc = device_get_softc(dev); ifp = sc->ifp; if (NDIS_INITIALIZED(sc)) ndis_init(sc); return (0); } /* * The following bunch of routines are here to support drivers that * use the NdisMEthIndicateReceive()/MiniportTransferData() mechanism. * The NdisMEthIndicateReceive() handler runs at DISPATCH_LEVEL for * serialized miniports, or IRQL <= DISPATCH_LEVEL for deserialized * miniports. */ static void ndis_rxeof_eth(adapter, ctx, addr, hdr, hdrlen, lookahead, lookaheadlen, pktlen) ndis_handle adapter; ndis_handle ctx; char *addr; void *hdr; uint32_t hdrlen; void *lookahead; uint32_t lookaheadlen; uint32_t pktlen; { ndis_miniport_block *block; uint8_t irql = 0; uint32_t status; ndis_buffer *b; ndis_packet *p; struct mbuf *m; ndis_ethpriv *priv; block = adapter; m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR); if (m == NULL) return; /* Save the data provided to us so far. 
*/ m->m_len = lookaheadlen + hdrlen; m->m_pkthdr.len = pktlen + hdrlen; m->m_next = NULL; m_copyback(m, 0, hdrlen, hdr); m_copyback(m, hdrlen, lookaheadlen, lookahead); /* Now create a fake NDIS_PACKET to hold the data */ NdisAllocatePacket(&status, &p, block->nmb_rxpool); if (status != NDIS_STATUS_SUCCESS) { m_freem(m); return; } p->np_m0 = m; b = IoAllocateMdl(m->m_data, m->m_pkthdr.len, FALSE, FALSE, NULL); if (b == NULL) { NdisFreePacket(p); m_freem(m); return; } p->np_private.npp_head = p->np_private.npp_tail = b; p->np_private.npp_totlen = m->m_pkthdr.len; /* Save the packet RX context somewhere. */ priv = (ndis_ethpriv *)&p->np_protocolreserved; priv->nep_ctx = ctx; if (!NDIS_SERIALIZED(block)) KeAcquireSpinLock(&block->nmb_lock, &irql); InsertTailList((&block->nmb_packetlist), (&p->np_list)); if (!NDIS_SERIALIZED(block)) KeReleaseSpinLock(&block->nmb_lock, irql); } /* * NdisMEthIndicateReceiveComplete() handler, runs at DISPATCH_LEVEL * for serialized miniports, or IRQL <= DISPATCH_LEVEL for deserialized * miniports. */ static void ndis_rxeof_done(adapter) ndis_handle adapter; { struct ndis_softc *sc; ndis_miniport_block *block; block = adapter; /* Schedule transfer/RX of queued packets. */ sc = device_get_softc(block->nmb_physdeviceobj->do_devext); KeInsertQueueDpc(&sc->ndis_rxdpc, NULL, NULL); } /* * MiniportTransferData() handler, runs at DISPATCH_LEVEL. */ static void ndis_rxeof_xfr(dpc, adapter, sysarg1, sysarg2) kdpc *dpc; ndis_handle adapter; void *sysarg1; void *sysarg2; { ndis_miniport_block *block; struct ndis_softc *sc; ndis_packet *p; list_entry *l; uint32_t status; ndis_ethpriv *priv; struct ifnet *ifp; struct mbuf *m; block = adapter; sc = device_get_softc(block->nmb_physdeviceobj->do_devext); ifp = sc->ifp; KeAcquireSpinLockAtDpcLevel(&block->nmb_lock); l = block->nmb_packetlist.nle_flink; while(!IsListEmpty(&block->nmb_packetlist)) { l = RemoveHeadList((&block->nmb_packetlist)); p = CONTAINING_RECORD(l, ndis_packet, np_list); InitializeListHead((&p->np_list)); priv = (ndis_ethpriv *)&p->np_protocolreserved; m = p->np_m0; p->np_softc = sc; p->np_m0 = NULL; KeReleaseSpinLockFromDpcLevel(&block->nmb_lock); status = MSCALL6(sc->ndis_chars->nmc_transferdata_func, p, &p->np_private.npp_totlen, block, priv->nep_ctx, m->m_len, m->m_pkthdr.len - m->m_len); KeAcquireSpinLockAtDpcLevel(&block->nmb_lock); /* * If status is NDIS_STATUS_PENDING, do nothing and * wait for a callback to the ndis_rxeof_xfr_done() * handler. */ m->m_len = m->m_pkthdr.len; m->m_pkthdr.rcvif = ifp; if (status == NDIS_STATUS_SUCCESS) { IoFreeMdl(p->np_private.npp_head); NdisFreePacket(p); KeAcquireSpinLockAtDpcLevel(&sc->ndis_rxlock); mbufq_enqueue(&sc->ndis_rxqueue, m); KeReleaseSpinLockFromDpcLevel(&sc->ndis_rxlock); IoQueueWorkItem(sc->ndis_inputitem, (io_workitem_func)ndis_inputtask_wrap, WORKQUEUE_CRITICAL, sc); } if (status == NDIS_STATUS_FAILURE) m_freem(m); /* Advance to next packet */ l = block->nmb_packetlist.nle_flink; } KeReleaseSpinLockFromDpcLevel(&block->nmb_lock); } /* * NdisMTransferDataComplete() handler, runs at DISPATCH_LEVEL. 
*/ static void ndis_rxeof_xfr_done(adapter, packet, status, len) ndis_handle adapter; ndis_packet *packet; uint32_t status; uint32_t len; { ndis_miniport_block *block; struct ndis_softc *sc; struct ifnet *ifp; struct mbuf *m; block = adapter; sc = device_get_softc(block->nmb_physdeviceobj->do_devext); ifp = sc->ifp; m = packet->np_m0; IoFreeMdl(packet->np_private.npp_head); NdisFreePacket(packet); if (status != NDIS_STATUS_SUCCESS) { m_freem(m); return; } m->m_len = m->m_pkthdr.len; m->m_pkthdr.rcvif = ifp; KeAcquireSpinLockAtDpcLevel(&sc->ndis_rxlock); mbufq_enqueue(&sc->ndis_rxqueue, m); KeReleaseSpinLockFromDpcLevel(&sc->ndis_rxlock); IoQueueWorkItem(sc->ndis_inputitem, (io_workitem_func)ndis_inputtask_wrap, WORKQUEUE_CRITICAL, sc); } /* * A frame has been uploaded: pass the resulting mbuf chain up to * the higher level protocols. * * When handling received NDIS packets, the 'status' field in the * out-of-band portion of the ndis_packet has special meaning. In the * most common case, the underlying NDIS driver will set this field * to NDIS_STATUS_SUCCESS, which indicates that it's ok for us to * take possession of it. We then change the status field to * NDIS_STATUS_PENDING to tell the driver that we now own the packet, * and that we will return it at some point in the future via the * return packet handler. * * If the driver hands us a packet with a status of NDIS_STATUS_RESOURCES, * this means the driver is running out of packet/buffer resources and * wants to maintain ownership of the packet. In this case, we have to * copy the packet data into local storage and let the driver keep the * packet. */ static void ndis_rxeof(adapter, packets, pktcnt) ndis_handle adapter; ndis_packet **packets; uint32_t pktcnt; { struct ndis_softc *sc; ndis_miniport_block *block; ndis_packet *p; uint32_t s; ndis_tcpip_csum *csum; struct ifnet *ifp; struct mbuf *m0, *m; int i; block = (ndis_miniport_block *)adapter; sc = device_get_softc(block->nmb_physdeviceobj->do_devext); ifp = sc->ifp; /* * There's a slim chance the driver may indicate some packets * before we're completely ready to handle them. If we detect this, * we need to return them to the miniport and ignore them. */ if (!sc->ndis_running) { for (i = 0; i < pktcnt; i++) { p = packets[i]; if (p->np_oob.npo_status == NDIS_STATUS_SUCCESS) { p->np_refcnt++; ndis_return_packet(p); } } return; } for (i = 0; i < pktcnt; i++) { p = packets[i]; /* Stash the softc here so ptom can use it. */ p->np_softc = sc; if (ndis_ptom(&m0, p)) { device_printf(sc->ndis_dev, "ptom failed\n"); if (p->np_oob.npo_status == NDIS_STATUS_SUCCESS) ndis_return_packet(p); } else { #ifdef notdef if (p->np_oob.npo_status == NDIS_STATUS_RESOURCES) { m = m_dup(m0, M_NOWAIT); /* * NOTE: we want to destroy the mbuf here, but * we don't actually want to return it to the * driver via the return packet handler. By * bumping np_refcnt, we can prevent the * ndis_return_packet() routine from actually * doing anything. */ p->np_refcnt++; m_freem(m0); if (m == NULL) if_inc_counter(ifp, IFCOUNTER_IERRORS, 1); else m0 = m; } else p->np_oob.npo_status = NDIS_STATUS_PENDING; #endif m = m_dup(m0, M_NOWAIT); if (p->np_oob.npo_status == NDIS_STATUS_RESOURCES) p->np_refcnt++; else p->np_oob.npo_status = NDIS_STATUS_PENDING; m_freem(m0); if (m == NULL) { if_inc_counter(ifp, IFCOUNTER_IERRORS, 1); continue; } m0 = m; m0->m_pkthdr.rcvif = ifp; /* Deal with checksum offload. 
*/ if (ifp->if_capenable & IFCAP_RXCSUM && p->np_ext.npe_info[ndis_tcpipcsum_info] != NULL) { s = (uintptr_t) p->np_ext.npe_info[ndis_tcpipcsum_info]; csum = (ndis_tcpip_csum *)&s; if (csum->u.ntc_rxflags & NDIS_RXCSUM_IP_PASSED) m0->m_pkthdr.csum_flags |= CSUM_IP_CHECKED|CSUM_IP_VALID; if (csum->u.ntc_rxflags & (NDIS_RXCSUM_TCP_PASSED | NDIS_RXCSUM_UDP_PASSED)) { m0->m_pkthdr.csum_flags |= CSUM_DATA_VALID|CSUM_PSEUDO_HDR; m0->m_pkthdr.csum_data = 0xFFFF; } } KeAcquireSpinLockAtDpcLevel(&sc->ndis_rxlock); mbufq_enqueue(&sc->ndis_rxqueue, m0); KeReleaseSpinLockFromDpcLevel(&sc->ndis_rxlock); IoQueueWorkItem(sc->ndis_inputitem, (io_workitem_func)ndis_inputtask_wrap, WORKQUEUE_CRITICAL, sc); } } } /* * This routine is run at PASSIVE_LEVEL. We use this routine to pass * packets into the stack in order to avoid calling (*ifp->if_input)() * with any locks held (at DISPATCH_LEVEL, we'll be holding the * 'dispatch level' per-cpu sleep lock). */ static void ndis_inputtask(device_object *dobj, void *arg) { ndis_miniport_block *block; struct ndis_softc *sc = arg; struct mbuf *m; uint8_t irql; block = dobj->do_devext; KeAcquireSpinLock(&sc->ndis_rxlock, &irql); while ((m = mbufq_dequeue(&sc->ndis_rxqueue)) != NULL) { KeReleaseSpinLock(&sc->ndis_rxlock, irql); if ((sc->ndis_80211 != 0)) { struct ieee80211com *ic = &sc->ndis_ic; struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps); if (vap != NULL) vap->iv_deliver_data(vap, vap->iv_bss, m); } else { struct ifnet *ifp = sc->ifp; (*ifp->if_input)(ifp, m); } KeAcquireSpinLock(&sc->ndis_rxlock, &irql); } KeReleaseSpinLock(&sc->ndis_rxlock, irql); } /* * A frame was downloaded to the chip. It's safe for us to clean up * the list buffers. */ static void ndis_txeof(adapter, packet, status) ndis_handle adapter; ndis_packet *packet; ndis_status status; { struct ndis_softc *sc; ndis_miniport_block *block; struct ifnet *ifp; int idx; struct mbuf *m; block = (ndis_miniport_block *)adapter; sc = device_get_softc(block->nmb_physdeviceobj->do_devext); ifp = sc->ifp; m = packet->np_m0; idx = packet->np_txidx; if (sc->ndis_sc) bus_dmamap_unload(sc->ndis_ttag, sc->ndis_tmaps[idx]); ndis_free_packet(packet); m_freem(m); NDIS_LOCK(sc); sc->ndis_txarray[idx] = NULL; sc->ndis_txpending++; if (!sc->ndis_80211) { struct ifnet *ifp = sc->ifp; if (status == NDIS_STATUS_SUCCESS) if_inc_counter(ifp, IFCOUNTER_OPACKETS, 1); else if_inc_counter(ifp, IFCOUNTER_OERRORS, 1); ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; } sc->ndis_tx_timer = 0; NDIS_UNLOCK(sc); if (!sc->ndis_80211) IoQueueWorkItem(sc->ndis_startitem, (io_workitem_func)ndis_ifstarttask_wrap, WORKQUEUE_CRITICAL, sc); DPRINTF(("%s: ndis_ifstarttask_wrap sc=%p\n", __func__, sc)); } static void ndis_linksts(adapter, status, sbuf, slen) ndis_handle adapter; ndis_status status; void *sbuf; uint32_t slen; { ndis_miniport_block *block; struct ndis_softc *sc; block = adapter; sc = device_get_softc(block->nmb_physdeviceobj->do_devext); sc->ndis_sts = status; /* Event list is all full up, drop this one. */ NDIS_LOCK(sc); if (sc->ndis_evt[sc->ndis_evtpidx].ne_sts) { NDIS_UNLOCK(sc); return; } /* Cache the event. 
*/ if (slen) { sc->ndis_evt[sc->ndis_evtpidx].ne_buf = malloc(slen, M_TEMP, M_NOWAIT); if (sc->ndis_evt[sc->ndis_evtpidx].ne_buf == NULL) { NDIS_UNLOCK(sc); return; } bcopy((char *)sbuf, sc->ndis_evt[sc->ndis_evtpidx].ne_buf, slen); } sc->ndis_evt[sc->ndis_evtpidx].ne_sts = status; sc->ndis_evt[sc->ndis_evtpidx].ne_len = slen; NDIS_EVTINC(sc->ndis_evtpidx); NDIS_UNLOCK(sc); } static void ndis_linksts_done(adapter) ndis_handle adapter; { ndis_miniport_block *block; struct ndis_softc *sc; struct ifnet *ifp; block = adapter; sc = device_get_softc(block->nmb_physdeviceobj->do_devext); ifp = sc->ifp; if (!NDIS_INITIALIZED(sc)) return; switch (sc->ndis_sts) { case NDIS_STATUS_MEDIA_CONNECT: IoQueueWorkItem(sc->ndis_tickitem, (io_workitem_func)ndis_ticktask_wrap, WORKQUEUE_CRITICAL, sc); if (!sc->ndis_80211) IoQueueWorkItem(sc->ndis_startitem, (io_workitem_func)ndis_ifstarttask_wrap, WORKQUEUE_CRITICAL, sc); break; case NDIS_STATUS_MEDIA_DISCONNECT: if (sc->ndis_link) IoQueueWorkItem(sc->ndis_tickitem, (io_workitem_func)ndis_ticktask_wrap, WORKQUEUE_CRITICAL, sc); break; default: break; } } static void ndis_tick(xsc) void *xsc; { struct ndis_softc *sc; sc = xsc; if (sc->ndis_hang_timer && --sc->ndis_hang_timer == 0) { IoQueueWorkItem(sc->ndis_tickitem, (io_workitem_func)ndis_ticktask_wrap, WORKQUEUE_CRITICAL, sc); sc->ndis_hang_timer = sc->ndis_block->nmb_checkforhangsecs; } if (sc->ndis_tx_timer && --sc->ndis_tx_timer == 0) { if_inc_counter(sc->ifp, IFCOUNTER_OERRORS, 1); device_printf(sc->ndis_dev, "watchdog timeout\n"); IoQueueWorkItem(sc->ndis_resetitem, (io_workitem_func)ndis_resettask_wrap, WORKQUEUE_CRITICAL, sc); if (!sc->ndis_80211) IoQueueWorkItem(sc->ndis_startitem, (io_workitem_func)ndis_ifstarttask_wrap, WORKQUEUE_CRITICAL, sc); } callout_reset(&sc->ndis_stat_callout, hz, ndis_tick, sc); } static void ndis_ticktask(device_object *d, void *xsc) { struct ndis_softc *sc = xsc; ndis_checkforhang_handler hangfunc; uint8_t rval; NDIS_LOCK(sc); if (!NDIS_INITIALIZED(sc)) { NDIS_UNLOCK(sc); return; } NDIS_UNLOCK(sc); hangfunc = sc->ndis_chars->nmc_checkhang_func; if (hangfunc != NULL) { rval = MSCALL1(hangfunc, sc->ndis_block->nmb_miniportadapterctx); if (rval == TRUE) { ndis_reset_nic(sc); return; } } NDIS_LOCK(sc); if (sc->ndis_link == 0 && sc->ndis_sts == NDIS_STATUS_MEDIA_CONNECT) { sc->ndis_link = 1; if (sc->ndis_80211 != 0) { struct ieee80211com *ic = &sc->ndis_ic; struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps); if (vap != NULL) { NDIS_UNLOCK(sc); ndis_getstate_80211(sc); ieee80211_new_state(vap, IEEE80211_S_RUN, -1); NDIS_LOCK(sc); if_link_state_change(vap->iv_ifp, LINK_STATE_UP); } } else if_link_state_change(sc->ifp, LINK_STATE_UP); } if (sc->ndis_link == 1 && sc->ndis_sts == NDIS_STATUS_MEDIA_DISCONNECT) { sc->ndis_link = 0; if (sc->ndis_80211 != 0) { struct ieee80211com *ic = &sc->ndis_ic; struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps); if (vap != NULL) { NDIS_UNLOCK(sc); ieee80211_new_state(vap, IEEE80211_S_SCAN, 0); NDIS_LOCK(sc); if_link_state_change(vap->iv_ifp, LINK_STATE_DOWN); } } else if_link_state_change(sc->ifp, LINK_STATE_DOWN); } NDIS_UNLOCK(sc); } static void ndis_map_sclist(arg, segs, nseg, mapsize, error) void *arg; bus_dma_segment_t *segs; int nseg; bus_size_t mapsize; int error; { struct ndis_sc_list *sclist; int i; if (error || arg == NULL) return; sclist = arg; sclist->nsl_frags = nseg; for (i = 0; i < nseg; i++) { sclist->nsl_elements[i].nse_addr.np_quad = segs[i].ds_addr; sclist->nsl_elements[i].nse_len = segs[i].ds_len; } } static int 
ndis_raw_xmit(struct ieee80211_node *ni, struct mbuf *m, const struct ieee80211_bpf_params *params) { /* no support; just discard */ m_freem(m); ieee80211_free_node(ni); return (0); } static void ndis_update_mcast(struct ieee80211com *ic) { struct ndis_softc *sc = ic->ic_softc; ndis_setmulti(sc); } static void ndis_update_promisc(struct ieee80211com *ic) { /* not supported */ } static void ndis_ifstarttask(device_object *d, void *arg) { struct ndis_softc *sc = arg; DPRINTF(("%s: sc=%p, ifp=%p\n", __func__, sc, sc->ifp)); if (sc->ndis_80211) return; struct ifnet *ifp = sc->ifp; if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) ndis_ifstart(ifp); } /* * Main transmit routine. To make NDIS drivers happy, we need to * transform mbuf chains into NDIS packets and feed them to the * send packet routines. Most drivers allow you to send several * packets at once (up to the maxpkts limit). Unfortunately, rather * that accepting them in the form of a linked list, they expect * a contiguous array of pointers to packets. * * For those drivers which use the NDIS scatter/gather DMA mechanism, * we need to perform busdma work here. Those that use map registers * will do the mapping themselves on a buffer by buffer basis. */ static void ndis_ifstart(struct ifnet *ifp) { struct ndis_softc *sc; struct mbuf *m = NULL; ndis_packet **p0 = NULL, *p = NULL; ndis_tcpip_csum *csum; int pcnt = 0, status; sc = ifp->if_softc; NDIS_LOCK(sc); if (!sc->ndis_link || ifp->if_drv_flags & IFF_DRV_OACTIVE) { NDIS_UNLOCK(sc); return; } p0 = &sc->ndis_txarray[sc->ndis_txidx]; while(sc->ndis_txpending) { IFQ_DRV_DEQUEUE(&ifp->if_snd, m); if (m == NULL) break; NdisAllocatePacket(&status, &sc->ndis_txarray[sc->ndis_txidx], sc->ndis_txpool); if (status != NDIS_STATUS_SUCCESS) break; if (ndis_mtop(m, &sc->ndis_txarray[sc->ndis_txidx])) { IFQ_DRV_PREPEND(&ifp->if_snd, m); NDIS_UNLOCK(sc); return; } /* * Save pointer to original mbuf * so we can free it later. */ p = sc->ndis_txarray[sc->ndis_txidx]; p->np_txidx = sc->ndis_txidx; p->np_m0 = m; p->np_oob.npo_status = NDIS_STATUS_PENDING; /* * Do scatter/gather processing, if driver requested it. */ if (sc->ndis_sc) { bus_dmamap_load_mbuf(sc->ndis_ttag, sc->ndis_tmaps[sc->ndis_txidx], m, ndis_map_sclist, &p->np_sclist, BUS_DMA_NOWAIT); bus_dmamap_sync(sc->ndis_ttag, sc->ndis_tmaps[sc->ndis_txidx], BUS_DMASYNC_PREREAD); p->np_ext.npe_info[ndis_sclist_info] = &p->np_sclist; } /* Handle checksum offload. */ if (ifp->if_capenable & IFCAP_TXCSUM && m->m_pkthdr.csum_flags) { csum = (ndis_tcpip_csum *) &p->np_ext.npe_info[ndis_tcpipcsum_info]; csum->u.ntc_txflags = NDIS_TXCSUM_DO_IPV4; if (m->m_pkthdr.csum_flags & CSUM_IP) csum->u.ntc_txflags |= NDIS_TXCSUM_DO_IP; if (m->m_pkthdr.csum_flags & CSUM_TCP) csum->u.ntc_txflags |= NDIS_TXCSUM_DO_TCP; if (m->m_pkthdr.csum_flags & CSUM_UDP) csum->u.ntc_txflags |= NDIS_TXCSUM_DO_UDP; p->np_private.npp_flags = NDIS_PROTOCOL_ID_TCP_IP; } NDIS_INC(sc); sc->ndis_txpending--; pcnt++; /* * If there's a BPF listener, bounce a copy of this frame * to him. */ if (!sc->ndis_80211) /* XXX handle 80211 */ BPF_MTAP(ifp, m); /* * The array that p0 points to must appear contiguous, * so we must not wrap past the end of sc->ndis_txarray[]. * If it looks like we're about to wrap, break out here * so the this batch of packets can be transmitted, then * wait for txeof to ask us to send the rest. 
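 *
 * A minimal sketch of that constraint, assuming NDIS_INC() advances
 * sc->ndis_txidx modulo sc->ndis_maxpkts (an assumption; see the macro
 * definition in the driver header):
 *
 *	p0 = &sc->ndis_txarray[start];	// batch must stay contiguous
 *	sc->ndis_txidx = (sc->ndis_txidx + 1) % sc->ndis_maxpkts;
 *	if (sc->ndis_txidx == 0)
 *		break;			// hand off the batch before wrapping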
*/ if (sc->ndis_txidx == 0) break; } if (pcnt == 0) { NDIS_UNLOCK(sc); return; } if (sc->ndis_txpending == 0) ifp->if_drv_flags |= IFF_DRV_OACTIVE; /* * Set a timeout in case the chip goes out to lunch. */ sc->ndis_tx_timer = 5; NDIS_UNLOCK(sc); /* * According to NDIS documentation, if a driver exports * a MiniportSendPackets() routine, we prefer that over * a MiniportSend() routine (which sends just a single * packet). */ if (sc->ndis_chars->nmc_sendmulti_func != NULL) ndis_send_packets(sc, p0, pcnt); else ndis_send_packet(sc, p); return; } static int ndis_80211transmit(struct ieee80211com *ic, struct mbuf *m) { struct ndis_softc *sc = ic->ic_softc; ndis_packet **p0 = NULL, *p = NULL; int status; NDIS_LOCK(sc); if (!sc->ndis_link || !sc->ndis_running) { NDIS_UNLOCK(sc); return (ENXIO); } if (sc->ndis_txpending == 0) { NDIS_UNLOCK(sc); return (ENOBUFS); } p0 = &sc->ndis_txarray[sc->ndis_txidx]; NdisAllocatePacket(&status, &sc->ndis_txarray[sc->ndis_txidx], sc->ndis_txpool); if (status != NDIS_STATUS_SUCCESS) { NDIS_UNLOCK(sc); return (ENOBUFS); } if (ndis_mtop(m, &sc->ndis_txarray[sc->ndis_txidx])) { NDIS_UNLOCK(sc); return (ENOBUFS); } /* * Save pointer to original mbuf * so we can free it later. */ p = sc->ndis_txarray[sc->ndis_txidx]; p->np_txidx = sc->ndis_txidx; p->np_m0 = m; p->np_oob.npo_status = NDIS_STATUS_PENDING; /* * Do scatter/gather processing, if driver requested it. */ if (sc->ndis_sc) { bus_dmamap_load_mbuf(sc->ndis_ttag, sc->ndis_tmaps[sc->ndis_txidx], m, ndis_map_sclist, &p->np_sclist, BUS_DMA_NOWAIT); bus_dmamap_sync(sc->ndis_ttag, sc->ndis_tmaps[sc->ndis_txidx], BUS_DMASYNC_PREREAD); p->np_ext.npe_info[ndis_sclist_info] = &p->np_sclist; } NDIS_INC(sc); sc->ndis_txpending--; /* * Set a timeout in case the chip goes out to lunch. */ sc->ndis_tx_timer = 5; NDIS_UNLOCK(sc); /* * According to NDIS documentation, if a driver exports * a MiniportSendPackets() routine, we prefer that over * a MiniportSend() routine (which sends just a single * packet). */ if (sc->ndis_chars->nmc_sendmulti_func != NULL) ndis_send_packets(sc, p0, 1); else ndis_send_packet(sc, p); return (0); } static void ndis_80211parent(struct ieee80211com *ic) { struct ndis_softc *sc = ic->ic_softc; /*NDIS_LOCK(sc);*/ if (ic->ic_nrunning > 0) { if (!sc->ndis_running) ndis_init(sc); } else if (sc->ndis_running) ndis_stop(sc); /*NDIS_UNLOCK(sc);*/ } static void ndis_init(void *xsc) { struct ndis_softc *sc = xsc; int i, len, error; /* * Avoid reintializing the link unnecessarily. * This should be dealt with in a better way by * fixing the upper layer modules so they don't * call ifp->if_init() quite as often. */ if (sc->ndis_link) return; /* * Cancel pending I/O and free all RX/TX buffers. */ ndis_stop(sc); if (!(sc->ndis_iftype == PNPBus && ndisusb_halt == 0)) { error = ndis_init_nic(sc); if (error != 0) { device_printf(sc->ndis_dev, "failed to initialize the device: %d\n", error); return; } } /* Program the packet filter */ sc->ndis_filter = NDIS_PACKET_TYPE_DIRECTED | NDIS_PACKET_TYPE_BROADCAST; if (sc->ndis_80211) { struct ieee80211com *ic = &sc->ndis_ic; if (ic->ic_promisc > 0) sc->ndis_filter |= NDIS_PACKET_TYPE_PROMISCUOUS; } else { struct ifnet *ifp = sc->ifp; if (ifp->if_flags & IFF_PROMISC) sc->ndis_filter |= NDIS_PACKET_TYPE_PROMISCUOUS; } len = sizeof(sc->ndis_filter); error = ndis_set_info(sc, OID_GEN_CURRENT_PACKET_FILTER, &sc->ndis_filter, &len); if (error) device_printf(sc->ndis_dev, "set filter failed: %d\n", error); /* * Set lookahead. 
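/*
 * Sketch of the OID set/get convention used throughout this file: the
 * caller passes an OID, a buffer, and a length by reference, and the
 * length is updated to what was actually transferred.  set_oid() below
 * is a hypothetical stand-in for ndis_set_info(), and the filter bits
 * are placeholders mirroring the NDIS_PACKET_TYPE_* flags used in
 * ndis_init() above; this is not the driver's code.
 */
#include <stdint.h>

#define FILT_DIRECTED	0x0001		/* placeholder values */
#define FILT_BROADCAST	0x0008
#define FILT_PROMISC	0x0020

static int
set_oid(uint32_t oid, void *buf, int *len)
{
	/* A real implementation hands this down to the miniport driver. */
	(void)oid; (void)buf; (void)len;
	return (0);
}

static int
program_filter(int promisc)
{
	uint32_t filter = FILT_DIRECTED | FILT_BROADCAST;
	int len = sizeof(filter);

	if (promisc)
		filter |= FILT_PROMISC;
	return (set_oid(/* OID_GEN_CURRENT_PACKET_FILTER */ 0, &filter, &len));
}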
*/ if (sc->ndis_80211) i = ETHERMTU; else i = sc->ifp->if_mtu; len = sizeof(i); ndis_set_info(sc, OID_GEN_CURRENT_LOOKAHEAD, &i, &len); /* * Program the multicast filter, if necessary. */ ndis_setmulti(sc); /* Setup task offload. */ ndis_set_offload(sc); NDIS_LOCK(sc); sc->ndis_txidx = 0; sc->ndis_txpending = sc->ndis_maxpkts; sc->ndis_link = 0; if (!sc->ndis_80211) { if_link_state_change(sc->ifp, LINK_STATE_UNKNOWN); sc->ifp->if_drv_flags |= IFF_DRV_RUNNING; sc->ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; } sc->ndis_tx_timer = 0; /* * Some drivers don't set this value. The NDIS spec says * the default checkforhang timeout is "approximately 2 * seconds." We use 3 seconds, because it seems for some * drivers, exactly 2 seconds is too fast. */ if (sc->ndis_block->nmb_checkforhangsecs == 0) sc->ndis_block->nmb_checkforhangsecs = 3; sc->ndis_hang_timer = sc->ndis_block->nmb_checkforhangsecs; callout_reset(&sc->ndis_stat_callout, hz, ndis_tick, sc); sc->ndis_running = 1; NDIS_UNLOCK(sc); /* XXX force handling */ if (sc->ndis_80211) ieee80211_start_all(&sc->ndis_ic); /* start all vap's */ } /* * Set media options. */ static int ndis_ifmedia_upd(ifp) struct ifnet *ifp; { struct ndis_softc *sc; sc = ifp->if_softc; if (NDIS_INITIALIZED(sc)) ndis_init(sc); return (0); } /* * Report current media status. */ static void ndis_ifmedia_sts(ifp, ifmr) struct ifnet *ifp; struct ifmediareq *ifmr; { struct ndis_softc *sc; uint32_t media_info; ndis_media_state linkstate; int len; ifmr->ifm_status = IFM_AVALID; ifmr->ifm_active = IFM_ETHER; sc = ifp->if_softc; if (!NDIS_INITIALIZED(sc)) return; len = sizeof(linkstate); ndis_get_info(sc, OID_GEN_MEDIA_CONNECT_STATUS, (void *)&linkstate, &len); len = sizeof(media_info); ndis_get_info(sc, OID_GEN_LINK_SPEED, (void *)&media_info, &len); if (linkstate == nmc_connected) ifmr->ifm_status |= IFM_ACTIVE; switch (media_info) { case 100000: ifmr->ifm_active |= IFM_10_T; break; case 1000000: ifmr->ifm_active |= IFM_100_TX; break; case 10000000: ifmr->ifm_active |= IFM_1000_T; break; default: device_printf(sc->ndis_dev, "unknown speed: %d\n", media_info); break; } } static int ndis_set_cipher(struct ndis_softc *sc, int cipher) { struct ieee80211com *ic = &sc->ndis_ic; int rval = 0, len; uint32_t arg, save; len = sizeof(arg); if (cipher == WPA_CSE_WEP40 || cipher == WPA_CSE_WEP104) { if (!(ic->ic_cryptocaps & IEEE80211_CRYPTO_WEP)) return (ENOTSUP); arg = NDIS_80211_WEPSTAT_ENC1ENABLED; } if (cipher == WPA_CSE_TKIP) { if (!(ic->ic_cryptocaps & IEEE80211_CRYPTO_TKIP)) return (ENOTSUP); arg = NDIS_80211_WEPSTAT_ENC2ENABLED; } if (cipher == WPA_CSE_CCMP) { if (!(ic->ic_cryptocaps & IEEE80211_CRYPTO_AES_CCM)) return (ENOTSUP); arg = NDIS_80211_WEPSTAT_ENC3ENABLED; } DPRINTF(("Setting cipher to %d\n", arg)); save = arg; rval = ndis_set_info(sc, OID_802_11_ENCRYPTION_STATUS, &arg, &len); if (rval) return (rval); /* Check that the cipher was set correctly. */ len = sizeof(save); rval = ndis_get_info(sc, OID_802_11_ENCRYPTION_STATUS, &arg, &len); if (rval != 0 || arg != save) return (ENODEV); return (0); } /* * WPA is hairy to set up. Do the work in a separate routine * so we don't clutter the setstate function too much. * Important yet undocumented fact: first we have to set the * authentication mode, _then_ we enable the ciphers. If one * of the WPA authentication modes isn't enabled, the driver * might not permit the TKIP or AES ciphers to be selected. 
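/*
 * The switch in ndis_ifmedia_sts() above compares raw OID_GEN_LINK_SPEED
 * values; that OID reports the rate in units of 100 bit/s, which is why
 * 100000 maps to 10 Mbit/s media and 1000000 to 100 Mbit/s.  A table-free
 * version of the same conversion (illustrative helper only):
 */
#include <stdint.h>

static unsigned int
link_speed_mbps(uint32_t speed_100bps)
{
	return (speed_100bps / 10000);	/* 10000 x 100 bit/s = 1 Mbit/s */
}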
*/ static int ndis_set_wpa(sc, ie, ielen) struct ndis_softc *sc; void *ie; int ielen; { struct ieee80211_ie_wpa *w; struct ndis_ie *n; char *pos; uint32_t arg; int i; /* * Apparently, the only way for us to know what ciphers * and key management/authentication mode to use is for * us to inspect the optional information element (IE) * stored in the 802.11 state machine. This IE should be * supplied by the WPA supplicant. */ w = (struct ieee80211_ie_wpa *)ie; /* Check for the right kind of IE. */ if (w->wpa_id != IEEE80211_ELEMID_VENDOR) { DPRINTF(("Incorrect IE type %d\n", w->wpa_id)); return (EINVAL); } /* Skip over the ucast cipher OIDs. */ pos = (char *)&w->wpa_uciphers[0]; pos += w->wpa_uciphercnt * sizeof(struct ndis_ie); /* Skip over the authmode count. */ pos += sizeof(u_int16_t); /* * Check for the authentication modes. I'm * pretty sure there's only supposed to be one. */ n = (struct ndis_ie *)pos; if (n->ni_val == WPA_ASE_NONE) arg = NDIS_80211_AUTHMODE_WPANONE; if (n->ni_val == WPA_ASE_8021X_UNSPEC) arg = NDIS_80211_AUTHMODE_WPA; if (n->ni_val == WPA_ASE_8021X_PSK) arg = NDIS_80211_AUTHMODE_WPAPSK; DPRINTF(("Setting WPA auth mode to %d\n", arg)); i = sizeof(arg); if (ndis_set_info(sc, OID_802_11_AUTHENTICATION_MODE, &arg, &i)) return (ENOTSUP); i = sizeof(arg); ndis_get_info(sc, OID_802_11_AUTHENTICATION_MODE, &arg, &i); /* Now configure the desired ciphers. */ /* First, set up the multicast group cipher. */ n = (struct ndis_ie *)&w->wpa_mcipher[0]; if (ndis_set_cipher(sc, n->ni_val)) return (ENOTSUP); /* Now start looking around for the unicast ciphers. */ pos = (char *)&w->wpa_uciphers[0]; n = (struct ndis_ie *)pos; for (i = 0; i < w->wpa_uciphercnt; i++) { if (ndis_set_cipher(sc, n->ni_val)) return (ENOTSUP); n++; } return (0); } static void ndis_media_status(struct ifnet *ifp, struct ifmediareq *imr) { struct ieee80211vap *vap = ifp->if_softc; struct ndis_softc *sc = vap->iv_ic->ic_softc; uint32_t txrate; int len; if (!NDIS_INITIALIZED(sc)) return; len = sizeof(txrate); if (ndis_get_info(sc, OID_GEN_LINK_SPEED, &txrate, &len) == 0) vap->iv_bss->ni_txrate = txrate / 5000; ieee80211_media_status(ifp, imr); } static void ndis_setstate_80211(struct ndis_softc *sc) { struct ieee80211com *ic = &sc->ndis_ic; struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps); ndis_80211_macaddr bssid; ndis_80211_config config; int rval = 0, len; uint32_t arg; if (!NDIS_INITIALIZED(sc)) { DPRINTF(("%s: NDIS not initialized\n", __func__)); return; } /* Disassociate and turn off radio. */ len = sizeof(arg); arg = 1; ndis_set_info(sc, OID_802_11_DISASSOCIATE, &arg, &len); /* Set network infrastructure mode. */ len = sizeof(arg); if (ic->ic_opmode == IEEE80211_M_IBSS) arg = NDIS_80211_NET_INFRA_IBSS; else arg = NDIS_80211_NET_INFRA_BSS; rval = ndis_set_info(sc, OID_802_11_INFRASTRUCTURE_MODE, &arg, &len); if (rval) device_printf (sc->ndis_dev, "set infra failed: %d\n", rval); /* Set power management */ len = sizeof(arg); if (vap->iv_flags & IEEE80211_F_PMGTON) arg = NDIS_80211_POWERMODE_FAST_PSP; else arg = NDIS_80211_POWERMODE_CAM; ndis_set_info(sc, OID_802_11_POWER_MODE, &arg, &len); /* Set TX power */ if ((ic->ic_caps & IEEE80211_C_TXPMGT) && ic->ic_txpowlimit < nitems(dBm2mW)) { arg = dBm2mW[ic->ic_txpowlimit]; len = sizeof(arg); ndis_set_info(sc, OID_802_11_TX_POWER_LEVEL, &arg, &len); } /* * Default encryption mode to off, authentication * to open and privacy to 'accept everything.' 
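/*
 * Sketch of the information-element walk ndis_set_wpa() performs above:
 * the unicast-cipher list is a 16-bit little-endian count followed by
 * that many 4-byte OUI+type selectors, and the authentication-suite
 * list starts right after it.  Layout and names are simplified
 * stand-ins for struct ieee80211_ie_wpa / struct ndis_ie, not the real
 * definitions.
 */
#include <stddef.h>
#include <stdint.h>

struct suite {
	uint8_t oui[3];
	uint8_t type;
};

/* Return a pointer just past a counted suite list, or NULL if short. */
static const uint8_t *
skip_suite_list(const uint8_t *p, size_t avail)
{
	uint16_t cnt;

	if (avail < 2)
		return (NULL);
	cnt = p[0] | (p[1] << 8);	/* little-endian count */
	p += 2;
	avail -= 2;
	if (avail < (size_t)cnt * sizeof(struct suite))
		return (NULL);
	return (p + cnt * sizeof(struct suite));
}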
*/ len = sizeof(arg); arg = NDIS_80211_WEPSTAT_DISABLED; ndis_set_info(sc, OID_802_11_ENCRYPTION_STATUS, &arg, &len); len = sizeof(arg); arg = NDIS_80211_AUTHMODE_OPEN; ndis_set_info(sc, OID_802_11_AUTHENTICATION_MODE, &arg, &len); /* * Note that OID_802_11_PRIVACY_FILTER is optional: * not all drivers implement it. */ len = sizeof(arg); arg = NDIS_80211_PRIVFILT_8021XWEP; ndis_set_info(sc, OID_802_11_PRIVACY_FILTER, &arg, &len); len = sizeof(config); bzero((char *)&config, len); config.nc_length = len; config.nc_fhconfig.ncf_length = sizeof(ndis_80211_config_fh); rval = ndis_get_info(sc, OID_802_11_CONFIGURATION, &config, &len); /* * Some drivers expect us to initialize these values, so * provide some defaults. */ if (config.nc_beaconperiod == 0) config.nc_beaconperiod = 100; if (config.nc_atimwin == 0) config.nc_atimwin = 100; if (config.nc_fhconfig.ncf_dwelltime == 0) config.nc_fhconfig.ncf_dwelltime = 200; if (rval == 0 && ic->ic_bsschan != IEEE80211_CHAN_ANYC) { int chan, chanflag; chan = ieee80211_chan2ieee(ic, ic->ic_bsschan); chanflag = config.nc_dsconfig > 2500000 ? IEEE80211_CHAN_2GHZ : IEEE80211_CHAN_5GHZ; if (chan != ieee80211_mhz2ieee(config.nc_dsconfig / 1000, 0)) { config.nc_dsconfig = ic->ic_bsschan->ic_freq * 1000; len = sizeof(config); config.nc_length = len; config.nc_fhconfig.ncf_length = sizeof(ndis_80211_config_fh); DPRINTF(("Setting channel to %ukHz\n", config.nc_dsconfig)); rval = ndis_set_info(sc, OID_802_11_CONFIGURATION, &config, &len); if (rval) device_printf(sc->ndis_dev, "couldn't change " "DS config to %ukHz: %d\n", config.nc_dsconfig, rval); } } else if (rval) device_printf(sc->ndis_dev, "couldn't retrieve " "channel info: %d\n", rval); /* Set the BSSID to our value so the driver doesn't associate */ len = IEEE80211_ADDR_LEN; bcopy(vap->iv_myaddr, bssid, len); DPRINTF(("Setting BSSID to %6D\n", (uint8_t *)&bssid, ":")); rval = ndis_set_info(sc, OID_802_11_BSSID, &bssid, &len); if (rval) device_printf(sc->ndis_dev, "setting BSSID failed: %d\n", rval); } static void ndis_auth_and_assoc(struct ndis_softc *sc, struct ieee80211vap *vap) { struct ieee80211_node *ni = vap->iv_bss; ndis_80211_ssid ssid; ndis_80211_macaddr bssid; ndis_80211_wep wep; int i, rval = 0, len, error; uint32_t arg; if (!NDIS_INITIALIZED(sc)) { DPRINTF(("%s: NDIS not initialized\n", __func__)); return; } /* Initial setup */ ndis_setstate_80211(sc); /* Set network infrastructure mode. 
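/*
 * ndis_setstate_80211() above converts between net80211 channel
 * frequencies (MHz) and the nc_dsconfig field of OID_802_11_CONFIGURATION
 * (kHz), hence the "* 1000" and "/ 1000" pairs around the channel setup.
 * Illustrative helpers only:
 */
#include <stdint.h>

static uint32_t
mhz_to_dsconfig(uint16_t freq_mhz)
{
	return ((uint32_t)freq_mhz * 1000);		/* MHz -> kHz */
}

static uint16_t
dsconfig_to_mhz(uint32_t dsconfig_khz)
{
	return ((uint16_t)(dsconfig_khz / 1000));	/* kHz -> MHz */
}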
*/ len = sizeof(arg); if (vap->iv_opmode == IEEE80211_M_IBSS) arg = NDIS_80211_NET_INFRA_IBSS; else arg = NDIS_80211_NET_INFRA_BSS; rval = ndis_set_info(sc, OID_802_11_INFRASTRUCTURE_MODE, &arg, &len); if (rval) device_printf (sc->ndis_dev, "set infra failed: %d\n", rval); /* Set RTS threshold */ len = sizeof(arg); arg = vap->iv_rtsthreshold; ndis_set_info(sc, OID_802_11_RTS_THRESHOLD, &arg, &len); /* Set fragmentation threshold */ len = sizeof(arg); arg = vap->iv_fragthreshold; ndis_set_info(sc, OID_802_11_FRAGMENTATION_THRESHOLD, &arg, &len); /* Set WEP */ if (vap->iv_flags & IEEE80211_F_PRIVACY && !(vap->iv_flags & IEEE80211_F_WPA)) { int keys_set = 0; if (ni->ni_authmode == IEEE80211_AUTH_SHARED) { len = sizeof(arg); arg = NDIS_80211_AUTHMODE_SHARED; DPRINTF(("Setting shared auth\n")); ndis_set_info(sc, OID_802_11_AUTHENTICATION_MODE, &arg, &len); } for (i = 0; i < IEEE80211_WEP_NKID; i++) { if (vap->iv_nw_keys[i].wk_keylen) { if (vap->iv_nw_keys[i].wk_cipher->ic_cipher != IEEE80211_CIPHER_WEP) continue; bzero((char *)&wep, sizeof(wep)); wep.nw_keylen = vap->iv_nw_keys[i].wk_keylen; /* * 5, 13 and 16 are the only valid * key lengths. Anything in between * will be zero padded out to the * next highest boundary. */ if (vap->iv_nw_keys[i].wk_keylen < 5) wep.nw_keylen = 5; else if (vap->iv_nw_keys[i].wk_keylen > 5 && vap->iv_nw_keys[i].wk_keylen < 13) wep.nw_keylen = 13; else if (vap->iv_nw_keys[i].wk_keylen > 13 && vap->iv_nw_keys[i].wk_keylen < 16) wep.nw_keylen = 16; wep.nw_keyidx = i; wep.nw_length = (sizeof(uint32_t) * 3) + wep.nw_keylen; if (i == vap->iv_def_txkey) wep.nw_keyidx |= NDIS_80211_WEPKEY_TX; bcopy(vap->iv_nw_keys[i].wk_key, wep.nw_keydata, wep.nw_length); len = sizeof(wep); DPRINTF(("Setting WEP key %d\n", i)); rval = ndis_set_info(sc, OID_802_11_ADD_WEP, &wep, &len); if (rval) device_printf(sc->ndis_dev, "set wepkey failed: %d\n", rval); keys_set++; } } if (keys_set) { DPRINTF(("Setting WEP on\n")); arg = NDIS_80211_WEPSTAT_ENABLED; len = sizeof(arg); rval = ndis_set_info(sc, OID_802_11_WEP_STATUS, &arg, &len); if (rval) device_printf(sc->ndis_dev, "enable WEP failed: %d\n", rval); if (vap->iv_flags & IEEE80211_F_DROPUNENC) arg = NDIS_80211_PRIVFILT_8021XWEP; else arg = NDIS_80211_PRIVFILT_ACCEPTALL; len = sizeof(arg); ndis_set_info(sc, OID_802_11_PRIVACY_FILTER, &arg, &len); } } /* Set up WPA. */ if ((vap->iv_flags & IEEE80211_F_WPA) && vap->iv_appie_assocreq != NULL) { struct ieee80211_appie *ie = vap->iv_appie_assocreq; error = ndis_set_wpa(sc, ie->ie_data, ie->ie_len); if (error != 0) device_printf(sc->ndis_dev, "WPA setup failed\n"); } #ifdef notyet /* Set network type. */ arg = 0; switch (vap->iv_curmode) { case IEEE80211_MODE_11A: arg = NDIS_80211_NETTYPE_11OFDM5; break; case IEEE80211_MODE_11B: arg = NDIS_80211_NETTYPE_11DS; break; case IEEE80211_MODE_11G: arg = NDIS_80211_NETTYPE_11OFDM24; break; default: device_printf(sc->ndis_dev, "unknown mode: %d\n", vap->iv_curmode); } if (arg) { DPRINTF(("Setting network type to %d\n", arg)); len = sizeof(arg); rval = ndis_set_info(sc, OID_802_11_NETWORK_TYPE_IN_USE, &arg, &len); if (rval) device_printf(sc->ndis_dev, "set nettype failed: %d\n", rval); } #endif /* * If the user selected a specific BSSID, try * to use that one. This is useful in the case where * there are several APs in range with the same network * name. To delete the BSSID, we use the broadcast * address as the BSSID. 
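/*
 * Sketch of the WEP key-length rounding done in ndis_auth_and_assoc()
 * above: as the in-line comment notes, 5, 13 and 16 bytes are the only
 * lengths the OID accepts, so anything in between is bumped up to the
 * next boundary and the extra bytes end up zero padded.  Assumes
 * keylen <= 16; illustrative helper, not part of the driver.
 */
static int
wep_round_keylen(int keylen)
{
	if (keylen <= 5)
		return (5);
	if (keylen <= 13)
		return (13);
	return (16);
}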
* Note that some drivers seem to allow setting a BSSID * in ad-hoc mode, which has the effect of forcing the * NIC to create an ad-hoc cell with a specific BSSID, * instead of a randomly chosen one. However, the net80211 * code makes the assumtion that the BSSID setting is invalid * when you're in ad-hoc mode, so we don't allow that here. */ len = IEEE80211_ADDR_LEN; if (vap->iv_flags & IEEE80211_F_DESBSSID && vap->iv_opmode != IEEE80211_M_IBSS) bcopy(ni->ni_bssid, bssid, len); else bcopy(ieee80211broadcastaddr, bssid, len); DPRINTF(("Setting BSSID to %6D\n", (uint8_t *)&bssid, ":")); rval = ndis_set_info(sc, OID_802_11_BSSID, &bssid, &len); if (rval) device_printf(sc->ndis_dev, "setting BSSID failed: %d\n", rval); /* Set SSID -- always do this last. */ #ifdef NDIS_DEBUG if (ndis_debug > 0) { printf("Setting ESSID to "); ieee80211_print_essid(ni->ni_essid, ni->ni_esslen); printf("\n"); } #endif len = sizeof(ssid); bzero((char *)&ssid, len); ssid.ns_ssidlen = ni->ni_esslen; if (ssid.ns_ssidlen == 0) { ssid.ns_ssidlen = 1; } else bcopy(ni->ni_essid, ssid.ns_ssid, ssid.ns_ssidlen); rval = ndis_set_info(sc, OID_802_11_SSID, &ssid, &len); if (rval) device_printf (sc->ndis_dev, "set ssid failed: %d\n", rval); return; } static int ndis_get_bssid_list(sc, bl) struct ndis_softc *sc; ndis_80211_bssid_list_ex **bl; { int len, error; len = sizeof(uint32_t) + (sizeof(ndis_wlan_bssid_ex) * 16); *bl = malloc(len, M_DEVBUF, M_NOWAIT | M_ZERO); if (*bl == NULL) return (ENOMEM); error = ndis_get_info(sc, OID_802_11_BSSID_LIST, *bl, &len); if (error == ENOSPC) { free(*bl, M_DEVBUF); *bl = malloc(len, M_DEVBUF, M_NOWAIT | M_ZERO); if (*bl == NULL) return (ENOMEM); error = ndis_get_info(sc, OID_802_11_BSSID_LIST, *bl, &len); } if (error) { DPRINTF(("%s: failed to read\n", __func__)); free(*bl, M_DEVBUF); return (error); } return (0); } static int ndis_get_assoc(struct ndis_softc *sc, ndis_wlan_bssid_ex **assoc) { struct ieee80211com *ic = &sc->ndis_ic; struct ieee80211vap *vap; struct ieee80211_node *ni; ndis_80211_bssid_list_ex *bl; ndis_wlan_bssid_ex *bs; ndis_80211_macaddr bssid; int i, len, error; if (!sc->ndis_link) return (ENOENT); len = sizeof(bssid); error = ndis_get_info(sc, OID_802_11_BSSID, &bssid, &len); if (error) { device_printf(sc->ndis_dev, "failed to get bssid\n"); return (ENOENT); } vap = TAILQ_FIRST(&ic->ic_vaps); ni = vap->iv_bss; error = ndis_get_bssid_list(sc, &bl); if (error) return (error); bs = (ndis_wlan_bssid_ex *)&bl->nblx_bssid[0]; for (i = 0; i < bl->nblx_items; i++) { if (bcmp(bs->nwbx_macaddr, bssid, sizeof(bssid)) == 0) { *assoc = malloc(bs->nwbx_len, M_TEMP, M_NOWAIT); if (*assoc == NULL) { free(bl, M_TEMP); return (ENOMEM); } bcopy((char *)bs, (char *)*assoc, bs->nwbx_len); free(bl, M_TEMP); if (ic->ic_opmode == IEEE80211_M_STA) ni->ni_associd = 1 | 0xc000; /* fake associd */ return (0); } bs = (ndis_wlan_bssid_ex *)((char *)bs + bs->nwbx_len); } free(bl, M_TEMP); return (ENOENT); } static void ndis_getstate_80211(struct ndis_softc *sc) { struct ieee80211com *ic = &sc->ndis_ic; struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps); struct ieee80211_node *ni = vap->iv_bss; ndis_wlan_bssid_ex *bs; int rval, len, i = 0; int chanflag; uint32_t arg; if (!NDIS_INITIALIZED(sc)) return; if ((rval = ndis_get_assoc(sc, &bs)) != 0) return; /* We're associated, retrieve info on the current bssid. 
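/*
 * Sketch of the variable-length list walk used by ndis_get_assoc()
 * above: each BSSID entry records its own byte length, so advancing
 * means stepping the pointer by that many bytes rather than by a fixed
 * element size.  Types and field names below are illustrative, not the
 * ndis_wlan_bssid_ex definitions.
 */
#include <stddef.h>
#include <stdint.h>
#include <string.h>

struct var_rec {
	uint32_t len;		/* total length of this record in bytes */
	uint8_t  macaddr[6];
	/* ... more fixed fields, then variable-length IEs ... */
};

static const struct var_rec *
find_by_mac(const void *list, uint32_t nitems, const uint8_t mac[6])
{
	const struct var_rec *r = list;
	uint32_t i;

	for (i = 0; i < nitems; i++) {
		if (memcmp(r->macaddr, mac, 6) == 0)
			return (r);
		r = (const struct var_rec *)((const char *)r + r->len);
	}
	return (NULL);
}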
*/ ic->ic_curmode = ndis_nettype_mode(bs->nwbx_nettype); chanflag = ndis_nettype_chan(bs->nwbx_nettype); IEEE80211_ADDR_COPY(ni->ni_bssid, bs->nwbx_macaddr); /* Get SSID from current association info. */ bcopy(bs->nwbx_ssid.ns_ssid, ni->ni_essid, bs->nwbx_ssid.ns_ssidlen); ni->ni_esslen = bs->nwbx_ssid.ns_ssidlen; if (ic->ic_caps & IEEE80211_C_PMGT) { len = sizeof(arg); rval = ndis_get_info(sc, OID_802_11_POWER_MODE, &arg, &len); if (rval) device_printf(sc->ndis_dev, "get power mode failed: %d\n", rval); if (arg == NDIS_80211_POWERMODE_CAM) vap->iv_flags &= ~IEEE80211_F_PMGTON; else vap->iv_flags |= IEEE80211_F_PMGTON; } /* Get TX power */ if (ic->ic_caps & IEEE80211_C_TXPMGT) { len = sizeof(arg); ndis_get_info(sc, OID_802_11_TX_POWER_LEVEL, &arg, &len); for (i = 0; i < nitems(dBm2mW); i++) if (dBm2mW[i] >= arg) break; ic->ic_txpowlimit = i; } /* * Use the current association information to reflect * what channel we're on. */ ic->ic_curchan = ieee80211_find_channel(ic, bs->nwbx_config.nc_dsconfig / 1000, chanflag); if (ic->ic_curchan == NULL) ic->ic_curchan = &ic->ic_channels[0]; ni->ni_chan = ic->ic_curchan; ic->ic_bsschan = ic->ic_curchan; free(bs, M_TEMP); /* * Determine current authentication mode. */ len = sizeof(arg); rval = ndis_get_info(sc, OID_802_11_AUTHENTICATION_MODE, &arg, &len); if (rval) device_printf(sc->ndis_dev, "get authmode status failed: %d\n", rval); else { vap->iv_flags &= ~IEEE80211_F_WPA; switch (arg) { case NDIS_80211_AUTHMODE_OPEN: ni->ni_authmode = IEEE80211_AUTH_OPEN; break; case NDIS_80211_AUTHMODE_SHARED: ni->ni_authmode = IEEE80211_AUTH_SHARED; break; case NDIS_80211_AUTHMODE_AUTO: ni->ni_authmode = IEEE80211_AUTH_AUTO; break; case NDIS_80211_AUTHMODE_WPA: case NDIS_80211_AUTHMODE_WPAPSK: case NDIS_80211_AUTHMODE_WPANONE: ni->ni_authmode = IEEE80211_AUTH_WPA; vap->iv_flags |= IEEE80211_F_WPA1; break; case NDIS_80211_AUTHMODE_WPA2: case NDIS_80211_AUTHMODE_WPA2PSK: ni->ni_authmode = IEEE80211_AUTH_WPA; vap->iv_flags |= IEEE80211_F_WPA2; break; default: ni->ni_authmode = IEEE80211_AUTH_NONE; break; } } len = sizeof(arg); rval = ndis_get_info(sc, OID_802_11_WEP_STATUS, &arg, &len); if (rval) device_printf(sc->ndis_dev, "get wep status failed: %d\n", rval); if (arg == NDIS_80211_WEPSTAT_ENABLED) vap->iv_flags |= IEEE80211_F_PRIVACY|IEEE80211_F_DROPUNENC; else vap->iv_flags &= ~(IEEE80211_F_PRIVACY|IEEE80211_F_DROPUNENC); } static int ndis_ifioctl(ifp, command, data) struct ifnet *ifp; u_long command; caddr_t data; { struct ndis_softc *sc = ifp->if_softc; struct ifreq *ifr = (struct ifreq *) data; int i, error = 0; /*NDIS_LOCK(sc);*/ switch (command) { case SIOCSIFFLAGS: if (ifp->if_flags & IFF_UP) { if (sc->ndis_running && ifp->if_flags & IFF_PROMISC && !(sc->ndis_if_flags & IFF_PROMISC)) { sc->ndis_filter |= NDIS_PACKET_TYPE_PROMISCUOUS; i = sizeof(sc->ndis_filter); error = ndis_set_info(sc, OID_GEN_CURRENT_PACKET_FILTER, &sc->ndis_filter, &i); } else if (sc->ndis_running && !(ifp->if_flags & IFF_PROMISC) && sc->ndis_if_flags & IFF_PROMISC) { sc->ndis_filter &= ~NDIS_PACKET_TYPE_PROMISCUOUS; i = sizeof(sc->ndis_filter); error = ndis_set_info(sc, OID_GEN_CURRENT_PACKET_FILTER, &sc->ndis_filter, &i); } else ndis_init(sc); } else { if (sc->ndis_running) ndis_stop(sc); } sc->ndis_if_flags = ifp->if_flags; error = 0; break; case SIOCADDMULTI: case SIOCDELMULTI: ndis_setmulti(sc); error = 0; break; case SIOCGIFMEDIA: case SIOCSIFMEDIA: error = ifmedia_ioctl(ifp, ifr, &sc->ifmedia, command); break; case SIOCSIFCAP: ifp->if_capenable = ifr->ifr_reqcap; if 
(ifp->if_capenable & IFCAP_TXCSUM) ifp->if_hwassist = sc->ndis_hwassist; else ifp->if_hwassist = 0; ndis_set_offload(sc); break; default: error = ether_ioctl(ifp, command, data); break; } /*NDIS_UNLOCK(sc);*/ return(error); } static int ndis_80211ioctl(struct ieee80211com *ic, u_long cmd, void *data) { struct ndis_softc *sc = ic->ic_softc; struct ifreq *ifr = data; struct ndis_oid_data oid; struct ndis_evt evt; void *oidbuf = NULL; int error = 0; if ((error = priv_check(curthread, PRIV_DRIVER)) != 0) return (error); switch (cmd) { case SIOCGDRVSPEC: case SIOCSDRVSPEC: error = copyin(ifr->ifr_data, &oid, sizeof(oid)); if (error) break; oidbuf = malloc(oid.len, M_TEMP, M_WAITOK | M_ZERO); error = copyin(ifr->ifr_data + sizeof(oid), oidbuf, oid.len); } if (error) { free(oidbuf, M_TEMP); return (error); } switch (cmd) { case SIOCGDRVSPEC: error = ndis_get_info(sc, oid.oid, oidbuf, &oid.len); break; case SIOCSDRVSPEC: error = ndis_set_info(sc, oid.oid, oidbuf, &oid.len); break; case SIOCGPRIVATE_0: NDIS_LOCK(sc); if (sc->ndis_evt[sc->ndis_evtcidx].ne_sts == 0) { error = ENOENT; NDIS_UNLOCK(sc); break; } error = copyin(ifr->ifr_data, &evt, sizeof(evt)); if (error) { NDIS_UNLOCK(sc); break; } if (evt.ne_len < sc->ndis_evt[sc->ndis_evtcidx].ne_len) { error = ENOSPC; NDIS_UNLOCK(sc); break; } error = copyout(&sc->ndis_evt[sc->ndis_evtcidx], ifr->ifr_data, sizeof(uint32_t) * 2); if (error) { NDIS_UNLOCK(sc); break; } if (sc->ndis_evt[sc->ndis_evtcidx].ne_len) { error = copyout(sc->ndis_evt[sc->ndis_evtcidx].ne_buf, ifr->ifr_data + (sizeof(uint32_t) * 2), sc->ndis_evt[sc->ndis_evtcidx].ne_len); if (error) { NDIS_UNLOCK(sc); break; } free(sc->ndis_evt[sc->ndis_evtcidx].ne_buf, M_TEMP); sc->ndis_evt[sc->ndis_evtcidx].ne_buf = NULL; } sc->ndis_evt[sc->ndis_evtcidx].ne_len = 0; sc->ndis_evt[sc->ndis_evtcidx].ne_sts = 0; NDIS_EVTINC(sc->ndis_evtcidx); NDIS_UNLOCK(sc); break; default: error = ENOTTY; break; } switch (cmd) { case SIOCGDRVSPEC: case SIOCSDRVSPEC: error = copyout(&oid, ifr->ifr_data, sizeof(oid)); if (error) break; error = copyout(oidbuf, ifr->ifr_data + sizeof(oid), oid.len); } free(oidbuf, M_TEMP); return (error); } int ndis_del_key(struct ieee80211vap *vap, const struct ieee80211_key *key) { struct ndis_softc *sc = vap->iv_ic->ic_softc; ndis_80211_key rkey; int len, error = 0; bzero((char *)&rkey, sizeof(rkey)); len = sizeof(rkey); rkey.nk_len = len; rkey.nk_keyidx = key->wk_keyix; bcopy(vap->iv_ifp->if_broadcastaddr, rkey.nk_bssid, IEEE80211_ADDR_LEN); error = ndis_set_info(sc, OID_802_11_REMOVE_KEY, &rkey, &len); if (error) return (0); return (1); } /* * In theory this could be called for any key, but we'll * only use it for WPA TKIP or AES keys. These need to be * set after initial authentication with the AP. 
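/*
 * The ne_sts/ne_len/ne_buf slots filled by the status-event handler
 * earlier in this file and drained by the SIOCGPRIVATE_0 case above
 * form a small circular queue indexed by separate producer and
 * consumer counters.  Condensed sketch of that structure; names and
 * sizes are illustrative, not the driver's.
 */
#include <stddef.h>
#include <stdint.h>

#define EVT_RING	4
#define EVT_INC(x)	((x) = ((x) + 1) % EVT_RING)

struct evt {
	uint32_t sts;	/* status code, 0 == slot empty */
	uint32_t len;	/* length of attached buffer */
	void *buf;
};

struct evt_ring {
	struct evt slots[EVT_RING];
	int pidx;	/* producer index */
	int cidx;	/* consumer index */
};

static void
evt_post(struct evt_ring *er, uint32_t sts, void *buf, uint32_t len)
{
	struct evt *e = &er->slots[er->pidx];

	e->sts = sts;
	e->buf = buf;
	e->len = len;
	EVT_INC(er->pidx);
}

static struct evt *
evt_consume(struct evt_ring *er)
{
	struct evt *e = &er->slots[er->cidx];

	if (e->sts == 0)
		return (NULL);		/* nothing pending */
	EVT_INC(er->cidx);
	return (e);
}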
*/ static int ndis_add_key(struct ieee80211vap *vap, const struct ieee80211_key *key) { struct ndis_softc *sc = vap->iv_ic->ic_softc; ndis_80211_key rkey; int len, error = 0; switch (key->wk_cipher->ic_cipher) { case IEEE80211_CIPHER_TKIP: len = sizeof(ndis_80211_key); bzero((char *)&rkey, sizeof(rkey)); rkey.nk_len = len; rkey.nk_keylen = key->wk_keylen; if (key->wk_flags & IEEE80211_KEY_SWMIC) rkey.nk_keylen += 16; /* key index - gets weird in NDIS */ if (key->wk_keyix != IEEE80211_KEYIX_NONE) rkey.nk_keyidx = key->wk_keyix; else rkey.nk_keyidx = 0; if (key->wk_flags & IEEE80211_KEY_XMIT) rkey.nk_keyidx |= 1 << 31; if (key->wk_flags & IEEE80211_KEY_GROUP) { bcopy(ieee80211broadcastaddr, rkey.nk_bssid, IEEE80211_ADDR_LEN); } else { bcopy(vap->iv_bss->ni_bssid, rkey.nk_bssid, IEEE80211_ADDR_LEN); /* pairwise key */ rkey.nk_keyidx |= 1 << 30; } /* need to set bit 29 based on keyrsc */ rkey.nk_keyrsc = key->wk_keyrsc[0]; /* XXX need tid */ if (rkey.nk_keyrsc) rkey.nk_keyidx |= 1 << 29; if (key->wk_flags & IEEE80211_KEY_SWMIC) { bcopy(key->wk_key, rkey.nk_keydata, 16); bcopy(key->wk_key + 24, rkey.nk_keydata + 16, 8); bcopy(key->wk_key + 16, rkey.nk_keydata + 24, 8); } else bcopy(key->wk_key, rkey.nk_keydata, key->wk_keylen); error = ndis_set_info(sc, OID_802_11_ADD_KEY, &rkey, &len); break; case IEEE80211_CIPHER_WEP: error = 0; break; /* * I don't know how to set up keys for the AES * cipher yet. Is it the same as TKIP? */ case IEEE80211_CIPHER_AES_CCM: default: error = ENOTTY; break; } /* We need to return 1 for success, 0 for failure. */ if (error) return (0); return (1); } static void ndis_resettask(d, arg) device_object *d; void *arg; { struct ndis_softc *sc; sc = arg; ndis_reset_nic(sc); } /* * Stop the adapter and free any mbufs allocated to the * RX and TX lists. */ static void ndis_stop(struct ndis_softc *sc) { int i; callout_drain(&sc->ndis_stat_callout); NDIS_LOCK(sc); sc->ndis_tx_timer = 0; sc->ndis_link = 0; if (!sc->ndis_80211) sc->ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE); sc->ndis_running = 0; NDIS_UNLOCK(sc); if (sc->ndis_iftype != PNPBus || (sc->ndis_iftype == PNPBus && !(sc->ndisusb_status & NDISUSB_STATUS_DETACH) && ndisusb_halt != 0)) ndis_halt_nic(sc); NDIS_LOCK(sc); for (i = 0; i < NDIS_EVENTS; i++) { if (sc->ndis_evt[i].ne_sts && sc->ndis_evt[i].ne_buf != NULL) { free(sc->ndis_evt[i].ne_buf, M_TEMP); sc->ndis_evt[i].ne_buf = NULL; } sc->ndis_evt[i].ne_sts = 0; sc->ndis_evt[i].ne_len = 0; } sc->ndis_evtcidx = 0; sc->ndis_evtpidx = 0; NDIS_UNLOCK(sc); } /* * Stop all chip I/O so that the kernel's probe routines don't * get confused by errant DMAs when rebooting. 
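/*
 * Sketch of the OID_802_11_ADD_KEY index word assembled in
 * ndis_add_key() above: the low bits carry the key index and the top
 * bits are flags (bit 31 = transmit key, bit 30 = pairwise key, bit 29
 * = receive sequence counter supplied), matching the shifts used in the
 * code.  Illustrative helper only; the macro names are hypothetical.
 */
#include <stdint.h>

#define NDISKEY_TX	(1U << 31)
#define NDISKEY_PAIR	(1U << 30)
#define NDISKEY_RSC	(1U << 29)

static uint32_t
build_keyidx(uint32_t idx, int is_tx, int is_pairwise, uint64_t rsc)
{
	uint32_t w = idx;

	if (is_tx)
		w |= NDISKEY_TX;
	if (is_pairwise)
		w |= NDISKEY_PAIR;
	if (rsc != 0)
		w |= NDISKEY_RSC;
	return (w);
}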
*/ void ndis_shutdown(dev) device_t dev; { struct ndis_softc *sc; sc = device_get_softc(dev); ndis_stop(sc); } static int ndis_newstate(struct ieee80211vap *vap, enum ieee80211_state nstate, int arg) { struct ndis_vap *nvp = NDIS_VAP(vap); struct ieee80211com *ic = vap->iv_ic; struct ndis_softc *sc = ic->ic_softc; enum ieee80211_state ostate; DPRINTF(("%s: %s -> %s\n", __func__, ieee80211_state_name[vap->iv_state], ieee80211_state_name[nstate])); ostate = vap->iv_state; vap->iv_state = nstate; switch (nstate) { /* pass on to net80211 */ case IEEE80211_S_INIT: case IEEE80211_S_SCAN: return nvp->newstate(vap, nstate, arg); case IEEE80211_S_ASSOC: if (ostate != IEEE80211_S_AUTH) { IEEE80211_UNLOCK(ic); ndis_auth_and_assoc(sc, vap); IEEE80211_LOCK(ic); } break; case IEEE80211_S_AUTH: IEEE80211_UNLOCK(ic); ndis_auth_and_assoc(sc, vap); if (vap->iv_state == IEEE80211_S_AUTH) /* XXX */ ieee80211_new_state(vap, IEEE80211_S_ASSOC, 0); IEEE80211_LOCK(ic); break; default: break; } return (0); } static void ndis_scan(void *arg) { struct ieee80211vap *vap = arg; ieee80211_scan_done(vap); } static void ndis_scan_results(struct ndis_softc *sc) { struct ieee80211com *ic = &sc->ndis_ic; struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps); ndis_80211_bssid_list_ex *bl; ndis_wlan_bssid_ex *wb; struct ieee80211_scanparams sp; struct ieee80211_frame wh; struct ieee80211_channel *saved_chan; int i, j; int rssi, noise, freq, chanflag; uint8_t ssid[2+IEEE80211_NWID_LEN]; uint8_t rates[2+IEEE80211_RATE_MAXSIZE]; uint8_t *frm, *efrm; saved_chan = ic->ic_curchan; noise = -96; if (ndis_get_bssid_list(sc, &bl)) return; DPRINTF(("%s: %d results\n", __func__, bl->nblx_items)); wb = &bl->nblx_bssid[0]; for (i = 0; i < bl->nblx_items; i++) { memset(&sp, 0, sizeof(sp)); memcpy(wh.i_addr2, wb->nwbx_macaddr, sizeof(wh.i_addr2)); memcpy(wh.i_addr3, wb->nwbx_macaddr, sizeof(wh.i_addr3)); rssi = 100 * (wb->nwbx_rssi - noise) / (-32 - noise); rssi = max(0, min(rssi, 100)); /* limit 0 <= rssi <= 100 */ if (wb->nwbx_privacy) sp.capinfo |= IEEE80211_CAPINFO_PRIVACY; sp.bintval = wb->nwbx_config.nc_beaconperiod; switch (wb->nwbx_netinfra) { case NDIS_80211_NET_INFRA_IBSS: sp.capinfo |= IEEE80211_CAPINFO_IBSS; break; case NDIS_80211_NET_INFRA_BSS: sp.capinfo |= IEEE80211_CAPINFO_ESS; break; } sp.rates = &rates[0]; for (j = 0; j < IEEE80211_RATE_MAXSIZE; j++) { /* XXX - check units */ if (wb->nwbx_supportedrates[j] == 0) break; rates[2 + j] = wb->nwbx_supportedrates[j] & 0x7f; } rates[1] = j; sp.ssid = (uint8_t *)&ssid[0]; memcpy(sp.ssid + 2, &wb->nwbx_ssid.ns_ssid, wb->nwbx_ssid.ns_ssidlen); sp.ssid[1] = wb->nwbx_ssid.ns_ssidlen; chanflag = ndis_nettype_chan(wb->nwbx_nettype); freq = wb->nwbx_config.nc_dsconfig / 1000; sp.chan = sp.bchan = ieee80211_mhz2ieee(freq, chanflag); /* Hack ic->ic_curchan to be in sync with the scan result */ ic->ic_curchan = ieee80211_find_channel(ic, freq, chanflag); if (ic->ic_curchan == NULL) ic->ic_curchan = &ic->ic_channels[0]; /* Process extended info from AP */ if (wb->nwbx_len > sizeof(ndis_wlan_bssid)) { frm = (uint8_t *)&wb->nwbx_ies; efrm = frm + wb->nwbx_ielen; if (efrm - frm < 12) goto done; sp.tstamp = frm; frm += 8; sp.bintval = le16toh(*(uint16_t *)frm); frm += 2; sp.capinfo = le16toh(*(uint16_t *)frm); frm += 2; sp.ies = frm; sp.ies_len = efrm - frm; } done: DPRINTF(("scan: bssid %s chan %dMHz (%d/%d) rssi %d\n", ether_sprintf(wb->nwbx_macaddr), freq, sp.bchan, chanflag, rssi)); ieee80211_add_scan(vap, ic->ic_curchan, &sp, &wh, 0, rssi, noise); wb = (ndis_wlan_bssid_ex *)((char *)wb + 
wb->nwbx_len); } free(bl, M_DEVBUF); /* Restore the channel after messing with it */ ic->ic_curchan = saved_chan; } static void ndis_scan_start(struct ieee80211com *ic) { struct ndis_softc *sc = ic->ic_softc; struct ieee80211vap *vap; struct ieee80211_scan_state *ss; ndis_80211_ssid ssid; int error, len; ss = ic->ic_scan; vap = TAILQ_FIRST(&ic->ic_vaps); if (!NDIS_INITIALIZED(sc)) { DPRINTF(("%s: scan aborted\n", __func__)); ieee80211_cancel_scan(vap); return; } len = sizeof(ssid); bzero((char *)&ssid, len); if (ss->ss_nssid == 0) ssid.ns_ssidlen = 1; else { /* Perform a directed scan */ ssid.ns_ssidlen = ss->ss_ssid[0].len; bcopy(ss->ss_ssid[0].ssid, ssid.ns_ssid, ssid.ns_ssidlen); } error = ndis_set_info(sc, OID_802_11_SSID, &ssid, &len); if (error) DPRINTF(("%s: set ESSID failed\n", __func__)); len = 0; error = ndis_set_info(sc, OID_802_11_BSSID_LIST_SCAN, NULL, &len); if (error) { DPRINTF(("%s: scan command failed\n", __func__)); ieee80211_cancel_scan(vap); return; } /* Set a timer to collect the results */ callout_reset(&sc->ndis_scan_callout, hz * 3, ndis_scan, vap); } static void ndis_set_channel(struct ieee80211com *ic) { /* ignore */ } static void ndis_scan_curchan(struct ieee80211_scan_state *ss, unsigned long maxdwell) { /* ignore */ } static void ndis_scan_mindwell(struct ieee80211_scan_state *ss) { /* NB: don't try to abort scan; wait for firmware to finish */ } static void ndis_scan_end(struct ieee80211com *ic) { struct ndis_softc *sc = ic->ic_softc; ndis_scan_results(sc); } Index: head/sys/dev/iwi/if_iwi.c =================================================================== --- head/sys/dev/iwi/if_iwi.c (revision 327948) +++ head/sys/dev/iwi/if_iwi.c (revision 327949) @@ -1,3619 +1,3619 @@ /*- * SPDX-License-Identifier: BSD-2-Clause-FreeBSD * * Copyright (c) 2004, 2005 * Damien Bergamini . All rights reserved. * Copyright (c) 2005-2006 Sam Leffler, Errno Consulting * Copyright (c) 2007 Andrew Thompson * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice unmodified, this list of conditions, and the following * disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
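/*
 * In ndis_scan_results() above, each BSS's signal level (dBm) is mapped
 * onto a 0..100 scale against an assumed noise floor of -96 dBm and a
 * -32 dBm ceiling, then clamped.  Standalone version of that arithmetic
 * (illustrative only):
 */
static int
rssi_to_percent(int rssi_dbm)
{
	const int noise = -96, top = -32;
	int v = 100 * (rssi_dbm - noise) / (top - noise);

	if (v < 0)
		v = 0;
	else if (v > 100)
		v = 100;
	return (v);
}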
*/ #include __FBSDID("$FreeBSD$"); /*- * Intel(R) PRO/Wireless 2200BG/2225BG/2915ABG driver * http://www.intel.com/network/connectivity/products/wireless/prowireless_mobile.htm */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #define IWI_DEBUG #ifdef IWI_DEBUG #define DPRINTF(x) do { if (iwi_debug > 0) printf x; } while (0) #define DPRINTFN(n, x) do { if (iwi_debug >= (n)) printf x; } while (0) int iwi_debug = 0; SYSCTL_INT(_debug, OID_AUTO, iwi, CTLFLAG_RW, &iwi_debug, 0, "iwi debug level"); static const char *iwi_fw_states[] = { "IDLE", /* IWI_FW_IDLE */ "LOADING", /* IWI_FW_LOADING */ "ASSOCIATING", /* IWI_FW_ASSOCIATING */ "DISASSOCIATING", /* IWI_FW_DISASSOCIATING */ "SCANNING", /* IWI_FW_SCANNING */ }; #else #define DPRINTF(x) #define DPRINTFN(n, x) #endif MODULE_DEPEND(iwi, pci, 1, 1, 1); MODULE_DEPEND(iwi, wlan, 1, 1, 1); MODULE_DEPEND(iwi, firmware, 1, 1, 1); enum { IWI_LED_TX, IWI_LED_RX, IWI_LED_POLL, }; struct iwi_ident { uint16_t vendor; uint16_t device; const char *name; }; static const struct iwi_ident iwi_ident_table[] = { { 0x8086, 0x4220, "Intel(R) PRO/Wireless 2200BG" }, { 0x8086, 0x4221, "Intel(R) PRO/Wireless 2225BG" }, { 0x8086, 0x4223, "Intel(R) PRO/Wireless 2915ABG" }, { 0x8086, 0x4224, "Intel(R) PRO/Wireless 2915ABG" }, { 0, 0, NULL } }; static const uint8_t def_chan_2ghz[] = { 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14 }; static const uint8_t def_chan_5ghz_band1[] = { 36, 40, 44, 48, 52, 56, 60, 64 }; static const uint8_t def_chan_5ghz_band2[] = { 100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140 }; static const uint8_t def_chan_5ghz_band3[] = { 149, 153, 157, 161, 165 }; static struct ieee80211vap *iwi_vap_create(struct ieee80211com *, const char [IFNAMSIZ], int, enum ieee80211_opmode, int, const uint8_t [IEEE80211_ADDR_LEN], const uint8_t [IEEE80211_ADDR_LEN]); static void iwi_vap_delete(struct ieee80211vap *); static void iwi_dma_map_addr(void *, bus_dma_segment_t *, int, int); static int iwi_alloc_cmd_ring(struct iwi_softc *, struct iwi_cmd_ring *, int); static void iwi_reset_cmd_ring(struct iwi_softc *, struct iwi_cmd_ring *); static void iwi_free_cmd_ring(struct iwi_softc *, struct iwi_cmd_ring *); static int iwi_alloc_tx_ring(struct iwi_softc *, struct iwi_tx_ring *, int, bus_addr_t, bus_addr_t); static void iwi_reset_tx_ring(struct iwi_softc *, struct iwi_tx_ring *); static void iwi_free_tx_ring(struct iwi_softc *, struct iwi_tx_ring *); static int iwi_alloc_rx_ring(struct iwi_softc *, struct iwi_rx_ring *, int); static void iwi_reset_rx_ring(struct iwi_softc *, struct iwi_rx_ring *); static void iwi_free_rx_ring(struct iwi_softc *, struct iwi_rx_ring *); static struct ieee80211_node *iwi_node_alloc(struct ieee80211vap *, const uint8_t [IEEE80211_ADDR_LEN]); static void iwi_node_free(struct ieee80211_node *); static void iwi_media_status(struct ifnet *, struct ifmediareq *); static int iwi_newstate(struct ieee80211vap *, enum ieee80211_state, int); static void iwi_wme_init(struct iwi_softc *); static int iwi_wme_setparams(struct iwi_softc *); static int iwi_wme_update(struct ieee80211com *); static uint16_t iwi_read_prom_word(struct iwi_softc *, uint8_t); static void iwi_frame_intr(struct iwi_softc *, 
struct iwi_rx_data *, int, struct iwi_frame *); static void iwi_notification_intr(struct iwi_softc *, struct iwi_notif *); static void iwi_rx_intr(struct iwi_softc *); static void iwi_tx_intr(struct iwi_softc *, struct iwi_tx_ring *); static void iwi_intr(void *); static int iwi_cmd(struct iwi_softc *, uint8_t, void *, uint8_t); static void iwi_write_ibssnode(struct iwi_softc *, const u_int8_t [], int); static int iwi_tx_start(struct iwi_softc *, struct mbuf *, struct ieee80211_node *, int); static int iwi_raw_xmit(struct ieee80211_node *, struct mbuf *, const struct ieee80211_bpf_params *); static void iwi_start(struct iwi_softc *); static int iwi_transmit(struct ieee80211com *, struct mbuf *); static void iwi_watchdog(void *); static int iwi_ioctl(struct ieee80211com *, u_long, void *); static void iwi_parent(struct ieee80211com *); static void iwi_stop_master(struct iwi_softc *); static int iwi_reset(struct iwi_softc *); static int iwi_load_ucode(struct iwi_softc *, const struct iwi_fw *); static int iwi_load_firmware(struct iwi_softc *, const struct iwi_fw *); static void iwi_release_fw_dma(struct iwi_softc *sc); static int iwi_config(struct iwi_softc *); static int iwi_get_firmware(struct iwi_softc *, enum ieee80211_opmode); static void iwi_put_firmware(struct iwi_softc *); static void iwi_monitor_scan(void *, int); static int iwi_scanchan(struct iwi_softc *, unsigned long, int); static void iwi_scan_start(struct ieee80211com *); static void iwi_scan_end(struct ieee80211com *); static void iwi_set_channel(struct ieee80211com *); static void iwi_scan_curchan(struct ieee80211_scan_state *, unsigned long maxdwell); static void iwi_scan_mindwell(struct ieee80211_scan_state *); static int iwi_auth_and_assoc(struct iwi_softc *, struct ieee80211vap *); static void iwi_disassoc(void *, int); static int iwi_disassociate(struct iwi_softc *, int quiet); static void iwi_init_locked(struct iwi_softc *); static void iwi_init(void *); static int iwi_init_fw_dma(struct iwi_softc *, int); static void iwi_stop_locked(void *); static void iwi_stop(struct iwi_softc *); static void iwi_restart(void *, int); static int iwi_getrfkill(struct iwi_softc *); static void iwi_radio_on(void *, int); static void iwi_radio_off(void *, int); static void iwi_sysctlattach(struct iwi_softc *); static void iwi_led_event(struct iwi_softc *, int); static void iwi_ledattach(struct iwi_softc *); static void iwi_collect_bands(struct ieee80211com *, uint8_t [], size_t); static void iwi_getradiocaps(struct ieee80211com *, int, int *, struct ieee80211_channel []); static int iwi_probe(device_t); static int iwi_attach(device_t); static int iwi_detach(device_t); static int iwi_shutdown(device_t); static int iwi_suspend(device_t); static int iwi_resume(device_t); static device_method_t iwi_methods[] = { /* Device interface */ DEVMETHOD(device_probe, iwi_probe), DEVMETHOD(device_attach, iwi_attach), DEVMETHOD(device_detach, iwi_detach), DEVMETHOD(device_shutdown, iwi_shutdown), DEVMETHOD(device_suspend, iwi_suspend), DEVMETHOD(device_resume, iwi_resume), DEVMETHOD_END }; static driver_t iwi_driver = { "iwi", iwi_methods, sizeof (struct iwi_softc) }; static devclass_t iwi_devclass; DRIVER_MODULE(iwi, pci, iwi_driver, iwi_devclass, NULL, NULL); MODULE_VERSION(iwi, 1); static __inline uint8_t MEM_READ_1(struct iwi_softc *sc, uint32_t addr) { CSR_WRITE_4(sc, IWI_CSR_INDIRECT_ADDR, addr); return CSR_READ_1(sc, IWI_CSR_INDIRECT_DATA); } static __inline uint32_t MEM_READ_4(struct iwi_softc *sc, uint32_t addr) { CSR_WRITE_4(sc, 
IWI_CSR_INDIRECT_ADDR, addr); return CSR_READ_4(sc, IWI_CSR_INDIRECT_DATA); } static int iwi_probe(device_t dev) { const struct iwi_ident *ident; for (ident = iwi_ident_table; ident->name != NULL; ident++) { if (pci_get_vendor(dev) == ident->vendor && pci_get_device(dev) == ident->device) { device_set_desc(dev, ident->name); return (BUS_PROBE_DEFAULT); } } return ENXIO; } static int iwi_attach(device_t dev) { struct iwi_softc *sc = device_get_softc(dev); struct ieee80211com *ic = &sc->sc_ic; uint16_t val; int i, error; sc->sc_dev = dev; sc->sc_ledevent = ticks; IWI_LOCK_INIT(sc); mbufq_init(&sc->sc_snd, ifqmaxlen); sc->sc_unr = new_unrhdr(1, IWI_MAX_IBSSNODE-1, &sc->sc_mtx); TASK_INIT(&sc->sc_radiontask, 0, iwi_radio_on, sc); TASK_INIT(&sc->sc_radiofftask, 0, iwi_radio_off, sc); TASK_INIT(&sc->sc_restarttask, 0, iwi_restart, sc); TASK_INIT(&sc->sc_disassoctask, 0, iwi_disassoc, sc); TASK_INIT(&sc->sc_monitortask, 0, iwi_monitor_scan, sc); callout_init_mtx(&sc->sc_wdtimer, &sc->sc_mtx, 0); callout_init_mtx(&sc->sc_rftimer, &sc->sc_mtx, 0); pci_write_config(dev, 0x41, 0, 1); /* enable bus-mastering */ pci_enable_busmaster(dev); i = PCIR_BAR(0); sc->mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &i, RF_ACTIVE); if (sc->mem == NULL) { device_printf(dev, "could not allocate memory resource\n"); goto fail; } sc->sc_st = rman_get_bustag(sc->mem); sc->sc_sh = rman_get_bushandle(sc->mem); i = 0; sc->irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &i, RF_ACTIVE | RF_SHAREABLE); if (sc->irq == NULL) { device_printf(dev, "could not allocate interrupt resource\n"); goto fail; } if (iwi_reset(sc) != 0) { device_printf(dev, "could not reset adapter\n"); goto fail; } /* * Allocate rings. */ if (iwi_alloc_cmd_ring(sc, &sc->cmdq, IWI_CMD_RING_COUNT) != 0) { device_printf(dev, "could not allocate Cmd ring\n"); goto fail; } for (i = 0; i < 4; i++) { error = iwi_alloc_tx_ring(sc, &sc->txq[i], IWI_TX_RING_COUNT, IWI_CSR_TX1_RIDX + i * 4, IWI_CSR_TX1_WIDX + i * 4); if (error != 0) { device_printf(dev, "could not allocate Tx ring %d\n", i+i); goto fail; } } if (iwi_alloc_rx_ring(sc, &sc->rxq, IWI_RX_RING_COUNT) != 0) { device_printf(dev, "could not allocate Rx ring\n"); goto fail; } iwi_wme_init(sc); ic->ic_softc = sc; ic->ic_name = device_get_nameunit(dev); ic->ic_opmode = IEEE80211_M_STA; ic->ic_phytype = IEEE80211_T_OFDM; /* not only, but not used */ /* set device capabilities */ ic->ic_caps = IEEE80211_C_STA /* station mode supported */ | IEEE80211_C_IBSS /* IBSS mode supported */ | IEEE80211_C_MONITOR /* monitor mode supported */ | IEEE80211_C_PMGT /* power save supported */ | IEEE80211_C_SHPREAMBLE /* short preamble supported */ | IEEE80211_C_WPA /* 802.11i */ | IEEE80211_C_WME /* 802.11e */ #if 0 | IEEE80211_C_BGSCAN /* capable of bg scanning */ #endif ; /* read MAC address from EEPROM */ val = iwi_read_prom_word(sc, IWI_EEPROM_MAC + 0); ic->ic_macaddr[0] = val & 0xff; ic->ic_macaddr[1] = val >> 8; val = iwi_read_prom_word(sc, IWI_EEPROM_MAC + 1); ic->ic_macaddr[2] = val & 0xff; ic->ic_macaddr[3] = val >> 8; val = iwi_read_prom_word(sc, IWI_EEPROM_MAC + 2); ic->ic_macaddr[4] = val & 0xff; ic->ic_macaddr[5] = val >> 8; iwi_getradiocaps(ic, IEEE80211_CHAN_MAX, &ic->ic_nchans, ic->ic_channels); ieee80211_ifattach(ic); /* override default methods */ ic->ic_node_alloc = iwi_node_alloc; sc->sc_node_free = ic->ic_node_free; ic->ic_node_free = iwi_node_free; ic->ic_raw_xmit = iwi_raw_xmit; ic->ic_scan_start = iwi_scan_start; ic->ic_scan_end = iwi_scan_end; ic->ic_set_channel = iwi_set_channel; 
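/*
 * Sketch of the EEPROM-to-MAC unpacking performed in iwi_attach() above:
 * iwi_read_prom_word() returns little-endian 16-bit words, and each word
 * contributes two bytes of the address.  read_word() below is a
 * hypothetical stand-in returning canned data.
 */
#include <stdint.h>

static uint16_t
read_word(int idx)
{
	static const uint16_t prom[3] = { 0x1100, 0x3322, 0x5544 };

	return (prom[idx]);		/* canned example data */
}

static void
unpack_mac(uint8_t mac[6])
{
	int i;

	for (i = 0; i < 3; i++) {
		uint16_t val = read_word(i);

		mac[2 * i] = val & 0xff;	/* low byte first */
		mac[2 * i + 1] = val >> 8;	/* yields 00:11:22:33:44:55 */
	}
}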
ic->ic_scan_curchan = iwi_scan_curchan; ic->ic_scan_mindwell = iwi_scan_mindwell; ic->ic_wme.wme_update = iwi_wme_update; ic->ic_vap_create = iwi_vap_create; ic->ic_vap_delete = iwi_vap_delete; ic->ic_ioctl = iwi_ioctl; ic->ic_transmit = iwi_transmit; ic->ic_parent = iwi_parent; ic->ic_getradiocaps = iwi_getradiocaps; ieee80211_radiotap_attach(ic, &sc->sc_txtap.wt_ihdr, sizeof(sc->sc_txtap), IWI_TX_RADIOTAP_PRESENT, &sc->sc_rxtap.wr_ihdr, sizeof(sc->sc_rxtap), IWI_RX_RADIOTAP_PRESENT); iwi_sysctlattach(sc); iwi_ledattach(sc); /* * Hook our interrupt after all initialization is complete. */ error = bus_setup_intr(dev, sc->irq, INTR_TYPE_NET | INTR_MPSAFE, NULL, iwi_intr, sc, &sc->sc_ih); if (error != 0) { device_printf(dev, "could not set up interrupt\n"); goto fail; } if (bootverbose) ieee80211_announce(ic); return 0; fail: /* XXX fix */ iwi_detach(dev); return ENXIO; } static int iwi_detach(device_t dev) { struct iwi_softc *sc = device_get_softc(dev); struct ieee80211com *ic = &sc->sc_ic; bus_teardown_intr(dev, sc->irq, sc->sc_ih); /* NB: do early to drain any pending tasks */ ieee80211_draintask(ic, &sc->sc_radiontask); ieee80211_draintask(ic, &sc->sc_radiofftask); ieee80211_draintask(ic, &sc->sc_restarttask); ieee80211_draintask(ic, &sc->sc_disassoctask); ieee80211_draintask(ic, &sc->sc_monitortask); iwi_stop(sc); ieee80211_ifdetach(ic); iwi_put_firmware(sc); iwi_release_fw_dma(sc); iwi_free_cmd_ring(sc, &sc->cmdq); iwi_free_tx_ring(sc, &sc->txq[0]); iwi_free_tx_ring(sc, &sc->txq[1]); iwi_free_tx_ring(sc, &sc->txq[2]); iwi_free_tx_ring(sc, &sc->txq[3]); iwi_free_rx_ring(sc, &sc->rxq); bus_release_resource(dev, SYS_RES_IRQ, rman_get_rid(sc->irq), sc->irq); bus_release_resource(dev, SYS_RES_MEMORY, rman_get_rid(sc->mem), sc->mem); delete_unrhdr(sc->sc_unr); mbufq_drain(&sc->sc_snd); IWI_LOCK_DESTROY(sc); return 0; } static struct ieee80211vap * iwi_vap_create(struct ieee80211com *ic, const char name[IFNAMSIZ], int unit, enum ieee80211_opmode opmode, int flags, const uint8_t bssid[IEEE80211_ADDR_LEN], const uint8_t mac[IEEE80211_ADDR_LEN]) { struct iwi_softc *sc = ic->ic_softc; struct iwi_vap *ivp; struct ieee80211vap *vap; int i; if (!TAILQ_EMPTY(&ic->ic_vaps)) /* only one at a time */ return NULL; /* * Get firmware image (and possibly dma memory) on mode change. */ if (iwi_get_firmware(sc, opmode)) return NULL; /* allocate DMA memory for mapping firmware image */ i = sc->fw_fw.size; if (sc->fw_boot.size > i) i = sc->fw_boot.size; /* XXX do we dma the ucode as well ? 
*/ if (sc->fw_uc.size > i) i = sc->fw_uc.size; if (iwi_init_fw_dma(sc, i)) return NULL; ivp = malloc(sizeof(struct iwi_vap), M_80211_VAP, M_WAITOK | M_ZERO); vap = &ivp->iwi_vap; ieee80211_vap_setup(ic, vap, name, unit, opmode, flags, bssid); /* override the default, the setting comes from the linux driver */ vap->iv_bmissthreshold = 24; /* override with driver methods */ ivp->iwi_newstate = vap->iv_newstate; vap->iv_newstate = iwi_newstate; /* complete setup */ ieee80211_vap_attach(vap, ieee80211_media_change, iwi_media_status, mac); ic->ic_opmode = opmode; return vap; } static void iwi_vap_delete(struct ieee80211vap *vap) { struct iwi_vap *ivp = IWI_VAP(vap); ieee80211_vap_detach(vap); free(ivp, M_80211_VAP); } static void iwi_dma_map_addr(void *arg, bus_dma_segment_t *segs, int nseg, int error) { if (error != 0) return; KASSERT(nseg == 1, ("too many DMA segments, %d should be 1", nseg)); *(bus_addr_t *)arg = segs[0].ds_addr; } static int iwi_alloc_cmd_ring(struct iwi_softc *sc, struct iwi_cmd_ring *ring, int count) { int error; ring->count = count; ring->queued = 0; ring->cur = ring->next = 0; error = bus_dma_tag_create(bus_get_dma_tag(sc->sc_dev), 4, 0, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, count * IWI_CMD_DESC_SIZE, 1, count * IWI_CMD_DESC_SIZE, 0, NULL, NULL, &ring->desc_dmat); if (error != 0) { device_printf(sc->sc_dev, "could not create desc DMA tag\n"); goto fail; } error = bus_dmamem_alloc(ring->desc_dmat, (void **)&ring->desc, BUS_DMA_NOWAIT | BUS_DMA_ZERO, &ring->desc_map); if (error != 0) { device_printf(sc->sc_dev, "could not allocate DMA memory\n"); goto fail; } error = bus_dmamap_load(ring->desc_dmat, ring->desc_map, ring->desc, count * IWI_CMD_DESC_SIZE, iwi_dma_map_addr, &ring->physaddr, 0); if (error != 0) { device_printf(sc->sc_dev, "could not load desc DMA map\n"); goto fail; } return 0; fail: iwi_free_cmd_ring(sc, ring); return error; } static void iwi_reset_cmd_ring(struct iwi_softc *sc, struct iwi_cmd_ring *ring) { ring->queued = 0; ring->cur = ring->next = 0; } static void iwi_free_cmd_ring(struct iwi_softc *sc, struct iwi_cmd_ring *ring) { if (ring->desc != NULL) { bus_dmamap_sync(ring->desc_dmat, ring->desc_map, BUS_DMASYNC_POSTWRITE); bus_dmamap_unload(ring->desc_dmat, ring->desc_map); bus_dmamem_free(ring->desc_dmat, ring->desc, ring->desc_map); } if (ring->desc_dmat != NULL) bus_dma_tag_destroy(ring->desc_dmat); } static int iwi_alloc_tx_ring(struct iwi_softc *sc, struct iwi_tx_ring *ring, int count, bus_addr_t csr_ridx, bus_addr_t csr_widx) { int i, error; ring->count = count; ring->queued = 0; ring->cur = ring->next = 0; ring->csr_ridx = csr_ridx; ring->csr_widx = csr_widx; error = bus_dma_tag_create(bus_get_dma_tag(sc->sc_dev), 4, 0, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, count * IWI_TX_DESC_SIZE, 1, count * IWI_TX_DESC_SIZE, 0, NULL, NULL, &ring->desc_dmat); if (error != 0) { device_printf(sc->sc_dev, "could not create desc DMA tag\n"); goto fail; } error = bus_dmamem_alloc(ring->desc_dmat, (void **)&ring->desc, BUS_DMA_NOWAIT | BUS_DMA_ZERO, &ring->desc_map); if (error != 0) { device_printf(sc->sc_dev, "could not allocate DMA memory\n"); goto fail; } error = bus_dmamap_load(ring->desc_dmat, ring->desc_map, ring->desc, count * IWI_TX_DESC_SIZE, iwi_dma_map_addr, &ring->physaddr, 0); if (error != 0) { device_printf(sc->sc_dev, "could not load desc DMA map\n"); goto fail; } - ring->data = malloc(count * sizeof (struct iwi_tx_data), M_DEVBUF, + ring->data = mallocarray(count, sizeof(struct iwi_tx_data), M_DEVBUF, M_NOWAIT | 
M_ZERO); if (ring->data == NULL) { device_printf(sc->sc_dev, "could not allocate soft data\n"); error = ENOMEM; goto fail; } error = bus_dma_tag_create(bus_get_dma_tag(sc->sc_dev), 1, 0, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, MCLBYTES, IWI_MAX_NSEG, MCLBYTES, 0, NULL, NULL, &ring->data_dmat); if (error != 0) { device_printf(sc->sc_dev, "could not create data DMA tag\n"); goto fail; } for (i = 0; i < count; i++) { error = bus_dmamap_create(ring->data_dmat, 0, &ring->data[i].map); if (error != 0) { device_printf(sc->sc_dev, "could not create DMA map\n"); goto fail; } } return 0; fail: iwi_free_tx_ring(sc, ring); return error; } static void iwi_reset_tx_ring(struct iwi_softc *sc, struct iwi_tx_ring *ring) { struct iwi_tx_data *data; int i; for (i = 0; i < ring->count; i++) { data = &ring->data[i]; if (data->m != NULL) { bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_POSTWRITE); bus_dmamap_unload(ring->data_dmat, data->map); m_freem(data->m); data->m = NULL; } if (data->ni != NULL) { ieee80211_free_node(data->ni); data->ni = NULL; } } ring->queued = 0; ring->cur = ring->next = 0; } static void iwi_free_tx_ring(struct iwi_softc *sc, struct iwi_tx_ring *ring) { struct iwi_tx_data *data; int i; if (ring->desc != NULL) { bus_dmamap_sync(ring->desc_dmat, ring->desc_map, BUS_DMASYNC_POSTWRITE); bus_dmamap_unload(ring->desc_dmat, ring->desc_map); bus_dmamem_free(ring->desc_dmat, ring->desc, ring->desc_map); } if (ring->desc_dmat != NULL) bus_dma_tag_destroy(ring->desc_dmat); if (ring->data != NULL) { for (i = 0; i < ring->count; i++) { data = &ring->data[i]; if (data->m != NULL) { bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_POSTWRITE); bus_dmamap_unload(ring->data_dmat, data->map); m_freem(data->m); } if (data->ni != NULL) ieee80211_free_node(data->ni); if (data->map != NULL) bus_dmamap_destroy(ring->data_dmat, data->map); } free(ring->data, M_DEVBUF); } if (ring->data_dmat != NULL) bus_dma_tag_destroy(ring->data_dmat); } static int iwi_alloc_rx_ring(struct iwi_softc *sc, struct iwi_rx_ring *ring, int count) { struct iwi_rx_data *data; int i, error; ring->count = count; ring->cur = 0; - ring->data = malloc(count * sizeof (struct iwi_rx_data), M_DEVBUF, + ring->data = mallocarray(count, sizeof(struct iwi_rx_data), M_DEVBUF, M_NOWAIT | M_ZERO); if (ring->data == NULL) { device_printf(sc->sc_dev, "could not allocate soft data\n"); error = ENOMEM; goto fail; } error = bus_dma_tag_create(bus_get_dma_tag(sc->sc_dev), 1, 0, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, MCLBYTES, 1, MCLBYTES, 0, NULL, NULL, &ring->data_dmat); if (error != 0) { device_printf(sc->sc_dev, "could not create data DMA tag\n"); goto fail; } for (i = 0; i < count; i++) { data = &ring->data[i]; error = bus_dmamap_create(ring->data_dmat, 0, &data->map); if (error != 0) { device_printf(sc->sc_dev, "could not create DMA map\n"); goto fail; } data->m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR); if (data->m == NULL) { device_printf(sc->sc_dev, "could not allocate rx mbuf\n"); error = ENOMEM; goto fail; } error = bus_dmamap_load(ring->data_dmat, data->map, mtod(data->m, void *), MCLBYTES, iwi_dma_map_addr, &data->physaddr, 0); if (error != 0) { device_printf(sc->sc_dev, "could not load rx buf DMA map"); goto fail; } data->reg = IWI_CSR_RX_BASE + i * 4; } return 0; fail: iwi_free_rx_ring(sc, ring); return error; } static void iwi_reset_rx_ring(struct iwi_softc *sc, struct iwi_rx_ring *ring) { ring->cur = 0; } static void iwi_free_rx_ring(struct iwi_softc *sc, struct iwi_rx_ring *ring) { struct 
iwi_rx_data *data; int i; if (ring->data != NULL) { for (i = 0; i < ring->count; i++) { data = &ring->data[i]; if (data->m != NULL) { bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_POSTREAD); bus_dmamap_unload(ring->data_dmat, data->map); m_freem(data->m); } if (data->map != NULL) bus_dmamap_destroy(ring->data_dmat, data->map); } free(ring->data, M_DEVBUF); } if (ring->data_dmat != NULL) bus_dma_tag_destroy(ring->data_dmat); } static int iwi_shutdown(device_t dev) { struct iwi_softc *sc = device_get_softc(dev); iwi_stop(sc); iwi_put_firmware(sc); /* ??? XXX */ return 0; } static int iwi_suspend(device_t dev) { struct iwi_softc *sc = device_get_softc(dev); struct ieee80211com *ic = &sc->sc_ic; ieee80211_suspend_all(ic); return 0; } static int iwi_resume(device_t dev) { struct iwi_softc *sc = device_get_softc(dev); struct ieee80211com *ic = &sc->sc_ic; pci_write_config(dev, 0x41, 0, 1); ieee80211_resume_all(ic); return 0; } static struct ieee80211_node * iwi_node_alloc(struct ieee80211vap *vap, const uint8_t mac[IEEE80211_ADDR_LEN]) { struct iwi_node *in; in = malloc(sizeof (struct iwi_node), M_80211_NODE, M_NOWAIT | M_ZERO); if (in == NULL) return NULL; /* XXX assign sta table entry for adhoc */ in->in_station = -1; return &in->in_node; } static void iwi_node_free(struct ieee80211_node *ni) { struct ieee80211com *ic = ni->ni_ic; struct iwi_softc *sc = ic->ic_softc; struct iwi_node *in = (struct iwi_node *)ni; if (in->in_station != -1) { DPRINTF(("%s mac %6D station %u\n", __func__, ni->ni_macaddr, ":", in->in_station)); free_unr(sc->sc_unr, in->in_station); } sc->sc_node_free(ni); } /* * Convert h/w rate code to IEEE rate code. */ static int iwi_cvtrate(int iwirate) { switch (iwirate) { case IWI_RATE_DS1: return 2; case IWI_RATE_DS2: return 4; case IWI_RATE_DS5: return 11; case IWI_RATE_DS11: return 22; case IWI_RATE_OFDM6: return 12; case IWI_RATE_OFDM9: return 18; case IWI_RATE_OFDM12: return 24; case IWI_RATE_OFDM18: return 36; case IWI_RATE_OFDM24: return 48; case IWI_RATE_OFDM36: return 72; case IWI_RATE_OFDM48: return 96; case IWI_RATE_OFDM54: return 108; } return 0; } /* * The firmware automatically adapts the transmit speed. We report its current * value here. */ static void iwi_media_status(struct ifnet *ifp, struct ifmediareq *imr) { struct ieee80211vap *vap = ifp->if_softc; struct ieee80211com *ic = vap->iv_ic; struct iwi_softc *sc = ic->ic_softc; struct ieee80211_node *ni; /* read current transmission rate from adapter */ ni = ieee80211_ref_node(vap->iv_bss); ni->ni_txrate = iwi_cvtrate(CSR_READ_4(sc, IWI_CSR_CURRENT_TX_RATE)); ieee80211_free_node(ni); ieee80211_media_status(ifp, imr); } static int iwi_newstate(struct ieee80211vap *vap, enum ieee80211_state nstate, int arg) { struct iwi_vap *ivp = IWI_VAP(vap); struct ieee80211com *ic = vap->iv_ic; struct iwi_softc *sc = ic->ic_softc; IWI_LOCK_DECL; DPRINTF(("%s: %s -> %s flags 0x%x\n", __func__, ieee80211_state_name[vap->iv_state], ieee80211_state_name[nstate], sc->flags)); IEEE80211_UNLOCK(ic); IWI_LOCK(sc); switch (nstate) { case IEEE80211_S_INIT: /* * NB: don't try to do this if iwi_stop_master has * shutdown the firmware and disabled interrupts. */ if (vap->iv_state == IEEE80211_S_RUN && (sc->flags & IWI_FLAG_FW_INITED)) iwi_disassociate(sc, 0); break; case IEEE80211_S_AUTH: iwi_auth_and_assoc(sc, vap); break; case IEEE80211_S_RUN: if (vap->iv_opmode == IEEE80211_M_IBSS && vap->iv_state == IEEE80211_S_SCAN) { /* * XXX when joining an ibss network we are called * with a SCAN -> RUN transition on scan complete. 
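/*
 * The two hunks above replace malloc(count * sizeof(...)) with
 * mallocarray(count, sizeof(...), ...).  The point of mallocarray(9) is
 * that the element count and size are multiplied with an overflow check,
 * so an oversized count is refused instead of silently wrapping and
 * under-allocating.  A user-space sketch of that check (not the kernel
 * implementation):
 */
#include <stdint.h>
#include <stdlib.h>

static void *
mallocarray_sketch(size_t nmemb, size_t size)
{
	/* Reject nmemb * size if the product would overflow size_t. */
	if (size != 0 && nmemb > SIZE_MAX / size)
		return (NULL);
	return (malloc(nmemb * size));
}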
* Use that to call iwi_auth_and_assoc. On completing * the join we are then called again with an * AUTH -> RUN transition and we want to do nothing. * This is all totally bogus and needs to be redone. */ iwi_auth_and_assoc(sc, vap); } else if (vap->iv_opmode == IEEE80211_M_MONITOR) ieee80211_runtask(ic, &sc->sc_monitortask); break; case IEEE80211_S_ASSOC: /* * If we are transitioning from AUTH then just wait * for the ASSOC status to come back from the firmware. * Otherwise we need to issue the association request. */ if (vap->iv_state == IEEE80211_S_AUTH) break; iwi_auth_and_assoc(sc, vap); break; default: break; } IWI_UNLOCK(sc); IEEE80211_LOCK(ic); return ivp->iwi_newstate(vap, nstate, arg); } /* * WME parameters coming from IEEE 802.11e specification. These values are * already declared in ieee80211_proto.c, but they are static so they can't * be reused here. */ static const struct wmeParams iwi_wme_cck_params[WME_NUM_AC] = { { 0, 3, 5, 7, 0 }, /* WME_AC_BE */ { 0, 3, 5, 10, 0 }, /* WME_AC_BK */ { 0, 2, 4, 5, 188 }, /* WME_AC_VI */ { 0, 2, 3, 4, 102 } /* WME_AC_VO */ }; static const struct wmeParams iwi_wme_ofdm_params[WME_NUM_AC] = { { 0, 3, 4, 6, 0 }, /* WME_AC_BE */ { 0, 3, 4, 10, 0 }, /* WME_AC_BK */ { 0, 2, 3, 4, 94 }, /* WME_AC_VI */ { 0, 2, 2, 3, 47 } /* WME_AC_VO */ }; #define IWI_EXP2(v) htole16((1 << (v)) - 1) #define IWI_USEC(v) htole16(IEEE80211_TXOP_TO_US(v)) static void iwi_wme_init(struct iwi_softc *sc) { const struct wmeParams *wmep; int ac; memset(sc->wme, 0, sizeof sc->wme); for (ac = 0; ac < WME_NUM_AC; ac++) { /* set WME values for CCK modulation */ wmep = &iwi_wme_cck_params[ac]; sc->wme[1].aifsn[ac] = wmep->wmep_aifsn; sc->wme[1].cwmin[ac] = IWI_EXP2(wmep->wmep_logcwmin); sc->wme[1].cwmax[ac] = IWI_EXP2(wmep->wmep_logcwmax); sc->wme[1].burst[ac] = IWI_USEC(wmep->wmep_txopLimit); sc->wme[1].acm[ac] = wmep->wmep_acm; /* set WME values for OFDM modulation */ wmep = &iwi_wme_ofdm_params[ac]; sc->wme[2].aifsn[ac] = wmep->wmep_aifsn; sc->wme[2].cwmin[ac] = IWI_EXP2(wmep->wmep_logcwmin); sc->wme[2].cwmax[ac] = IWI_EXP2(wmep->wmep_logcwmax); sc->wme[2].burst[ac] = IWI_USEC(wmep->wmep_txopLimit); sc->wme[2].acm[ac] = wmep->wmep_acm; } } static int iwi_wme_setparams(struct iwi_softc *sc) { struct ieee80211com *ic = &sc->sc_ic; struct chanAccParams chp; const struct wmeParams *wmep; int ac; ieee80211_wme_ic_getparams(ic, &chp); for (ac = 0; ac < WME_NUM_AC; ac++) { /* set WME values for current operating mode */ wmep = &chp.cap_wmeParams[ac]; sc->wme[0].aifsn[ac] = wmep->wmep_aifsn; sc->wme[0].cwmin[ac] = IWI_EXP2(wmep->wmep_logcwmin); sc->wme[0].cwmax[ac] = IWI_EXP2(wmep->wmep_logcwmax); sc->wme[0].burst[ac] = IWI_USEC(wmep->wmep_txopLimit); sc->wme[0].acm[ac] = wmep->wmep_acm; } DPRINTF(("Setting WME parameters\n")); return iwi_cmd(sc, IWI_CMD_SET_WME_PARAMS, sc->wme, sizeof sc->wme); } #undef IWI_USEC #undef IWI_EXP2 static int iwi_wme_update(struct ieee80211com *ic) { struct iwi_softc *sc = ic->ic_softc; struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps); IWI_LOCK_DECL; /* * We may be called to update the WME parameters in * the adapter at various places. If we're already * associated then initiate the request immediately; * otherwise we assume the params will get sent down * to the adapter as part of the work iwi_auth_and_assoc * does. 
*/ if (vap->iv_state == IEEE80211_S_RUN) { IWI_LOCK(sc); iwi_wme_setparams(sc); IWI_UNLOCK(sc); } return (0); } static int iwi_wme_setie(struct iwi_softc *sc) { struct ieee80211_wme_info wme; memset(&wme, 0, sizeof wme); wme.wme_id = IEEE80211_ELEMID_VENDOR; wme.wme_len = sizeof (struct ieee80211_wme_info) - 2; wme.wme_oui[0] = 0x00; wme.wme_oui[1] = 0x50; wme.wme_oui[2] = 0xf2; wme.wme_type = WME_OUI_TYPE; wme.wme_subtype = WME_INFO_OUI_SUBTYPE; wme.wme_version = WME_VERSION; wme.wme_info = 0; DPRINTF(("Setting WME IE (len=%u)\n", wme.wme_len)); return iwi_cmd(sc, IWI_CMD_SET_WMEIE, &wme, sizeof wme); } /* * Read 16 bits at address 'addr' from the serial EEPROM. */ static uint16_t iwi_read_prom_word(struct iwi_softc *sc, uint8_t addr) { uint32_t tmp; uint16_t val; int n; /* clock C once before the first command */ IWI_EEPROM_CTL(sc, 0); IWI_EEPROM_CTL(sc, IWI_EEPROM_S); IWI_EEPROM_CTL(sc, IWI_EEPROM_S | IWI_EEPROM_C); IWI_EEPROM_CTL(sc, IWI_EEPROM_S); /* write start bit (1) */ IWI_EEPROM_CTL(sc, IWI_EEPROM_S | IWI_EEPROM_D); IWI_EEPROM_CTL(sc, IWI_EEPROM_S | IWI_EEPROM_D | IWI_EEPROM_C); /* write READ opcode (10) */ IWI_EEPROM_CTL(sc, IWI_EEPROM_S | IWI_EEPROM_D); IWI_EEPROM_CTL(sc, IWI_EEPROM_S | IWI_EEPROM_D | IWI_EEPROM_C); IWI_EEPROM_CTL(sc, IWI_EEPROM_S); IWI_EEPROM_CTL(sc, IWI_EEPROM_S | IWI_EEPROM_C); /* write address A7-A0 */ for (n = 7; n >= 0; n--) { IWI_EEPROM_CTL(sc, IWI_EEPROM_S | (((addr >> n) & 1) << IWI_EEPROM_SHIFT_D)); IWI_EEPROM_CTL(sc, IWI_EEPROM_S | (((addr >> n) & 1) << IWI_EEPROM_SHIFT_D) | IWI_EEPROM_C); } IWI_EEPROM_CTL(sc, IWI_EEPROM_S); /* read data Q15-Q0 */ val = 0; for (n = 15; n >= 0; n--) { IWI_EEPROM_CTL(sc, IWI_EEPROM_S | IWI_EEPROM_C); IWI_EEPROM_CTL(sc, IWI_EEPROM_S); tmp = MEM_READ_4(sc, IWI_MEM_EEPROM_CTL); val |= ((tmp & IWI_EEPROM_Q) >> IWI_EEPROM_SHIFT_Q) << n; } IWI_EEPROM_CTL(sc, 0); /* clear Chip Select and clock C */ IWI_EEPROM_CTL(sc, IWI_EEPROM_S); IWI_EEPROM_CTL(sc, 0); IWI_EEPROM_CTL(sc, IWI_EEPROM_C); return val; } static void iwi_setcurchan(struct iwi_softc *sc, int chan) { struct ieee80211com *ic = &sc->sc_ic; sc->curchan = chan; ieee80211_radiotap_chan_change(ic); } static void iwi_frame_intr(struct iwi_softc *sc, struct iwi_rx_data *data, int i, struct iwi_frame *frame) { struct ieee80211com *ic = &sc->sc_ic; struct mbuf *mnew, *m; struct ieee80211_node *ni; int type, error, framelen; int8_t rssi, nf; IWI_LOCK_DECL; framelen = le16toh(frame->len); if (framelen < IEEE80211_MIN_LEN || framelen > MCLBYTES) { /* * XXX >MCLBYTES is bogus as it means the h/w dma'd * out of bounds; need to figure out how to limit * frame size in the firmware */ /* XXX stat */ DPRINTFN(1, ("drop rx frame len=%u chan=%u rssi=%u rssi_dbm=%u\n", le16toh(frame->len), frame->chan, frame->rssi, frame->rssi_dbm)); return; } DPRINTFN(5, ("received frame len=%u chan=%u rssi=%u rssi_dbm=%u\n", le16toh(frame->len), frame->chan, frame->rssi, frame->rssi_dbm)); if (frame->chan != sc->curchan) iwi_setcurchan(sc, frame->chan); /* * Try to allocate a new mbuf for this ring element and load it before * processing the current mbuf. If the ring element cannot be loaded, * drop the received packet and reuse the old mbuf. In the unlikely * case that the old mbuf can't be reloaded either, explicitly panic. 
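 * Reusing (or, failing that, panicking over) the old mbuf matters because
 * every ring slot must keep a DMA-loaded receive buffer whose physical
 * address the hardware is given; running on with an empty slot is not an
 * option.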
*/ mnew = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR); if (mnew == NULL) { counter_u64_add(ic->ic_ierrors, 1); return; } bus_dmamap_unload(sc->rxq.data_dmat, data->map); error = bus_dmamap_load(sc->rxq.data_dmat, data->map, mtod(mnew, void *), MCLBYTES, iwi_dma_map_addr, &data->physaddr, 0); if (error != 0) { m_freem(mnew); /* try to reload the old mbuf */ error = bus_dmamap_load(sc->rxq.data_dmat, data->map, mtod(data->m, void *), MCLBYTES, iwi_dma_map_addr, &data->physaddr, 0); if (error != 0) { /* very unlikely that it will fail... */ panic("%s: could not load old rx mbuf", device_get_name(sc->sc_dev)); } counter_u64_add(ic->ic_ierrors, 1); return; } /* * New mbuf successfully loaded, update Rx ring and continue * processing. */ m = data->m; data->m = mnew; CSR_WRITE_4(sc, data->reg, data->physaddr); /* finalize mbuf */ m->m_pkthdr.len = m->m_len = sizeof (struct iwi_hdr) + sizeof (struct iwi_frame) + framelen; m_adj(m, sizeof (struct iwi_hdr) + sizeof (struct iwi_frame)); rssi = frame->rssi_dbm; nf = -95; if (ieee80211_radiotap_active(ic)) { struct iwi_rx_radiotap_header *tap = &sc->sc_rxtap; tap->wr_flags = 0; tap->wr_antsignal = rssi; tap->wr_antnoise = nf; tap->wr_rate = iwi_cvtrate(frame->rate); tap->wr_antenna = frame->antenna; } IWI_UNLOCK(sc); ni = ieee80211_find_rxnode(ic, mtod(m, struct ieee80211_frame_min *)); if (ni != NULL) { type = ieee80211_input(ni, m, rssi, nf); ieee80211_free_node(ni); } else type = ieee80211_input_all(ic, m, rssi, nf); IWI_LOCK(sc); if (sc->sc_softled) { /* * Blink for any data frame. Otherwise do a * heartbeat-style blink when idle. The latter * is mainly for station mode where we depend on * periodic beacon frames to trigger the poll event. */ if (type == IEEE80211_FC0_TYPE_DATA) { sc->sc_rxrate = frame->rate; iwi_led_event(sc, IWI_LED_RX); } else if (ticks - sc->sc_ledevent >= sc->sc_ledidle) iwi_led_event(sc, IWI_LED_POLL); } } /* * Check for an association response frame to see if QoS * has been negotiated. We parse just enough to figure * out if we're supposed to use QoS. The proper solution * is to pass the frame up so ieee80211_input can do the * work but that's made hard by how things currently are * done in the driver. 
*/ static void iwi_checkforqos(struct ieee80211vap *vap, const struct ieee80211_frame *wh, int len) { #define SUBTYPE(wh) ((wh)->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK) const uint8_t *frm, *efrm, *wme; struct ieee80211_node *ni; uint16_t capinfo, status, associd; /* NB: +8 for capinfo, status, associd, and first ie */ if (!(sizeof(*wh)+8 < len && len < IEEE80211_MAX_LEN) || SUBTYPE(wh) != IEEE80211_FC0_SUBTYPE_ASSOC_RESP) return; /* * asresp frame format * [2] capability information * [2] status * [2] association ID * [tlv] supported rates * [tlv] extended supported rates * [tlv] WME */ frm = (const uint8_t *)&wh[1]; efrm = ((const uint8_t *) wh) + len; capinfo = le16toh(*(const uint16_t *)frm); frm += 2; status = le16toh(*(const uint16_t *)frm); frm += 2; associd = le16toh(*(const uint16_t *)frm); frm += 2; wme = NULL; while (efrm - frm > 1) { IEEE80211_VERIFY_LENGTH(efrm - frm, frm[1] + 2, return); switch (*frm) { case IEEE80211_ELEMID_VENDOR: if (iswmeoui(frm)) wme = frm; break; } frm += frm[1] + 2; } ni = ieee80211_ref_node(vap->iv_bss); ni->ni_capinfo = capinfo; ni->ni_associd = associd & 0x3fff; if (wme != NULL) ni->ni_flags |= IEEE80211_NODE_QOS; else ni->ni_flags &= ~IEEE80211_NODE_QOS; ieee80211_free_node(ni); #undef SUBTYPE } static void iwi_notif_link_quality(struct iwi_softc *sc, struct iwi_notif *notif) { struct iwi_notif_link_quality *lq; int len; len = le16toh(notif->len); DPRINTFN(5, ("Notification (%u) - len=%d, sizeof=%zu\n", notif->type, len, sizeof(struct iwi_notif_link_quality) )); /* enforce length */ if (len != sizeof(struct iwi_notif_link_quality)) { DPRINTFN(5, ("Notification: (%u) too short (%d)\n", notif->type, len)); return; } lq = (struct iwi_notif_link_quality *)(notif + 1); memcpy(&sc->sc_linkqual, lq, sizeof(sc->sc_linkqual)); sc->sc_linkqual_valid = 1; } /* * Task queue callbacks for iwi_notification_intr used to avoid LOR's. */ static void iwi_notification_intr(struct iwi_softc *sc, struct iwi_notif *notif) { struct ieee80211com *ic = &sc->sc_ic; struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps); struct iwi_notif_scan_channel *chan; struct iwi_notif_scan_complete *scan; struct iwi_notif_authentication *auth; struct iwi_notif_association *assoc; struct iwi_notif_beacon_state *beacon; switch (notif->type) { case IWI_NOTIF_TYPE_SCAN_CHANNEL: chan = (struct iwi_notif_scan_channel *)(notif + 1); DPRINTFN(3, ("Scan of channel %u complete (%u)\n", ieee80211_ieee2mhz(chan->nchan, 0), chan->nchan)); /* Reset the timer, the scan is still going */ sc->sc_state_timer = 3; break; case IWI_NOTIF_TYPE_SCAN_COMPLETE: scan = (struct iwi_notif_scan_complete *)(notif + 1); DPRINTFN(2, ("Scan completed (%u, %u)\n", scan->nchan, scan->status)); IWI_STATE_END(sc, IWI_FW_SCANNING); /* * Monitor mode works by doing a passive scan to set * the channel and enable rx. Because we don't want * to abort a scan lest the firmware crash we scan * for a short period of time and automatically restart * the scan when notified the sweep has completed. */ if (vap->iv_opmode == IEEE80211_M_MONITOR) { ieee80211_runtask(ic, &sc->sc_monitortask); break; } if (scan->status == IWI_SCAN_COMPLETED) { /* NB: don't need to defer, net80211 does it for us */ ieee80211_scan_next(vap); } break; case IWI_NOTIF_TYPE_AUTHENTICATION: auth = (struct iwi_notif_authentication *)(notif + 1); switch (auth->state) { case IWI_AUTH_SUCCESS: DPRINTFN(2, ("Authentication succeeeded\n")); ieee80211_new_state(vap, IEEE80211_S_ASSOC, -1); break; case IWI_AUTH_FAIL: /* * These are delivered as an unsolicited deauth * (e.g. 
due to inactivity) or in response to an * associate request. */ sc->flags &= ~IWI_FLAG_ASSOCIATED; if (vap->iv_state != IEEE80211_S_RUN) { DPRINTFN(2, ("Authentication failed\n")); vap->iv_stats.is_rx_auth_fail++; IWI_STATE_END(sc, IWI_FW_ASSOCIATING); } else { DPRINTFN(2, ("Deauthenticated\n")); vap->iv_stats.is_rx_deauth++; } ieee80211_new_state(vap, IEEE80211_S_SCAN, -1); break; case IWI_AUTH_SENT_1: case IWI_AUTH_RECV_2: case IWI_AUTH_SEQ1_PASS: break; case IWI_AUTH_SEQ1_FAIL: DPRINTFN(2, ("Initial authentication handshake failed; " "you probably need shared key\n")); vap->iv_stats.is_rx_auth_fail++; IWI_STATE_END(sc, IWI_FW_ASSOCIATING); /* XXX retry shared key when in auto */ break; default: device_printf(sc->sc_dev, "unknown authentication state %u\n", auth->state); break; } break; case IWI_NOTIF_TYPE_ASSOCIATION: assoc = (struct iwi_notif_association *)(notif + 1); switch (assoc->state) { case IWI_AUTH_SUCCESS: /* re-association, do nothing */ break; case IWI_ASSOC_SUCCESS: DPRINTFN(2, ("Association succeeded\n")); sc->flags |= IWI_FLAG_ASSOCIATED; IWI_STATE_END(sc, IWI_FW_ASSOCIATING); iwi_checkforqos(vap, (const struct ieee80211_frame *)(assoc+1), le16toh(notif->len) - sizeof(*assoc) - 1); ieee80211_new_state(vap, IEEE80211_S_RUN, -1); break; case IWI_ASSOC_INIT: sc->flags &= ~IWI_FLAG_ASSOCIATED; switch (sc->fw_state) { case IWI_FW_ASSOCIATING: DPRINTFN(2, ("Association failed\n")); IWI_STATE_END(sc, IWI_FW_ASSOCIATING); ieee80211_new_state(vap, IEEE80211_S_SCAN, -1); break; case IWI_FW_DISASSOCIATING: DPRINTFN(2, ("Dissassociated\n")); IWI_STATE_END(sc, IWI_FW_DISASSOCIATING); vap->iv_stats.is_rx_disassoc++; ieee80211_new_state(vap, IEEE80211_S_SCAN, -1); break; } break; default: device_printf(sc->sc_dev, "unknown association state %u\n", assoc->state); break; } break; case IWI_NOTIF_TYPE_BEACON: /* XXX check struct length */ beacon = (struct iwi_notif_beacon_state *)(notif + 1); DPRINTFN(5, ("Beacon state (%u, %u)\n", beacon->state, le32toh(beacon->number))); if (beacon->state == IWI_BEACON_MISS) { /* * The firmware notifies us of every beacon miss * so we need to track the count against the * configured threshold before notifying the * 802.11 layer. * XXX try to roam, drop assoc only on much higher count */ if (le32toh(beacon->number) >= vap->iv_bmissthreshold) { DPRINTF(("Beacon miss: %u >= %u\n", le32toh(beacon->number), vap->iv_bmissthreshold)); vap->iv_stats.is_beacon_miss++; /* * It's pointless to notify the 802.11 layer * as it'll try to send a probe request (which * we'll discard) and then timeout and drop us * into scan state. Instead tell the firmware * to disassociate and then on completion we'll * kick the state machine to scan. */ ieee80211_runtask(ic, &sc->sc_disassoctask); } } break; case IWI_NOTIF_TYPE_CALIBRATION: case IWI_NOTIF_TYPE_NOISE: /* XXX handle? 
*/ DPRINTFN(5, ("Notification (%u)\n", notif->type)); break; case IWI_NOTIF_TYPE_LINK_QUALITY: iwi_notif_link_quality(sc, notif); break; default: DPRINTF(("unknown notification type %u flags 0x%x len %u\n", notif->type, notif->flags, le16toh(notif->len))); break; } } static void iwi_rx_intr(struct iwi_softc *sc) { struct iwi_rx_data *data; struct iwi_hdr *hdr; uint32_t hw; hw = CSR_READ_4(sc, IWI_CSR_RX_RIDX); for (; sc->rxq.cur != hw;) { data = &sc->rxq.data[sc->rxq.cur]; bus_dmamap_sync(sc->rxq.data_dmat, data->map, BUS_DMASYNC_POSTREAD); hdr = mtod(data->m, struct iwi_hdr *); switch (hdr->type) { case IWI_HDR_TYPE_FRAME: iwi_frame_intr(sc, data, sc->rxq.cur, (struct iwi_frame *)(hdr + 1)); break; case IWI_HDR_TYPE_NOTIF: iwi_notification_intr(sc, (struct iwi_notif *)(hdr + 1)); break; default: device_printf(sc->sc_dev, "unknown hdr type %u\n", hdr->type); } DPRINTFN(15, ("rx done idx=%u\n", sc->rxq.cur)); sc->rxq.cur = (sc->rxq.cur + 1) % IWI_RX_RING_COUNT; } /* tell the firmware what we have processed */ hw = (hw == 0) ? IWI_RX_RING_COUNT - 1 : hw - 1; CSR_WRITE_4(sc, IWI_CSR_RX_WIDX, hw); } static void iwi_tx_intr(struct iwi_softc *sc, struct iwi_tx_ring *txq) { struct iwi_tx_data *data; uint32_t hw; hw = CSR_READ_4(sc, txq->csr_ridx); while (txq->next != hw) { data = &txq->data[txq->next]; DPRINTFN(15, ("tx done idx=%u\n", txq->next)); bus_dmamap_sync(txq->data_dmat, data->map, BUS_DMASYNC_POSTWRITE); bus_dmamap_unload(txq->data_dmat, data->map); ieee80211_tx_complete(data->ni, data->m, 0); data->ni = NULL; data->m = NULL; txq->queued--; txq->next = (txq->next + 1) % IWI_TX_RING_COUNT; } sc->sc_tx_timer = 0; if (sc->sc_softled) iwi_led_event(sc, IWI_LED_TX); iwi_start(sc); } static void iwi_fatal_error_intr(struct iwi_softc *sc) { struct ieee80211com *ic = &sc->sc_ic; struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps); device_printf(sc->sc_dev, "firmware error\n"); if (vap != NULL) ieee80211_cancel_scan(vap); ieee80211_runtask(ic, &sc->sc_restarttask); sc->flags &= ~IWI_FLAG_BUSY; sc->sc_busy_timer = 0; wakeup(sc); } static void iwi_radio_off_intr(struct iwi_softc *sc) { ieee80211_runtask(&sc->sc_ic, &sc->sc_radiofftask); } static void iwi_intr(void *arg) { struct iwi_softc *sc = arg; uint32_t r; IWI_LOCK_DECL; IWI_LOCK(sc); if ((r = CSR_READ_4(sc, IWI_CSR_INTR)) == 0 || r == 0xffffffff) { IWI_UNLOCK(sc); return; } /* acknowledge interrupts */ CSR_WRITE_4(sc, IWI_CSR_INTR, r); if (r & IWI_INTR_FATAL_ERROR) { iwi_fatal_error_intr(sc); goto done; } if (r & IWI_INTR_FW_INITED) { if (!(r & (IWI_INTR_FATAL_ERROR | IWI_INTR_PARITY_ERROR))) wakeup(sc); } if (r & IWI_INTR_RADIO_OFF) iwi_radio_off_intr(sc); if (r & IWI_INTR_CMD_DONE) { sc->flags &= ~IWI_FLAG_BUSY; sc->sc_busy_timer = 0; wakeup(sc); } if (r & IWI_INTR_TX1_DONE) iwi_tx_intr(sc, &sc->txq[0]); if (r & IWI_INTR_TX2_DONE) iwi_tx_intr(sc, &sc->txq[1]); if (r & IWI_INTR_TX3_DONE) iwi_tx_intr(sc, &sc->txq[2]); if (r & IWI_INTR_TX4_DONE) iwi_tx_intr(sc, &sc->txq[3]); if (r & IWI_INTR_RX_DONE) iwi_rx_intr(sc); if (r & IWI_INTR_PARITY_ERROR) { /* XXX rate-limit */ device_printf(sc->sc_dev, "parity error\n"); } done: IWI_UNLOCK(sc); } static int iwi_cmd(struct iwi_softc *sc, uint8_t type, void *data, uint8_t len) { struct iwi_cmd_desc *desc; IWI_LOCK_ASSERT(sc); if (sc->flags & IWI_FLAG_BUSY) { device_printf(sc->sc_dev, "%s: cmd %d not sent, busy\n", __func__, type); return EAGAIN; } sc->flags |= IWI_FLAG_BUSY; sc->sc_busy_timer = 2; desc = &sc->cmdq.desc[sc->cmdq.cur]; desc->hdr.type = IWI_HDR_TYPE_COMMAND; desc->hdr.flags = 
IWI_HDR_FLAG_IRQ; desc->type = type; desc->len = len; memcpy(desc->data, data, len); bus_dmamap_sync(sc->cmdq.desc_dmat, sc->cmdq.desc_map, BUS_DMASYNC_PREWRITE); DPRINTFN(2, ("sending command idx=%u type=%u len=%u\n", sc->cmdq.cur, type, len)); sc->cmdq.cur = (sc->cmdq.cur + 1) % IWI_CMD_RING_COUNT; CSR_WRITE_4(sc, IWI_CSR_CMD_WIDX, sc->cmdq.cur); return msleep(sc, &sc->sc_mtx, 0, "iwicmd", hz); } static void iwi_write_ibssnode(struct iwi_softc *sc, const u_int8_t addr[IEEE80211_ADDR_LEN], int entry) { struct iwi_ibssnode node; /* write node information into NIC memory */ memset(&node, 0, sizeof node); IEEE80211_ADDR_COPY(node.bssid, addr); DPRINTF(("%s mac %6D station %u\n", __func__, node.bssid, ":", entry)); CSR_WRITE_REGION_1(sc, IWI_CSR_NODE_BASE + entry * sizeof node, (uint8_t *)&node, sizeof node); } static int iwi_tx_start(struct iwi_softc *sc, struct mbuf *m0, struct ieee80211_node *ni, int ac) { struct ieee80211vap *vap = ni->ni_vap; struct iwi_node *in = (struct iwi_node *)ni; const struct ieee80211_frame *wh; struct ieee80211_key *k; struct iwi_tx_ring *txq = &sc->txq[ac]; struct iwi_tx_data *data; struct iwi_tx_desc *desc; struct mbuf *mnew; bus_dma_segment_t segs[IWI_MAX_NSEG]; int error, nsegs, hdrlen, i; int ismcast, flags, xflags, staid; IWI_LOCK_ASSERT(sc); wh = mtod(m0, const struct ieee80211_frame *); /* NB: only data frames use this path */ hdrlen = ieee80211_hdrsize(wh); ismcast = IEEE80211_IS_MULTICAST(wh->i_addr1); flags = xflags = 0; if (!ismcast) flags |= IWI_DATA_FLAG_NEED_ACK; if (vap->iv_flags & IEEE80211_F_SHPREAMBLE) flags |= IWI_DATA_FLAG_SHPREAMBLE; if (IEEE80211_QOS_HAS_SEQ(wh)) { xflags |= IWI_DATA_XFLAG_QOS; if (ieee80211_wme_vap_ac_is_noack(vap, ac)) flags &= ~IWI_DATA_FLAG_NEED_ACK; } /* * This is only used in IBSS mode where the firmware expect an index * in a h/w table instead of a destination address. */ if (vap->iv_opmode == IEEE80211_M_IBSS) { if (!ismcast) { if (in->in_station == -1) { in->in_station = alloc_unr(sc->sc_unr); if (in->in_station == -1) { /* h/w table is full */ if_inc_counter(ni->ni_vap->iv_ifp, IFCOUNTER_OERRORS, 1); m_freem(m0); ieee80211_free_node(ni); return 0; } iwi_write_ibssnode(sc, ni->ni_macaddr, in->in_station); } staid = in->in_station; } else { /* * Multicast addresses have no associated node * so there will be no station entry. We reserve * entry 0 for one mcast address and use that. * If there are many being used this will be * expensive and we'll need to do a better job * but for now this handles the broadcast case. 
*/ if (!IEEE80211_ADDR_EQ(wh->i_addr1, sc->sc_mcast)) { IEEE80211_ADDR_COPY(sc->sc_mcast, wh->i_addr1); iwi_write_ibssnode(sc, sc->sc_mcast, 0); } staid = 0; } } else staid = 0; if (wh->i_fc[1] & IEEE80211_FC1_PROTECTED) { k = ieee80211_crypto_encap(ni, m0); if (k == NULL) { m_freem(m0); return ENOBUFS; } /* packet header may have moved, reset our local pointer */ wh = mtod(m0, struct ieee80211_frame *); } if (ieee80211_radiotap_active_vap(vap)) { struct iwi_tx_radiotap_header *tap = &sc->sc_txtap; tap->wt_flags = 0; ieee80211_radiotap_tx(vap, m0); } data = &txq->data[txq->cur]; desc = &txq->desc[txq->cur]; /* save and trim IEEE802.11 header */ m_copydata(m0, 0, hdrlen, (caddr_t)&desc->wh); m_adj(m0, hdrlen); error = bus_dmamap_load_mbuf_sg(txq->data_dmat, data->map, m0, segs, &nsegs, 0); if (error != 0 && error != EFBIG) { device_printf(sc->sc_dev, "could not map mbuf (error %d)\n", error); m_freem(m0); return error; } if (error != 0) { mnew = m_defrag(m0, M_NOWAIT); if (mnew == NULL) { device_printf(sc->sc_dev, "could not defragment mbuf\n"); m_freem(m0); return ENOBUFS; } m0 = mnew; error = bus_dmamap_load_mbuf_sg(txq->data_dmat, data->map, m0, segs, &nsegs, 0); if (error != 0) { device_printf(sc->sc_dev, "could not map mbuf (error %d)\n", error); m_freem(m0); return error; } } data->m = m0; data->ni = ni; desc->hdr.type = IWI_HDR_TYPE_DATA; desc->hdr.flags = IWI_HDR_FLAG_IRQ; desc->station = staid; desc->cmd = IWI_DATA_CMD_TX; desc->len = htole16(m0->m_pkthdr.len); desc->flags = flags; desc->xflags = xflags; #if 0 if (vap->iv_flags & IEEE80211_F_PRIVACY) desc->wep_txkey = vap->iv_def_txkey; else #endif desc->flags |= IWI_DATA_FLAG_NO_WEP; desc->nseg = htole32(nsegs); for (i = 0; i < nsegs; i++) { desc->seg_addr[i] = htole32(segs[i].ds_addr); desc->seg_len[i] = htole16(segs[i].ds_len); } bus_dmamap_sync(txq->data_dmat, data->map, BUS_DMASYNC_PREWRITE); bus_dmamap_sync(txq->desc_dmat, txq->desc_map, BUS_DMASYNC_PREWRITE); DPRINTFN(5, ("sending data frame txq=%u idx=%u len=%u nseg=%u\n", ac, txq->cur, le16toh(desc->len), nsegs)); txq->queued++; txq->cur = (txq->cur + 1) % IWI_TX_RING_COUNT; CSR_WRITE_4(sc, txq->csr_widx, txq->cur); return 0; } static int iwi_raw_xmit(struct ieee80211_node *ni, struct mbuf *m, const struct ieee80211_bpf_params *params) { /* no support; just discard */ m_freem(m); ieee80211_free_node(ni); return 0; } static int iwi_transmit(struct ieee80211com *ic, struct mbuf *m) { struct iwi_softc *sc = ic->ic_softc; int error; IWI_LOCK_DECL; IWI_LOCK(sc); if (!sc->sc_running) { IWI_UNLOCK(sc); return (ENXIO); } error = mbufq_enqueue(&sc->sc_snd, m); if (error) { IWI_UNLOCK(sc); return (error); } iwi_start(sc); IWI_UNLOCK(sc); return (0); } static void iwi_start(struct iwi_softc *sc) { struct mbuf *m; struct ieee80211_node *ni; int ac; IWI_LOCK_ASSERT(sc); while ((m = mbufq_dequeue(&sc->sc_snd)) != NULL) { ac = M_WME_GETAC(m); if (sc->txq[ac].queued > IWI_TX_RING_COUNT - 8) { /* there is no place left in this ring; tail drop */ /* XXX tail drop */ mbufq_prepend(&sc->sc_snd, m); break; } ni = (struct ieee80211_node *) m->m_pkthdr.rcvif; if (iwi_tx_start(sc, m, ni, ac) != 0) { if_inc_counter(ni->ni_vap->iv_ifp, IFCOUNTER_OERRORS, 1); ieee80211_free_node(ni); break; } sc->sc_tx_timer = 5; } } static void iwi_watchdog(void *arg) { struct iwi_softc *sc = arg; struct ieee80211com *ic = &sc->sc_ic; IWI_LOCK_ASSERT(sc); if (sc->sc_tx_timer > 0) { if (--sc->sc_tx_timer == 0) { device_printf(sc->sc_dev, "device timeout\n"); counter_u64_add(ic->ic_oerrors, 1); ieee80211_runtask(ic, 
&sc->sc_restarttask); } } if (sc->sc_state_timer > 0) { if (--sc->sc_state_timer == 0) { device_printf(sc->sc_dev, "firmware stuck in state %d, resetting\n", sc->fw_state); if (sc->fw_state == IWI_FW_SCANNING) ieee80211_cancel_scan(TAILQ_FIRST(&ic->ic_vaps)); ieee80211_runtask(ic, &sc->sc_restarttask); sc->sc_state_timer = 3; } } if (sc->sc_busy_timer > 0) { if (--sc->sc_busy_timer == 0) { device_printf(sc->sc_dev, "firmware command timeout, resetting\n"); ieee80211_runtask(ic, &sc->sc_restarttask); } } callout_reset(&sc->sc_wdtimer, hz, iwi_watchdog, sc); } static void iwi_parent(struct ieee80211com *ic) { struct iwi_softc *sc = ic->ic_softc; int startall = 0; IWI_LOCK_DECL; IWI_LOCK(sc); if (ic->ic_nrunning > 0) { if (!sc->sc_running) { iwi_init_locked(sc); startall = 1; } } else if (sc->sc_running) iwi_stop_locked(sc); IWI_UNLOCK(sc); if (startall) ieee80211_start_all(ic); } static int iwi_ioctl(struct ieee80211com *ic, u_long cmd, void *data) { struct ifreq *ifr = data; struct iwi_softc *sc = ic->ic_softc; int error; IWI_LOCK_DECL; IWI_LOCK(sc); switch (cmd) { case SIOCGIWISTATS: /* XXX validate permissions/memory/etc? */ error = copyout(&sc->sc_linkqual, ifr->ifr_data, sizeof(struct iwi_notif_link_quality)); break; case SIOCZIWISTATS: memset(&sc->sc_linkqual, 0, sizeof(struct iwi_notif_link_quality)); error = 0; break; default: error = ENOTTY; break; } IWI_UNLOCK(sc); return (error); } static void iwi_stop_master(struct iwi_softc *sc) { uint32_t tmp; int ntries; /* disable interrupts */ CSR_WRITE_4(sc, IWI_CSR_INTR_MASK, 0); CSR_WRITE_4(sc, IWI_CSR_RST, IWI_RST_STOP_MASTER); for (ntries = 0; ntries < 5; ntries++) { if (CSR_READ_4(sc, IWI_CSR_RST) & IWI_RST_MASTER_DISABLED) break; DELAY(10); } if (ntries == 5) device_printf(sc->sc_dev, "timeout waiting for master\n"); tmp = CSR_READ_4(sc, IWI_CSR_RST); CSR_WRITE_4(sc, IWI_CSR_RST, tmp | IWI_RST_PRINCETON_RESET); sc->flags &= ~IWI_FLAG_FW_INITED; } static int iwi_reset(struct iwi_softc *sc) { uint32_t tmp; int i, ntries; iwi_stop_master(sc); tmp = CSR_READ_4(sc, IWI_CSR_CTL); CSR_WRITE_4(sc, IWI_CSR_CTL, tmp | IWI_CTL_INIT); CSR_WRITE_4(sc, IWI_CSR_READ_INT, IWI_READ_INT_INIT_HOST); /* wait for clock stabilization */ for (ntries = 0; ntries < 1000; ntries++) { if (CSR_READ_4(sc, IWI_CSR_CTL) & IWI_CTL_CLOCK_READY) break; DELAY(200); } if (ntries == 1000) { device_printf(sc->sc_dev, "timeout waiting for clock stabilization\n"); return EIO; } tmp = CSR_READ_4(sc, IWI_CSR_RST); CSR_WRITE_4(sc, IWI_CSR_RST, tmp | IWI_RST_SOFT_RESET); DELAY(10); tmp = CSR_READ_4(sc, IWI_CSR_CTL); CSR_WRITE_4(sc, IWI_CSR_CTL, tmp | IWI_CTL_INIT); /* clear NIC memory */ CSR_WRITE_4(sc, IWI_CSR_AUTOINC_ADDR, 0); for (i = 0; i < 0xc000; i++) CSR_WRITE_4(sc, IWI_CSR_AUTOINC_DATA, 0); return 0; } static const struct iwi_firmware_ohdr * iwi_setup_ofw(struct iwi_softc *sc, struct iwi_fw *fw) { const struct firmware *fp = fw->fp; const struct iwi_firmware_ohdr *hdr; if (fp->datasize < sizeof (struct iwi_firmware_ohdr)) { device_printf(sc->sc_dev, "image '%s' too small\n", fp->name); return NULL; } hdr = (const struct iwi_firmware_ohdr *)fp->data; if ((IWI_FW_GET_MAJOR(le32toh(hdr->version)) != IWI_FW_REQ_MAJOR) || (IWI_FW_GET_MINOR(le32toh(hdr->version)) != IWI_FW_REQ_MINOR)) { device_printf(sc->sc_dev, "version for '%s' %d.%d != %d.%d\n", fp->name, IWI_FW_GET_MAJOR(le32toh(hdr->version)), IWI_FW_GET_MINOR(le32toh(hdr->version)), IWI_FW_REQ_MAJOR, IWI_FW_REQ_MINOR); return NULL; } fw->data = ((const char *) fp->data) + sizeof(struct iwi_firmware_ohdr); fw->size = 
fp->datasize - sizeof(struct iwi_firmware_ohdr); fw->name = fp->name; return hdr; } static const struct iwi_firmware_ohdr * iwi_setup_oucode(struct iwi_softc *sc, struct iwi_fw *fw) { const struct iwi_firmware_ohdr *hdr; hdr = iwi_setup_ofw(sc, fw); if (hdr != NULL && le32toh(hdr->mode) != IWI_FW_MODE_UCODE) { device_printf(sc->sc_dev, "%s is not a ucode image\n", fw->name); hdr = NULL; } return hdr; } static void iwi_getfw(struct iwi_fw *fw, const char *fwname, struct iwi_fw *uc, const char *ucname) { if (fw->fp == NULL) fw->fp = firmware_get(fwname); /* NB: pre-3.0 ucode is packaged separately */ if (uc->fp == NULL && fw->fp != NULL && fw->fp->version < 300) uc->fp = firmware_get(ucname); } /* * Get the required firmware images if not already loaded. * Note that we hold firmware images so long as the device * is marked up in case we need to reload them on device init. * This is necessary because we re-init the device sometimes * from a context where we cannot read from the filesystem * (e.g. from the taskqueue thread when rfkill is re-enabled). * XXX return 0 on success, 1 on error. * * NB: the order of get'ing and put'ing images here is * intentional to support handling firmware images bundled * by operating mode and/or all together in one file with * the boot firmware as "master". */ static int iwi_get_firmware(struct iwi_softc *sc, enum ieee80211_opmode opmode) { const struct iwi_firmware_hdr *hdr; const struct firmware *fp; /* invalidate cached firmware on mode change */ if (sc->fw_mode != opmode) iwi_put_firmware(sc); switch (opmode) { case IEEE80211_M_STA: iwi_getfw(&sc->fw_fw, "iwi_bss", &sc->fw_uc, "iwi_ucode_bss"); break; case IEEE80211_M_IBSS: iwi_getfw(&sc->fw_fw, "iwi_ibss", &sc->fw_uc, "iwi_ucode_ibss"); break; case IEEE80211_M_MONITOR: iwi_getfw(&sc->fw_fw, "iwi_monitor", &sc->fw_uc, "iwi_ucode_monitor"); break; default: device_printf(sc->sc_dev, "unknown opmode %d\n", opmode); return EINVAL; } fp = sc->fw_fw.fp; if (fp == NULL) { device_printf(sc->sc_dev, "could not load firmware\n"); goto bad; } if (fp->version < 300) { /* * Firmware prior to 3.0 was packaged as separate * boot, firmware, and ucode images. Verify the * ucode image was read in, retrieve the boot image * if needed, and check version stamps for consistency. * The version stamps in the data are also checked * above; this is a bit paranoid but is a cheap * safeguard against mis-packaging. */ if (sc->fw_uc.fp == NULL) { device_printf(sc->sc_dev, "could not load ucode\n"); goto bad; } if (sc->fw_boot.fp == NULL) { sc->fw_boot.fp = firmware_get("iwi_boot"); if (sc->fw_boot.fp == NULL) { device_printf(sc->sc_dev, "could not load boot firmware\n"); goto bad; } } if (sc->fw_boot.fp->version != sc->fw_fw.fp->version || sc->fw_boot.fp->version != sc->fw_uc.fp->version) { device_printf(sc->sc_dev, "firmware version mismatch: " "'%s' is %d, '%s' is %d, '%s' is %d\n", sc->fw_boot.fp->name, sc->fw_boot.fp->version, sc->fw_uc.fp->name, sc->fw_uc.fp->version, sc->fw_fw.fp->name, sc->fw_fw.fp->version ); goto bad; } /* * Check and setup each image. */ if (iwi_setup_oucode(sc, &sc->fw_uc) == NULL || iwi_setup_ofw(sc, &sc->fw_boot) == NULL || iwi_setup_ofw(sc, &sc->fw_fw) == NULL) goto bad; } else { /* * Check and setup combined image. 
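 * A combined (>= 3.0) image is a single file: an iwi_firmware_hdr followed
 * back-to-back by the boot, ucode and runtime firmware sections, whose
 * sizes come from the header (bsize, usize, fsize), so each section is
 * located below by simple pointer arithmetic.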
*/ if (fp->datasize < sizeof(struct iwi_firmware_hdr)) { device_printf(sc->sc_dev, "image '%s' too small\n", fp->name); goto bad; } hdr = (const struct iwi_firmware_hdr *)fp->data; if (fp->datasize < sizeof(*hdr) + le32toh(hdr->bsize) + le32toh(hdr->usize) + le32toh(hdr->fsize)) { device_printf(sc->sc_dev, "image '%s' too small (2)\n", fp->name); goto bad; } sc->fw_boot.data = ((const char *) fp->data) + sizeof(*hdr); sc->fw_boot.size = le32toh(hdr->bsize); sc->fw_boot.name = fp->name; sc->fw_uc.data = sc->fw_boot.data + sc->fw_boot.size; sc->fw_uc.size = le32toh(hdr->usize); sc->fw_uc.name = fp->name; sc->fw_fw.data = sc->fw_uc.data + sc->fw_uc.size; sc->fw_fw.size = le32toh(hdr->fsize); sc->fw_fw.name = fp->name; } #if 0 device_printf(sc->sc_dev, "boot %d ucode %d fw %d bytes\n", sc->fw_boot.size, sc->fw_uc.size, sc->fw_fw.size); #endif sc->fw_mode = opmode; return 0; bad: iwi_put_firmware(sc); return 1; } static void iwi_put_fw(struct iwi_fw *fw) { if (fw->fp != NULL) { firmware_put(fw->fp, FIRMWARE_UNLOAD); fw->fp = NULL; } fw->data = NULL; fw->size = 0; fw->name = NULL; } /* * Release any cached firmware images. */ static void iwi_put_firmware(struct iwi_softc *sc) { iwi_put_fw(&sc->fw_uc); iwi_put_fw(&sc->fw_fw); iwi_put_fw(&sc->fw_boot); } static int iwi_load_ucode(struct iwi_softc *sc, const struct iwi_fw *fw) { uint32_t tmp; const uint16_t *w; const char *uc = fw->data; size_t size = fw->size; int i, ntries, error; IWI_LOCK_ASSERT(sc); error = 0; CSR_WRITE_4(sc, IWI_CSR_RST, CSR_READ_4(sc, IWI_CSR_RST) | IWI_RST_STOP_MASTER); for (ntries = 0; ntries < 5; ntries++) { if (CSR_READ_4(sc, IWI_CSR_RST) & IWI_RST_MASTER_DISABLED) break; DELAY(10); } if (ntries == 5) { device_printf(sc->sc_dev, "timeout waiting for master\n"); error = EIO; goto fail; } MEM_WRITE_4(sc, 0x3000e0, 0x80000000); DELAY(5000); tmp = CSR_READ_4(sc, IWI_CSR_RST); tmp &= ~IWI_RST_PRINCETON_RESET; CSR_WRITE_4(sc, IWI_CSR_RST, tmp); DELAY(5000); MEM_WRITE_4(sc, 0x3000e0, 0); DELAY(1000); MEM_WRITE_4(sc, IWI_MEM_EEPROM_EVENT, 1); DELAY(1000); MEM_WRITE_4(sc, IWI_MEM_EEPROM_EVENT, 0); DELAY(1000); MEM_WRITE_1(sc, 0x200000, 0x00); MEM_WRITE_1(sc, 0x200000, 0x40); DELAY(1000); /* write microcode into adapter memory */ for (w = (const uint16_t *)uc; size > 0; w++, size -= 2) MEM_WRITE_2(sc, 0x200010, htole16(*w)); MEM_WRITE_1(sc, 0x200000, 0x00); MEM_WRITE_1(sc, 0x200000, 0x80); /* wait until we get an answer */ for (ntries = 0; ntries < 100; ntries++) { if (MEM_READ_1(sc, 0x200000) & 1) break; DELAY(100); } if (ntries == 100) { device_printf(sc->sc_dev, "timeout waiting for ucode to initialize\n"); error = EIO; goto fail; } /* read the answer or the firmware will not initialize properly */ for (i = 0; i < 7; i++) MEM_READ_4(sc, 0x200004); MEM_WRITE_1(sc, 0x200000, 0x00); fail: return error; } /* macro to handle unaligned little endian data in firmware image */ #define GETLE32(p) ((p)[0] | (p)[1] << 8 | (p)[2] << 16 | (p)[3] << 24) static int iwi_load_firmware(struct iwi_softc *sc, const struct iwi_fw *fw) { u_char *p, *end; uint32_t sentinel, ctl, src, dst, sum, len, mlen, tmp; int ntries, error; IWI_LOCK_ASSERT(sc); /* copy firmware image to DMA memory */ memcpy(sc->fw_virtaddr, fw->data, fw->size); /* make sure the adapter will get up-to-date values */ bus_dmamap_sync(sc->fw_dmat, sc->fw_map, BUS_DMASYNC_PREWRITE); /* tell the adapter where the command blocks are stored */ MEM_WRITE_4(sc, 0x3000a0, 0x27000); /* * Store command blocks into adapter's internal memory using register * indirections. 
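 * Each command block written below is four 32-bit words: a control word
 * (IWI_CB_DEFAULT_CTL or'ed with the chunk length), the source address in
 * host DMA memory, the destination address in adapter memory, and a check
 * word computed as ctl ^ src ^ dst.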
The adapter will read the firmware image through DMA * using information stored in command blocks. */ src = sc->fw_physaddr; p = sc->fw_virtaddr; end = p + fw->size; CSR_WRITE_4(sc, IWI_CSR_AUTOINC_ADDR, 0x27000); while (p < end) { dst = GETLE32(p); p += 4; src += 4; len = GETLE32(p); p += 4; src += 4; p += len; while (len > 0) { mlen = min(len, IWI_CB_MAXDATALEN); ctl = IWI_CB_DEFAULT_CTL | mlen; sum = ctl ^ src ^ dst; /* write a command block */ CSR_WRITE_4(sc, IWI_CSR_AUTOINC_DATA, ctl); CSR_WRITE_4(sc, IWI_CSR_AUTOINC_DATA, src); CSR_WRITE_4(sc, IWI_CSR_AUTOINC_DATA, dst); CSR_WRITE_4(sc, IWI_CSR_AUTOINC_DATA, sum); src += mlen; dst += mlen; len -= mlen; } } /* write a fictive final command block (sentinel) */ sentinel = CSR_READ_4(sc, IWI_CSR_AUTOINC_ADDR); CSR_WRITE_4(sc, IWI_CSR_AUTOINC_DATA, 0); tmp = CSR_READ_4(sc, IWI_CSR_RST); tmp &= ~(IWI_RST_MASTER_DISABLED | IWI_RST_STOP_MASTER); CSR_WRITE_4(sc, IWI_CSR_RST, tmp); /* tell the adapter to start processing command blocks */ MEM_WRITE_4(sc, 0x3000a4, 0x540100); /* wait until the adapter reaches the sentinel */ for (ntries = 0; ntries < 400; ntries++) { if (MEM_READ_4(sc, 0x3000d0) >= sentinel) break; DELAY(100); } /* sync dma, just in case */ bus_dmamap_sync(sc->fw_dmat, sc->fw_map, BUS_DMASYNC_POSTWRITE); if (ntries == 400) { device_printf(sc->sc_dev, "timeout processing command blocks for %s firmware\n", fw->name); return EIO; } /* we're done with command blocks processing */ MEM_WRITE_4(sc, 0x3000a4, 0x540c00); /* allow interrupts so we know when the firmware is ready */ CSR_WRITE_4(sc, IWI_CSR_INTR_MASK, IWI_INTR_MASK); /* tell the adapter to initialize the firmware */ CSR_WRITE_4(sc, IWI_CSR_RST, 0); tmp = CSR_READ_4(sc, IWI_CSR_CTL); CSR_WRITE_4(sc, IWI_CSR_CTL, tmp | IWI_CTL_ALLOW_STANDBY); /* wait at most one second for firmware initialization to complete */ if ((error = msleep(sc, &sc->sc_mtx, 0, "iwiinit", hz)) != 0) { device_printf(sc->sc_dev, "timeout waiting for %s firmware " "initialization to complete\n", fw->name); } return error; } static int iwi_setpowermode(struct iwi_softc *sc, struct ieee80211vap *vap) { uint32_t data; if (vap->iv_flags & IEEE80211_F_PMGTON) { /* XXX set more fine-grained operation */ data = htole32(IWI_POWER_MODE_MAX); } else data = htole32(IWI_POWER_MODE_CAM); DPRINTF(("Setting power mode to %u\n", le32toh(data))); return iwi_cmd(sc, IWI_CMD_SET_POWER_MODE, &data, sizeof data); } static int iwi_setwepkeys(struct iwi_softc *sc, struct ieee80211vap *vap) { struct iwi_wep_key wepkey; struct ieee80211_key *wk; int error, i; for (i = 0; i < IEEE80211_WEP_NKID; i++) { wk = &vap->iv_nw_keys[i]; wepkey.cmd = IWI_WEP_KEY_CMD_SETKEY; wepkey.idx = i; wepkey.len = wk->wk_keylen; memset(wepkey.key, 0, sizeof wepkey.key); memcpy(wepkey.key, wk->wk_key, wk->wk_keylen); DPRINTF(("Setting wep key index %u len %u\n", wepkey.idx, wepkey.len)); error = iwi_cmd(sc, IWI_CMD_SET_WEP_KEY, &wepkey, sizeof wepkey); if (error != 0) return error; } return 0; } static int iwi_config(struct iwi_softc *sc) { struct ieee80211com *ic = &sc->sc_ic; struct iwi_configuration config; struct iwi_rateset rs; struct iwi_txpower power; uint32_t data; int error, i; IWI_LOCK_ASSERT(sc); DPRINTF(("Setting MAC address to %6D\n", ic->ic_macaddr, ":")); error = iwi_cmd(sc, IWI_CMD_SET_MAC_ADDRESS, ic->ic_macaddr, IEEE80211_ADDR_LEN); if (error != 0) return error; memset(&config, 0, sizeof config); config.bluetooth_coexistence = sc->bluetooth; config.silence_threshold = 0x1e; config.antenna = sc->antenna; config.multicast_enabled = 1; 
config.answer_pbreq = (ic->ic_opmode == IEEE80211_M_IBSS) ? 1 : 0; config.disable_unicast_decryption = 1; config.disable_multicast_decryption = 1; if (ic->ic_opmode == IEEE80211_M_MONITOR) { config.allow_invalid_frames = 1; config.allow_beacon_and_probe_resp = 1; config.allow_mgt = 1; } DPRINTF(("Configuring adapter\n")); error = iwi_cmd(sc, IWI_CMD_SET_CONFIG, &config, sizeof config); if (error != 0) return error; if (ic->ic_opmode == IEEE80211_M_IBSS) { power.mode = IWI_MODE_11B; power.nchan = 11; for (i = 0; i < 11; i++) { power.chan[i].chan = i + 1; power.chan[i].power = IWI_TXPOWER_MAX; } DPRINTF(("Setting .11b channels tx power\n")); error = iwi_cmd(sc, IWI_CMD_SET_TX_POWER, &power, sizeof power); if (error != 0) return error; power.mode = IWI_MODE_11G; DPRINTF(("Setting .11g channels tx power\n")); error = iwi_cmd(sc, IWI_CMD_SET_TX_POWER, &power, sizeof power); if (error != 0) return error; } memset(&rs, 0, sizeof rs); rs.mode = IWI_MODE_11G; rs.type = IWI_RATESET_TYPE_SUPPORTED; rs.nrates = ic->ic_sup_rates[IEEE80211_MODE_11G].rs_nrates; memcpy(rs.rates, ic->ic_sup_rates[IEEE80211_MODE_11G].rs_rates, rs.nrates); DPRINTF(("Setting .11bg supported rates (%u)\n", rs.nrates)); error = iwi_cmd(sc, IWI_CMD_SET_RATES, &rs, sizeof rs); if (error != 0) return error; memset(&rs, 0, sizeof rs); rs.mode = IWI_MODE_11A; rs.type = IWI_RATESET_TYPE_SUPPORTED; rs.nrates = ic->ic_sup_rates[IEEE80211_MODE_11A].rs_nrates; memcpy(rs.rates, ic->ic_sup_rates[IEEE80211_MODE_11A].rs_rates, rs.nrates); DPRINTF(("Setting .11a supported rates (%u)\n", rs.nrates)); error = iwi_cmd(sc, IWI_CMD_SET_RATES, &rs, sizeof rs); if (error != 0) return error; data = htole32(arc4random()); DPRINTF(("Setting initialization vector to %u\n", le32toh(data))); error = iwi_cmd(sc, IWI_CMD_SET_IV, &data, sizeof data); if (error != 0) return error; /* enable adapter */ DPRINTF(("Enabling adapter\n")); return iwi_cmd(sc, IWI_CMD_ENABLE, NULL, 0); } static __inline void set_scan_type(struct iwi_scan_ext *scan, int ix, int scan_type) { uint8_t *st = &scan->scan_type[ix / 2]; if (ix % 2) *st = (*st & 0xf0) | ((scan_type & 0xf) << 0); else *st = (*st & 0x0f) | ((scan_type & 0xf) << 4); } static int scan_type(const struct ieee80211_scan_state *ss, const struct ieee80211_channel *chan) { /* We can only set one essid for a directed scan */ if (ss->ss_nssid != 0) return IWI_SCAN_TYPE_BDIRECTED; if ((ss->ss_flags & IEEE80211_SCAN_ACTIVE) && (chan->ic_flags & IEEE80211_CHAN_PASSIVE) == 0) return IWI_SCAN_TYPE_BROADCAST; return IWI_SCAN_TYPE_PASSIVE; } static __inline int scan_band(const struct ieee80211_channel *c) { return IEEE80211_IS_CHAN_5GHZ(c) ? IWI_CHAN_5GHZ : IWI_CHAN_2GHZ; } static void iwi_monitor_scan(void *arg, int npending) { struct iwi_softc *sc = arg; IWI_LOCK_DECL; IWI_LOCK(sc); (void) iwi_scanchan(sc, 2000, 0); IWI_UNLOCK(sc); } /* * Start a scan on the current channel or all channels. 
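 * The channel set is handed to the firmware as a run-length encoded list:
 * each run opens with a slot carrying the band flag (IWI_CHAN_2GHZ or
 * IWI_CHAN_5GHZ) or'ed with the run length, followed by the channel
 * numbers, while the per-channel scan type is packed a nibble at a time
 * into scan_type[] via set_scan_type().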
*/ static int iwi_scanchan(struct iwi_softc *sc, unsigned long maxdwell, int allchan) { struct ieee80211com *ic = &sc->sc_ic; struct ieee80211_channel *chan; struct ieee80211_scan_state *ss; struct iwi_scan_ext scan; int error = 0; IWI_LOCK_ASSERT(sc); if (sc->fw_state == IWI_FW_SCANNING) { /* * This should not happen as we only trigger scan_next after * completion */ DPRINTF(("%s: called too early - still scanning\n", __func__)); return (EBUSY); } IWI_STATE_BEGIN(sc, IWI_FW_SCANNING); ss = ic->ic_scan; memset(&scan, 0, sizeof scan); scan.full_scan_index = htole32(++sc->sc_scangen); scan.dwell_time[IWI_SCAN_TYPE_PASSIVE] = htole16(maxdwell); if (ic->ic_flags_ext & IEEE80211_FEXT_BGSCAN) { /* * Use very short dwell times for when we send probe request * frames. Without this bg scans hang. Ideally this should * be handled with early-termination as done by net80211 but * that's not feasible (aborting a scan is problematic). */ scan.dwell_time[IWI_SCAN_TYPE_BROADCAST] = htole16(30); scan.dwell_time[IWI_SCAN_TYPE_BDIRECTED] = htole16(30); } else { scan.dwell_time[IWI_SCAN_TYPE_BROADCAST] = htole16(maxdwell); scan.dwell_time[IWI_SCAN_TYPE_BDIRECTED] = htole16(maxdwell); } /* We can only set one essid for a directed scan */ if (ss->ss_nssid != 0) { error = iwi_cmd(sc, IWI_CMD_SET_ESSID, ss->ss_ssid[0].ssid, ss->ss_ssid[0].len); if (error) return (error); } if (allchan) { int i, next, band, b, bstart; /* * Convert scan list to run-length encoded channel list * the firmware requires (preserving the order setup by * net80211). The first entry in each run specifies the * band and the count of items in the run. */ next = 0; /* next open slot */ bstart = 0; /* NB: not needed, silence compiler */ band = -1; /* NB: impossible value */ KASSERT(ss->ss_last > 0, ("no channels")); for (i = 0; i < ss->ss_last; i++) { chan = ss->ss_chans[i]; b = scan_band(chan); if (b != band) { if (band != -1) scan.channels[bstart] = (next - bstart) | band; /* NB: this allocates a slot for the run-len */ band = b, bstart = next++; } if (next >= IWI_SCAN_CHANNELS) { DPRINTF(("truncating scan list\n")); break; } scan.channels[next] = ieee80211_chan2ieee(ic, chan); set_scan_type(&scan, next, scan_type(ss, chan)); next++; } scan.channels[bstart] = (next - bstart) | band; } else { /* Scan the current channel only */ chan = ic->ic_curchan; scan.channels[0] = 1 | scan_band(chan); scan.channels[1] = ieee80211_chan2ieee(ic, chan); set_scan_type(&scan, 1, scan_type(ss, chan)); } #ifdef IWI_DEBUG if (iwi_debug > 0) { static const char *scantype[8] = { "PSTOP", "PASV", "DIR", "BCAST", "BDIR", "5", "6", "7" }; int i; printf("Scan request: index %u dwell %d/%d/%d\n" , le32toh(scan.full_scan_index) , le16toh(scan.dwell_time[IWI_SCAN_TYPE_PASSIVE]) , le16toh(scan.dwell_time[IWI_SCAN_TYPE_BROADCAST]) , le16toh(scan.dwell_time[IWI_SCAN_TYPE_BDIRECTED]) ); i = 0; do { int run = scan.channels[i]; if (run == 0) break; printf("Scan %d %s channels:", run & 0x3f, run & IWI_CHAN_2GHZ ? "2.4GHz" : "5GHz"); for (run &= 0x3f, i++; run > 0; run--, i++) { uint8_t type = scan.scan_type[i/2]; printf(" %u/%s", scan.channels[i], scantype[(i & 1 ? 
type : type>>4) & 7]); } printf("\n"); } while (i < IWI_SCAN_CHANNELS); } #endif return (iwi_cmd(sc, IWI_CMD_SCAN_EXT, &scan, sizeof scan)); } static int iwi_set_sensitivity(struct iwi_softc *sc, int8_t rssi_dbm) { struct iwi_sensitivity sens; DPRINTF(("Setting sensitivity to %d\n", rssi_dbm)); memset(&sens, 0, sizeof sens); sens.rssi = htole16(rssi_dbm); return iwi_cmd(sc, IWI_CMD_SET_SENSITIVITY, &sens, sizeof sens); } static int iwi_auth_and_assoc(struct iwi_softc *sc, struct ieee80211vap *vap) { struct ieee80211com *ic = vap->iv_ic; struct ifnet *ifp = vap->iv_ifp; struct ieee80211_node *ni; struct iwi_configuration config; struct iwi_associate *assoc = &sc->assoc; struct iwi_rateset rs; uint16_t capinfo; uint32_t data; int error, mode; IWI_LOCK_ASSERT(sc); ni = ieee80211_ref_node(vap->iv_bss); if (sc->flags & IWI_FLAG_ASSOCIATED) { DPRINTF(("Already associated\n")); return (-1); } IWI_STATE_BEGIN(sc, IWI_FW_ASSOCIATING); error = 0; mode = 0; if (IEEE80211_IS_CHAN_A(ic->ic_curchan)) mode = IWI_MODE_11A; else if (IEEE80211_IS_CHAN_G(ic->ic_curchan)) mode = IWI_MODE_11G; if (IEEE80211_IS_CHAN_B(ic->ic_curchan)) mode = IWI_MODE_11B; if (IEEE80211_IS_CHAN_2GHZ(ic->ic_curchan)) { memset(&config, 0, sizeof config); config.bluetooth_coexistence = sc->bluetooth; config.antenna = sc->antenna; config.multicast_enabled = 1; if (mode == IWI_MODE_11G) config.use_protection = 1; config.answer_pbreq = (vap->iv_opmode == IEEE80211_M_IBSS) ? 1 : 0; config.disable_unicast_decryption = 1; config.disable_multicast_decryption = 1; DPRINTF(("Configuring adapter\n")); error = iwi_cmd(sc, IWI_CMD_SET_CONFIG, &config, sizeof config); if (error != 0) goto done; } #ifdef IWI_DEBUG if (iwi_debug > 0) { printf("Setting ESSID to "); ieee80211_print_essid(ni->ni_essid, ni->ni_esslen); printf("\n"); } #endif error = iwi_cmd(sc, IWI_CMD_SET_ESSID, ni->ni_essid, ni->ni_esslen); if (error != 0) goto done; error = iwi_setpowermode(sc, vap); if (error != 0) goto done; data = htole32(vap->iv_rtsthreshold); DPRINTF(("Setting RTS threshold to %u\n", le32toh(data))); error = iwi_cmd(sc, IWI_CMD_SET_RTS_THRESHOLD, &data, sizeof data); if (error != 0) goto done; data = htole32(vap->iv_fragthreshold); DPRINTF(("Setting fragmentation threshold to %u\n", le32toh(data))); error = iwi_cmd(sc, IWI_CMD_SET_FRAG_THRESHOLD, &data, sizeof data); if (error != 0) goto done; /* the rate set has already been "negotiated" */ memset(&rs, 0, sizeof rs); rs.mode = mode; rs.type = IWI_RATESET_TYPE_NEGOTIATED; rs.nrates = ni->ni_rates.rs_nrates; if (rs.nrates > IWI_RATESET_SIZE) { DPRINTF(("Truncating negotiated rate set from %u\n", rs.nrates)); rs.nrates = IWI_RATESET_SIZE; } memcpy(rs.rates, ni->ni_rates.rs_rates, rs.nrates); DPRINTF(("Setting negotiated rates (%u)\n", rs.nrates)); error = iwi_cmd(sc, IWI_CMD_SET_RATES, &rs, sizeof rs); if (error != 0) goto done; memset(assoc, 0, sizeof *assoc); if ((vap->iv_flags & IEEE80211_F_WME) && ni->ni_ies.wme_ie != NULL) { /* NB: don't treat WME setup as failure */ if (iwi_wme_setparams(sc) == 0 && iwi_wme_setie(sc) == 0) assoc->policy |= htole16(IWI_POLICY_WME); /* XXX complain on failure? 
*/ } if (vap->iv_appie_wpa != NULL) { struct ieee80211_appie *ie = vap->iv_appie_wpa; DPRINTF(("Setting optional IE (len=%u)\n", ie->ie_len)); error = iwi_cmd(sc, IWI_CMD_SET_OPTIE, ie->ie_data, ie->ie_len); if (error != 0) goto done; } error = iwi_set_sensitivity(sc, ic->ic_node_getrssi(ni)); if (error != 0) goto done; assoc->mode = mode; assoc->chan = ic->ic_curchan->ic_ieee; /* * NB: do not arrange for shared key auth w/o privacy * (i.e. a wep key); it causes a firmware error. */ if ((vap->iv_flags & IEEE80211_F_PRIVACY) && ni->ni_authmode == IEEE80211_AUTH_SHARED) { assoc->auth = IWI_AUTH_SHARED; /* * It's possible to have privacy marked but no default * key setup. This typically is due to a user app bug * but if we blindly grab the key the firmware will * barf so avoid it for now. */ if (vap->iv_def_txkey != IEEE80211_KEYIX_NONE) assoc->auth |= vap->iv_def_txkey << 4; error = iwi_setwepkeys(sc, vap); if (error != 0) goto done; } if (vap->iv_flags & IEEE80211_F_WPA) assoc->policy |= htole16(IWI_POLICY_WPA); if (vap->iv_opmode == IEEE80211_M_IBSS && ni->ni_tstamp.tsf == 0) assoc->type = IWI_HC_IBSS_START; else assoc->type = IWI_HC_ASSOC; memcpy(assoc->tstamp, ni->ni_tstamp.data, 8); if (vap->iv_opmode == IEEE80211_M_IBSS) capinfo = IEEE80211_CAPINFO_IBSS; else capinfo = IEEE80211_CAPINFO_ESS; if (vap->iv_flags & IEEE80211_F_PRIVACY) capinfo |= IEEE80211_CAPINFO_PRIVACY; if ((ic->ic_flags & IEEE80211_F_SHPREAMBLE) && IEEE80211_IS_CHAN_2GHZ(ic->ic_curchan)) capinfo |= IEEE80211_CAPINFO_SHORT_PREAMBLE; if (ni->ni_capinfo & IEEE80211_CAPINFO_SHORT_SLOTTIME) capinfo |= IEEE80211_CAPINFO_SHORT_SLOTTIME; assoc->capinfo = htole16(capinfo); assoc->lintval = htole16(ic->ic_lintval); assoc->intval = htole16(ni->ni_intval); IEEE80211_ADDR_COPY(assoc->bssid, ni->ni_bssid); if (vap->iv_opmode == IEEE80211_M_IBSS) IEEE80211_ADDR_COPY(assoc->dst, ifp->if_broadcastaddr); else IEEE80211_ADDR_COPY(assoc->dst, ni->ni_bssid); DPRINTF(("%s bssid %6D dst %6D channel %u policy 0x%x " "auth %u capinfo 0x%x lintval %u bintval %u\n", assoc->type == IWI_HC_IBSS_START ? 
"Start" : "Join", assoc->bssid, ":", assoc->dst, ":", assoc->chan, le16toh(assoc->policy), assoc->auth, le16toh(assoc->capinfo), le16toh(assoc->lintval), le16toh(assoc->intval))); error = iwi_cmd(sc, IWI_CMD_ASSOCIATE, assoc, sizeof *assoc); done: ieee80211_free_node(ni); if (error) IWI_STATE_END(sc, IWI_FW_ASSOCIATING); return (error); } static void iwi_disassoc(void *arg, int pending) { struct iwi_softc *sc = arg; IWI_LOCK_DECL; IWI_LOCK(sc); iwi_disassociate(sc, 0); IWI_UNLOCK(sc); } static int iwi_disassociate(struct iwi_softc *sc, int quiet) { struct iwi_associate *assoc = &sc->assoc; if ((sc->flags & IWI_FLAG_ASSOCIATED) == 0) { DPRINTF(("Not associated\n")); return (-1); } IWI_STATE_BEGIN(sc, IWI_FW_DISASSOCIATING); if (quiet) assoc->type = IWI_HC_DISASSOC_QUIET; else assoc->type = IWI_HC_DISASSOC; DPRINTF(("Trying to disassociate from %6D channel %u\n", assoc->bssid, ":", assoc->chan)); return iwi_cmd(sc, IWI_CMD_ASSOCIATE, assoc, sizeof *assoc); } /* * release dma resources for the firmware */ static void iwi_release_fw_dma(struct iwi_softc *sc) { if (sc->fw_flags & IWI_FW_HAVE_PHY) bus_dmamap_unload(sc->fw_dmat, sc->fw_map); if (sc->fw_flags & IWI_FW_HAVE_MAP) bus_dmamem_free(sc->fw_dmat, sc->fw_virtaddr, sc->fw_map); if (sc->fw_flags & IWI_FW_HAVE_DMAT) bus_dma_tag_destroy(sc->fw_dmat); sc->fw_flags = 0; sc->fw_dma_size = 0; sc->fw_dmat = NULL; sc->fw_map = NULL; sc->fw_physaddr = 0; sc->fw_virtaddr = NULL; } /* * allocate the dma descriptor for the firmware. * Return 0 on success, 1 on error. * Must be called unlocked, protected by IWI_FLAG_FW_LOADING. */ static int iwi_init_fw_dma(struct iwi_softc *sc, int size) { if (sc->fw_dma_size >= size) return 0; if (bus_dma_tag_create(bus_get_dma_tag(sc->sc_dev), 4, 0, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, size, 1, size, 0, NULL, NULL, &sc->fw_dmat) != 0) { device_printf(sc->sc_dev, "could not create firmware DMA tag\n"); goto error; } sc->fw_flags |= IWI_FW_HAVE_DMAT; if (bus_dmamem_alloc(sc->fw_dmat, &sc->fw_virtaddr, 0, &sc->fw_map) != 0) { device_printf(sc->sc_dev, "could not allocate firmware DMA memory\n"); goto error; } sc->fw_flags |= IWI_FW_HAVE_MAP; if (bus_dmamap_load(sc->fw_dmat, sc->fw_map, sc->fw_virtaddr, size, iwi_dma_map_addr, &sc->fw_physaddr, 0) != 0) { device_printf(sc->sc_dev, "could not load firmware DMA map\n"); goto error; } sc->fw_flags |= IWI_FW_HAVE_PHY; sc->fw_dma_size = size; return 0; error: iwi_release_fw_dma(sc); return 1; } static void iwi_init_locked(struct iwi_softc *sc) { struct iwi_rx_data *data; int i; IWI_LOCK_ASSERT(sc); if (sc->fw_state == IWI_FW_LOADING) { device_printf(sc->sc_dev, "%s: already loading\n", __func__); return; /* XXX: condvar? 
*/ } iwi_stop_locked(sc); IWI_STATE_BEGIN(sc, IWI_FW_LOADING); if (iwi_reset(sc) != 0) { device_printf(sc->sc_dev, "could not reset adapter\n"); goto fail; } if (iwi_load_firmware(sc, &sc->fw_boot) != 0) { device_printf(sc->sc_dev, "could not load boot firmware %s\n", sc->fw_boot.name); goto fail; } if (iwi_load_ucode(sc, &sc->fw_uc) != 0) { device_printf(sc->sc_dev, "could not load microcode %s\n", sc->fw_uc.name); goto fail; } iwi_stop_master(sc); CSR_WRITE_4(sc, IWI_CSR_CMD_BASE, sc->cmdq.physaddr); CSR_WRITE_4(sc, IWI_CSR_CMD_SIZE, sc->cmdq.count); CSR_WRITE_4(sc, IWI_CSR_CMD_WIDX, sc->cmdq.cur); CSR_WRITE_4(sc, IWI_CSR_TX1_BASE, sc->txq[0].physaddr); CSR_WRITE_4(sc, IWI_CSR_TX1_SIZE, sc->txq[0].count); CSR_WRITE_4(sc, IWI_CSR_TX1_WIDX, sc->txq[0].cur); CSR_WRITE_4(sc, IWI_CSR_TX2_BASE, sc->txq[1].physaddr); CSR_WRITE_4(sc, IWI_CSR_TX2_SIZE, sc->txq[1].count); CSR_WRITE_4(sc, IWI_CSR_TX2_WIDX, sc->txq[1].cur); CSR_WRITE_4(sc, IWI_CSR_TX3_BASE, sc->txq[2].physaddr); CSR_WRITE_4(sc, IWI_CSR_TX3_SIZE, sc->txq[2].count); CSR_WRITE_4(sc, IWI_CSR_TX3_WIDX, sc->txq[2].cur); CSR_WRITE_4(sc, IWI_CSR_TX4_BASE, sc->txq[3].physaddr); CSR_WRITE_4(sc, IWI_CSR_TX4_SIZE, sc->txq[3].count); CSR_WRITE_4(sc, IWI_CSR_TX4_WIDX, sc->txq[3].cur); for (i = 0; i < sc->rxq.count; i++) { data = &sc->rxq.data[i]; CSR_WRITE_4(sc, data->reg, data->physaddr); } CSR_WRITE_4(sc, IWI_CSR_RX_WIDX, sc->rxq.count - 1); if (iwi_load_firmware(sc, &sc->fw_fw) != 0) { device_printf(sc->sc_dev, "could not load main firmware %s\n", sc->fw_fw.name); goto fail; } sc->flags |= IWI_FLAG_FW_INITED; IWI_STATE_END(sc, IWI_FW_LOADING); if (iwi_config(sc) != 0) { device_printf(sc->sc_dev, "unable to enable adapter\n"); goto fail2; } callout_reset(&sc->sc_wdtimer, hz, iwi_watchdog, sc); sc->sc_running = 1; return; fail: IWI_STATE_END(sc, IWI_FW_LOADING); fail2: iwi_stop_locked(sc); } static void iwi_init(void *priv) { struct iwi_softc *sc = priv; struct ieee80211com *ic = &sc->sc_ic; IWI_LOCK_DECL; IWI_LOCK(sc); iwi_init_locked(sc); IWI_UNLOCK(sc); if (sc->sc_running) ieee80211_start_all(ic); } static void iwi_stop_locked(void *priv) { struct iwi_softc *sc = priv; IWI_LOCK_ASSERT(sc); sc->sc_running = 0; if (sc->sc_softled) { callout_stop(&sc->sc_ledtimer); sc->sc_blinking = 0; } callout_stop(&sc->sc_wdtimer); callout_stop(&sc->sc_rftimer); iwi_stop_master(sc); CSR_WRITE_4(sc, IWI_CSR_RST, IWI_RST_SOFT_RESET); /* reset rings */ iwi_reset_cmd_ring(sc, &sc->cmdq); iwi_reset_tx_ring(sc, &sc->txq[0]); iwi_reset_tx_ring(sc, &sc->txq[1]); iwi_reset_tx_ring(sc, &sc->txq[2]); iwi_reset_tx_ring(sc, &sc->txq[3]); iwi_reset_rx_ring(sc, &sc->rxq); sc->sc_tx_timer = 0; sc->sc_state_timer = 0; sc->sc_busy_timer = 0; sc->flags &= ~(IWI_FLAG_BUSY | IWI_FLAG_ASSOCIATED); sc->fw_state = IWI_FW_IDLE; wakeup(sc); } static void iwi_stop(struct iwi_softc *sc) { IWI_LOCK_DECL; IWI_LOCK(sc); iwi_stop_locked(sc); IWI_UNLOCK(sc); } static void iwi_restart(void *arg, int npending) { struct iwi_softc *sc = arg; iwi_init(sc); } /* * Return whether or not the radio is enabled in hardware * (i.e. the rfkill switch is "off"). 
*/ static int iwi_getrfkill(struct iwi_softc *sc) { return (CSR_READ_4(sc, IWI_CSR_IO) & IWI_IO_RADIO_ENABLED) == 0; } static void iwi_radio_on(void *arg, int pending) { struct iwi_softc *sc = arg; struct ieee80211com *ic = &sc->sc_ic; device_printf(sc->sc_dev, "radio turned on\n"); iwi_init(sc); ieee80211_notify_radio(ic, 1); } static void iwi_rfkill_poll(void *arg) { struct iwi_softc *sc = arg; IWI_LOCK_ASSERT(sc); /* * Check for a change in rfkill state. We get an * interrupt when a radio is disabled but not when * it is enabled so we must poll for the latter. */ if (!iwi_getrfkill(sc)) { ieee80211_runtask(&sc->sc_ic, &sc->sc_radiontask); return; } callout_reset(&sc->sc_rftimer, 2*hz, iwi_rfkill_poll, sc); } static void iwi_radio_off(void *arg, int pending) { struct iwi_softc *sc = arg; struct ieee80211com *ic = &sc->sc_ic; IWI_LOCK_DECL; device_printf(sc->sc_dev, "radio turned off\n"); ieee80211_notify_radio(ic, 0); IWI_LOCK(sc); iwi_stop_locked(sc); iwi_rfkill_poll(sc); IWI_UNLOCK(sc); } static int iwi_sysctl_stats(SYSCTL_HANDLER_ARGS) { struct iwi_softc *sc = arg1; uint32_t size, buf[128]; memset(buf, 0, sizeof buf); if (!(sc->flags & IWI_FLAG_FW_INITED)) return SYSCTL_OUT(req, buf, sizeof buf); size = min(CSR_READ_4(sc, IWI_CSR_TABLE0_SIZE), 128 - 1); CSR_READ_REGION_4(sc, IWI_CSR_TABLE0_BASE, &buf[1], size); return SYSCTL_OUT(req, buf, size); } static int iwi_sysctl_radio(SYSCTL_HANDLER_ARGS) { struct iwi_softc *sc = arg1; int val = !iwi_getrfkill(sc); return SYSCTL_OUT(req, &val, sizeof val); } /* * Add sysctl knobs. */ static void iwi_sysctlattach(struct iwi_softc *sc) { struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(sc->sc_dev); struct sysctl_oid *tree = device_get_sysctl_tree(sc->sc_dev); SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, "radio", CTLTYPE_INT | CTLFLAG_RD, sc, 0, iwi_sysctl_radio, "I", "radio transmitter switch state (0=off, 1=on)"); SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, "stats", CTLTYPE_OPAQUE | CTLFLAG_RD, sc, 0, iwi_sysctl_stats, "S", "statistics"); sc->bluetooth = 0; SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, "bluetooth", CTLFLAG_RW, &sc->bluetooth, 0, "bluetooth coexistence"); sc->antenna = IWI_ANTENNA_AUTO; SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, "antenna", CTLFLAG_RW, &sc->antenna, 0, "antenna (0=auto)"); } /* * LED support. * * Different cards have different capabilities. Some have three * led's while others have only one. The linux ipw driver defines * led's for link state (associated or not), band (11a, 11g, 11b), * and for link activity. We use one led and vary the blink rate * according to the tx/rx traffic a la the ath driver. */ static __inline uint32_t iwi_toggle_event(uint32_t r) { return r &~ (IWI_RST_STANDBY | IWI_RST_GATE_ODMA | IWI_RST_GATE_IDMA | IWI_RST_GATE_ADMA); } static uint32_t iwi_read_event(struct iwi_softc *sc) { return MEM_READ_4(sc, IWI_MEM_EEPROM_EVENT); } static void iwi_write_event(struct iwi_softc *sc, uint32_t v) { MEM_WRITE_4(sc, IWI_MEM_EEPROM_EVENT, v); } static void iwi_led_done(void *arg) { struct iwi_softc *sc = arg; sc->sc_blinking = 0; } /* * Turn the activity LED off: flip the pin and then set a timer so no * update will happen for the specified duration. */ static void iwi_led_off(void *arg) { struct iwi_softc *sc = arg; uint32_t v; v = iwi_read_event(sc); v &= ~sc->sc_ledpin; iwi_write_event(sc, iwi_toggle_event(v)); callout_reset(&sc->sc_ledtimer, sc->sc_ledoff, iwi_led_done, sc); } /* * Blink the LED according to the specified on/off times. 
*/ static void iwi_led_blink(struct iwi_softc *sc, int on, int off) { uint32_t v; v = iwi_read_event(sc); v |= sc->sc_ledpin; iwi_write_event(sc, iwi_toggle_event(v)); sc->sc_blinking = 1; sc->sc_ledoff = off; callout_reset(&sc->sc_ledtimer, on, iwi_led_off, sc); } static void iwi_led_event(struct iwi_softc *sc, int event) { /* NB: on/off times from the Atheros NDIS driver, w/ permission */ static const struct { u_int rate; /* tx/rx iwi rate */ u_int16_t timeOn; /* LED on time (ms) */ u_int16_t timeOff; /* LED off time (ms) */ } blinkrates[] = { { IWI_RATE_OFDM54, 40, 10 }, { IWI_RATE_OFDM48, 44, 11 }, { IWI_RATE_OFDM36, 50, 13 }, { IWI_RATE_OFDM24, 57, 14 }, { IWI_RATE_OFDM18, 67, 16 }, { IWI_RATE_OFDM12, 80, 20 }, { IWI_RATE_DS11, 100, 25 }, { IWI_RATE_OFDM9, 133, 34 }, { IWI_RATE_OFDM6, 160, 40 }, { IWI_RATE_DS5, 200, 50 }, { 6, 240, 58 }, /* XXX 3Mb/s if it existed */ { IWI_RATE_DS2, 267, 66 }, { IWI_RATE_DS1, 400, 100 }, { 0, 500, 130 }, /* unknown rate/polling */ }; uint32_t txrate; int j = 0; /* XXX silence compiler */ sc->sc_ledevent = ticks; /* time of last event */ if (sc->sc_blinking) /* don't interrupt active blink */ return; switch (event) { case IWI_LED_POLL: j = nitems(blinkrates)-1; break; case IWI_LED_TX: /* read current transmission rate from adapter */ txrate = CSR_READ_4(sc, IWI_CSR_CURRENT_TX_RATE); if (blinkrates[sc->sc_txrix].rate != txrate) { for (j = 0; j < nitems(blinkrates)-1; j++) if (blinkrates[j].rate == txrate) break; sc->sc_txrix = j; } else j = sc->sc_txrix; break; case IWI_LED_RX: if (blinkrates[sc->sc_rxrix].rate != sc->sc_rxrate) { for (j = 0; j < nitems(blinkrates)-1; j++) if (blinkrates[j].rate == sc->sc_rxrate) break; sc->sc_rxrix = j; } else j = sc->sc_rxrix; break; } /* XXX beware of overflow */ iwi_led_blink(sc, (blinkrates[j].timeOn * hz) / 1000, (blinkrates[j].timeOff * hz) / 1000); } static int iwi_sysctl_softled(SYSCTL_HANDLER_ARGS) { struct iwi_softc *sc = arg1; int softled = sc->sc_softled; int error; error = sysctl_handle_int(oidp, &softled, 0, req); if (error || !req->newptr) return error; softled = (softled != 0); if (softled != sc->sc_softled) { if (softled) { uint32_t v = iwi_read_event(sc); v &= ~sc->sc_ledpin; iwi_write_event(sc, iwi_toggle_event(v)); } sc->sc_softled = softled; } return 0; } static void iwi_ledattach(struct iwi_softc *sc) { struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(sc->sc_dev); struct sysctl_oid *tree = device_get_sysctl_tree(sc->sc_dev); sc->sc_blinking = 0; sc->sc_ledstate = 1; sc->sc_ledidle = (2700*hz)/1000; /* 2.7sec */ callout_init_mtx(&sc->sc_ledtimer, &sc->sc_mtx, 0); SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, "softled", CTLTYPE_INT | CTLFLAG_RW, sc, 0, iwi_sysctl_softled, "I", "enable/disable software LED support"); SYSCTL_ADD_UINT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, "ledpin", CTLFLAG_RW, &sc->sc_ledpin, 0, "pin setting to turn activity LED on"); SYSCTL_ADD_UINT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, "ledidle", CTLFLAG_RW, &sc->sc_ledidle, 0, "idle time for inactivity LED (ticks)"); /* XXX for debugging */ SYSCTL_ADD_UINT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, "nictype", CTLFLAG_RD, &sc->sc_nictype, 0, "NIC type from EEPROM"); sc->sc_ledpin = IWI_RST_LED_ACTIVITY; sc->sc_softled = 1; sc->sc_nictype = (iwi_read_prom_word(sc, IWI_EEPROM_NIC) >> 8) & 0xff; if (sc->sc_nictype == 1) { /* * NB: led's are reversed. 
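iwi_led_event() above picks a blinkrates entry by the current tx/rx rate and converts its millisecond on/off times to ticks with (time * hz) / 1000; the "XXX beware of overflow" note refers to that intermediate product. A hedged sketch of the same arithmetic widened to 64 bits (not what the driver does, just the conversion made overflow-safe):

#include <sys/limits.h>		/* INT_MAX in the kernel; <limits.h> in userland */

static int
example_ms_to_ticks(u_int ms)
{
	int64_t t = (int64_t)ms * hz / 1000;	/* hz is the kernel tick rate */

	return (t > INT_MAX ? INT_MAX : (int)t);
}

/* e.g. callout_reset(&sc->sc_ledtimer, example_ms_to_ticks(on_ms), iwi_led_off, sc); */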
*/ sc->sc_ledpin = IWI_RST_LED_ASSOCIATED; } } static void iwi_scan_start(struct ieee80211com *ic) { /* ignore */ } static void iwi_set_channel(struct ieee80211com *ic) { struct iwi_softc *sc = ic->ic_softc; if (sc->fw_state == IWI_FW_IDLE) iwi_setcurchan(sc, ic->ic_curchan->ic_ieee); } static void iwi_scan_curchan(struct ieee80211_scan_state *ss, unsigned long maxdwell) { struct ieee80211vap *vap = ss->ss_vap; struct iwi_softc *sc = vap->iv_ic->ic_softc; IWI_LOCK_DECL; IWI_LOCK(sc); if (iwi_scanchan(sc, maxdwell, 0)) ieee80211_cancel_scan(vap); IWI_UNLOCK(sc); } static void iwi_scan_mindwell(struct ieee80211_scan_state *ss) { /* NB: don't try to abort scan; wait for firmware to finish */ } static void iwi_scan_end(struct ieee80211com *ic) { struct iwi_softc *sc = ic->ic_softc; IWI_LOCK_DECL; IWI_LOCK(sc); sc->flags &= ~IWI_FLAG_CHANNEL_SCAN; /* NB: make sure we're still scanning */ if (sc->fw_state == IWI_FW_SCANNING) iwi_cmd(sc, IWI_CMD_ABORT_SCAN, NULL, 0); IWI_UNLOCK(sc); } static void iwi_collect_bands(struct ieee80211com *ic, uint8_t bands[], size_t bands_sz) { struct iwi_softc *sc = ic->ic_softc; device_t dev = sc->sc_dev; memset(bands, 0, bands_sz); setbit(bands, IEEE80211_MODE_11B); setbit(bands, IEEE80211_MODE_11G); if (pci_get_device(dev) >= 0x4223) setbit(bands, IEEE80211_MODE_11A); } static void iwi_getradiocaps(struct ieee80211com *ic, int maxchans, int *nchans, struct ieee80211_channel chans[]) { uint8_t bands[IEEE80211_MODE_BYTES]; iwi_collect_bands(ic, bands, sizeof(bands)); *nchans = 0; if (isset(bands, IEEE80211_MODE_11B) || isset(bands, IEEE80211_MODE_11G)) ieee80211_add_channel_list_2ghz(chans, maxchans, nchans, def_chan_2ghz, nitems(def_chan_2ghz), bands, 0); if (isset(bands, IEEE80211_MODE_11A)) { ieee80211_add_channel_list_5ghz(chans, maxchans, nchans, def_chan_5ghz_band1, nitems(def_chan_5ghz_band1), bands, 0); ieee80211_add_channel_list_5ghz(chans, maxchans, nchans, def_chan_5ghz_band2, nitems(def_chan_5ghz_band2), bands, 0); ieee80211_add_channel_list_5ghz(chans, maxchans, nchans, def_chan_5ghz_band3, nitems(def_chan_5ghz_band3), bands, 0); } } Index: head/sys/dev/kbd/kbd.c =================================================================== --- head/sys/dev/kbd/kbd.c (revision 327948) +++ head/sys/dev/kbd/kbd.c (revision 327949) @@ -1,1477 +1,1478 @@ /*- * SPDX-License-Identifier: BSD-2-Clause-FreeBSD * * Copyright (c) 1999 Kazutaka YOKOTA * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer as * the first lines of this file unmodified. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHORS ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
* IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * */ #include __FBSDID("$FreeBSD$"); #include "opt_kbd.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #define KBD_INDEX(dev) dev2unit(dev) #define KB_QSIZE 512 #define KB_BUFSIZE 64 typedef struct genkbd_softc { int gkb_flags; /* flag/status bits */ #define KB_ASLEEP (1 << 0) struct selinfo gkb_rsel; char gkb_q[KB_QSIZE]; /* input queue */ unsigned int gkb_q_start; unsigned int gkb_q_length; } genkbd_softc_t; static SLIST_HEAD(, keyboard_driver) keyboard_drivers = SLIST_HEAD_INITIALIZER(keyboard_drivers); SET_DECLARE(kbddriver_set, const keyboard_driver_t); /* local arrays */ /* * We need at least one entry each in order to initialize a keyboard * for the kernel console. The arrays will be increased dynamically * when necessary. */ static int keyboards = 1; static keyboard_t *kbd_ini; static keyboard_t **keyboard = &kbd_ini; static keyboard_switch_t *kbdsw_ini; keyboard_switch_t **kbdsw = &kbdsw_ini; static int keymap_restrict_change; static SYSCTL_NODE(_hw, OID_AUTO, kbd, CTLFLAG_RD, 0, "kbd"); SYSCTL_INT(_hw_kbd, OID_AUTO, keymap_restrict_change, CTLFLAG_RW, &keymap_restrict_change, 0, "restrict ability to change keymap"); #define ARRAY_DELTA 4 static int kbd_realloc_array(void) { keyboard_t **new_kbd; keyboard_switch_t **new_kbdsw; - int newsize; + u_int newsize; int s; s = spltty(); newsize = rounddown(keyboards + ARRAY_DELTA, ARRAY_DELTA); - new_kbd = malloc(sizeof(*new_kbd)*newsize, M_DEVBUF, M_NOWAIT|M_ZERO); + new_kbd = mallocarray(newsize, sizeof(*new_kbd), M_DEVBUF, + M_NOWAIT|M_ZERO); if (new_kbd == NULL) { splx(s); return (ENOMEM); } - new_kbdsw = malloc(sizeof(*new_kbdsw)*newsize, M_DEVBUF, + new_kbdsw = mallocarray(newsize, sizeof(*new_kbdsw), M_DEVBUF, M_NOWAIT|M_ZERO); if (new_kbdsw == NULL) { free(new_kbd, M_DEVBUF); splx(s); return (ENOMEM); } bcopy(keyboard, new_kbd, sizeof(*keyboard)*keyboards); bcopy(kbdsw, new_kbdsw, sizeof(*kbdsw)*keyboards); if (keyboards > 1) { free(keyboard, M_DEVBUF); free(kbdsw, M_DEVBUF); } keyboard = new_kbd; kbdsw = new_kbdsw; keyboards = newsize; splx(s); if (bootverbose) printf("kbd: new array size %d\n", keyboards); return (0); } /* * Low-level keyboard driver functions * Keyboard subdrivers, such as the AT keyboard driver and the USB keyboard * driver, call these functions to initialize the keyboard_t structure * and register it to the virtual keyboard driver `kbd'. 
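The hunk above is the substance of this revision: a hand-written sizeof(*new_kbd) * newsize product can wrap silently, while mallocarray(nmemb, size, type, flags) performs the same multiplication behind an overflow check (and newsize becomes u_int to match). A rough sketch of the guard mallocarray(9) provides; the in-kernel implementation may differ in detail, for example it may panic on overflow rather than fail the allocation.

/* Sketch only: assumes the kernel <sys/malloc.h> environment and SIZE_MAX. */
static void *
example_mallocarray(size_t nmemb, size_t size, struct malloc_type *type,
    int flags)
{
	/* refuse requests whose byte count would not fit in a size_t */
	if (size != 0 && nmemb > SIZE_MAX / size)
		return (NULL);
	return (malloc(nmemb * size, type, flags));
}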
*/ /* initialize the keyboard_t structure */ void kbd_init_struct(keyboard_t *kbd, char *name, int type, int unit, int config, int port, int port_size) { kbd->kb_flags = KB_NO_DEVICE; /* device has not been found */ kbd->kb_name = name; kbd->kb_type = type; kbd->kb_unit = unit; kbd->kb_config = config & ~KB_CONF_PROBE_ONLY; kbd->kb_led = 0; /* unknown */ kbd->kb_io_base = port; kbd->kb_io_size = port_size; kbd->kb_data = NULL; kbd->kb_keymap = NULL; kbd->kb_accentmap = NULL; kbd->kb_fkeytab = NULL; kbd->kb_fkeytab_size = 0; kbd->kb_delay1 = KB_DELAY1; /* these values are advisory only */ kbd->kb_delay2 = KB_DELAY2; kbd->kb_count = 0L; bzero(kbd->kb_lastact, sizeof(kbd->kb_lastact)); } void kbd_set_maps(keyboard_t *kbd, keymap_t *keymap, accentmap_t *accmap, fkeytab_t *fkeymap, int fkeymap_size) { kbd->kb_keymap = keymap; kbd->kb_accentmap = accmap; kbd->kb_fkeytab = fkeymap; kbd->kb_fkeytab_size = fkeymap_size; } /* declare a new keyboard driver */ int kbd_add_driver(keyboard_driver_t *driver) { if (SLIST_NEXT(driver, link)) return (EINVAL); SLIST_INSERT_HEAD(&keyboard_drivers, driver, link); return (0); } int kbd_delete_driver(keyboard_driver_t *driver) { SLIST_REMOVE(&keyboard_drivers, driver, keyboard_driver, link); SLIST_NEXT(driver, link) = NULL; return (0); } /* register a keyboard and associate it with a function table */ int kbd_register(keyboard_t *kbd) { const keyboard_driver_t **list; const keyboard_driver_t *p; keyboard_t *mux; keyboard_info_t ki; int index; mux = kbd_get_keyboard(kbd_find_keyboard("kbdmux", -1)); for (index = 0; index < keyboards; ++index) { if (keyboard[index] == NULL) break; } if (index >= keyboards) { if (kbd_realloc_array()) return (-1); } kbd->kb_index = index; KBD_UNBUSY(kbd); KBD_VALID(kbd); kbd->kb_active = 0; /* disabled until someone calls kbd_enable() */ kbd->kb_token = NULL; kbd->kb_callback.kc_func = NULL; kbd->kb_callback.kc_arg = NULL; SLIST_FOREACH(p, &keyboard_drivers, link) { if (strcmp(p->name, kbd->kb_name) == 0) { keyboard[index] = kbd; kbdsw[index] = p->kbdsw; if (mux != NULL) { bzero(&ki, sizeof(ki)); strcpy(ki.kb_name, kbd->kb_name); ki.kb_unit = kbd->kb_unit; (void)kbdd_ioctl(mux, KBADDKBD, (caddr_t) &ki); } return (index); } } SET_FOREACH(list, kbddriver_set) { p = *list; if (strcmp(p->name, kbd->kb_name) == 0) { keyboard[index] = kbd; kbdsw[index] = p->kbdsw; if (mux != NULL) { bzero(&ki, sizeof(ki)); strcpy(ki.kb_name, kbd->kb_name); ki.kb_unit = kbd->kb_unit; (void)kbdd_ioctl(mux, KBADDKBD, (caddr_t) &ki); } return (index); } } return (-1); } int kbd_unregister(keyboard_t *kbd) { int error; int s; if ((kbd->kb_index < 0) || (kbd->kb_index >= keyboards)) return (ENOENT); if (keyboard[kbd->kb_index] != kbd) return (ENOENT); s = spltty(); if (KBD_IS_BUSY(kbd)) { error = (*kbd->kb_callback.kc_func)(kbd, KBDIO_UNLOADING, kbd->kb_callback.kc_arg); if (error) { splx(s); return (error); } if (KBD_IS_BUSY(kbd)) { splx(s); return (EBUSY); } } KBD_INVALID(kbd); keyboard[kbd->kb_index] = NULL; kbdsw[kbd->kb_index] = NULL; splx(s); return (0); } /* find a function table by the driver name */ keyboard_switch_t * kbd_get_switch(char *driver) { const keyboard_driver_t **list; const keyboard_driver_t *p; SLIST_FOREACH(p, &keyboard_drivers, link) { if (strcmp(p->name, driver) == 0) return (p->kbdsw); } SET_FOREACH(list, kbddriver_set) { p = *list; if (strcmp(p->name, driver) == 0) return (p->kbdsw); } return (NULL); } /* * Keyboard client functions * Keyboard clients, such as the console driver `syscons' and the keyboard * cdev driver, use these 
functions to claim and release a keyboard for * exclusive use. */ /* * find the keyboard specified by a driver name and a unit number * starting at given index */ int kbd_find_keyboard2(char *driver, int unit, int index) { int i; if ((index < 0) || (index >= keyboards)) return (-1); for (i = index; i < keyboards; ++i) { if (keyboard[i] == NULL) continue; if (!KBD_IS_VALID(keyboard[i])) continue; if (strcmp("*", driver) && strcmp(keyboard[i]->kb_name, driver)) continue; if ((unit != -1) && (keyboard[i]->kb_unit != unit)) continue; return (i); } return (-1); } /* find the keyboard specified by a driver name and a unit number */ int kbd_find_keyboard(char *driver, int unit) { return (kbd_find_keyboard2(driver, unit, 0)); } /* allocate a keyboard */ int kbd_allocate(char *driver, int unit, void *id, kbd_callback_func_t *func, void *arg) { int index; int s; if (func == NULL) return (-1); s = spltty(); index = kbd_find_keyboard(driver, unit); if (index >= 0) { if (KBD_IS_BUSY(keyboard[index])) { splx(s); return (-1); } keyboard[index]->kb_token = id; KBD_BUSY(keyboard[index]); keyboard[index]->kb_callback.kc_func = func; keyboard[index]->kb_callback.kc_arg = arg; kbdd_clear_state(keyboard[index]); } splx(s); return (index); } int kbd_release(keyboard_t *kbd, void *id) { int error; int s; s = spltty(); if (!KBD_IS_VALID(kbd) || !KBD_IS_BUSY(kbd)) { error = EINVAL; } else if (kbd->kb_token != id) { error = EPERM; } else { kbd->kb_token = NULL; KBD_UNBUSY(kbd); kbd->kb_callback.kc_func = NULL; kbd->kb_callback.kc_arg = NULL; kbdd_clear_state(kbd); error = 0; } splx(s); return (error); } int kbd_change_callback(keyboard_t *kbd, void *id, kbd_callback_func_t *func, void *arg) { int error; int s; s = spltty(); if (!KBD_IS_VALID(kbd) || !KBD_IS_BUSY(kbd)) { error = EINVAL; } else if (kbd->kb_token != id) { error = EPERM; } else if (func == NULL) { error = EINVAL; } else { kbd->kb_callback.kc_func = func; kbd->kb_callback.kc_arg = arg; error = 0; } splx(s); return (error); } /* get a keyboard structure */ keyboard_t * kbd_get_keyboard(int index) { if ((index < 0) || (index >= keyboards)) return (NULL); if (keyboard[index] == NULL) return (NULL); if (!KBD_IS_VALID(keyboard[index])) return (NULL); return (keyboard[index]); } /* * The back door for the console driver; configure keyboards * This function is for the kernel console to initialize keyboards * at very early stage. */ int kbd_configure(int flags) { const keyboard_driver_t **list; const keyboard_driver_t *p; SLIST_FOREACH(p, &keyboard_drivers, link) { if (p->configure != NULL) (*p->configure)(flags); } SET_FOREACH(list, kbddriver_set) { p = *list; if (p->configure != NULL) (*p->configure)(flags); } return (0); } #ifdef KBD_INSTALL_CDEV /* * Virtual keyboard cdev driver functions * The virtual keyboard driver dispatches driver functions to * appropriate subdrivers. 
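Claiming a keyboard is token-based: kbd_allocate() records the caller's opaque id and callback, and only the holder of that same id can kbd_release() it. A minimal client-side sketch, with the callback body and helper name as placeholders; it assumes kernel code built against the keyboard driver headers.

static int
example_kbd_event(keyboard_t *kbd, int event, void *arg)
{
	return (0);	/* would handle KBDIO_KEYINPUT / KBDIO_UNLOADING */
}

static void
example_claim_first_keyboard(void *token)
{
	int idx;
	keyboard_t *kbd;

	/* "*" matches any driver name, unit -1 matches any unit */
	idx = kbd_allocate("*", -1, token, example_kbd_event, token);
	if (idx < 0)
		return;			/* none registered, or all busy */
	kbd = kbd_get_keyboard(idx);
	/* ... use the keyboard through the kbdd_*() dispatch macros ... */
	kbd_release(kbd, token);
}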
*/ #define KBD_UNIT(dev) dev2unit(dev) static d_open_t genkbdopen; static d_close_t genkbdclose; static d_read_t genkbdread; static d_write_t genkbdwrite; static d_ioctl_t genkbdioctl; static d_poll_t genkbdpoll; static struct cdevsw kbd_cdevsw = { .d_version = D_VERSION, .d_flags = D_NEEDGIANT, .d_open = genkbdopen, .d_close = genkbdclose, .d_read = genkbdread, .d_write = genkbdwrite, .d_ioctl = genkbdioctl, .d_poll = genkbdpoll, .d_name = "kbd", }; int kbd_attach(keyboard_t *kbd) { if (kbd->kb_index >= keyboards) return (EINVAL); if (keyboard[kbd->kb_index] != kbd) return (EINVAL); kbd->kb_dev = make_dev(&kbd_cdevsw, kbd->kb_index, UID_ROOT, GID_WHEEL, 0600, "%s%r", kbd->kb_name, kbd->kb_unit); make_dev_alias(kbd->kb_dev, "kbd%r", kbd->kb_index); kbd->kb_dev->si_drv1 = malloc(sizeof(genkbd_softc_t), M_DEVBUF, M_WAITOK | M_ZERO); printf("kbd%d at %s%d\n", kbd->kb_index, kbd->kb_name, kbd->kb_unit); return (0); } int kbd_detach(keyboard_t *kbd) { if (kbd->kb_index >= keyboards) return (EINVAL); if (keyboard[kbd->kb_index] != kbd) return (EINVAL); free(kbd->kb_dev->si_drv1, M_DEVBUF); destroy_dev(kbd->kb_dev); return (0); } /* * Generic keyboard cdev driver functions * Keyboard subdrivers may call these functions to implement common * driver functions. */ static void genkbd_putc(genkbd_softc_t *sc, char c) { unsigned int p; if (sc->gkb_q_length == KB_QSIZE) return; p = (sc->gkb_q_start + sc->gkb_q_length) % KB_QSIZE; sc->gkb_q[p] = c; sc->gkb_q_length++; } static size_t genkbd_getc(genkbd_softc_t *sc, char *buf, size_t len) { /* Determine copy size. */ if (sc->gkb_q_length == 0) return (0); if (len >= sc->gkb_q_length) len = sc->gkb_q_length; if (len >= KB_QSIZE - sc->gkb_q_start) len = KB_QSIZE - sc->gkb_q_start; /* Copy out data and progress offset. */ memcpy(buf, sc->gkb_q + sc->gkb_q_start, len); sc->gkb_q_start = (sc->gkb_q_start + len) % KB_QSIZE; sc->gkb_q_length -= len; return (len); } static kbd_callback_func_t genkbd_event; static int genkbdopen(struct cdev *dev, int mode, int flag, struct thread *td) { keyboard_t *kbd; genkbd_softc_t *sc; int s; int i; s = spltty(); sc = dev->si_drv1; kbd = kbd_get_keyboard(KBD_INDEX(dev)); if ((sc == NULL) || (kbd == NULL) || !KBD_IS_VALID(kbd)) { splx(s); return (ENXIO); } i = kbd_allocate(kbd->kb_name, kbd->kb_unit, sc, genkbd_event, (void *)sc); if (i < 0) { splx(s); return (EBUSY); } /* assert(i == kbd->kb_index) */ /* assert(kbd == kbd_get_keyboard(i)) */ /* * NOTE: even when we have successfully claimed a keyboard, * the device may still be missing (!KBD_HAS_DEVICE(kbd)). */ sc->gkb_q_length = 0; splx(s); return (0); } static int genkbdclose(struct cdev *dev, int mode, int flag, struct thread *td) { keyboard_t *kbd; genkbd_softc_t *sc; int s; /* * NOTE: the device may have already become invalid. * kbd == NULL || !KBD_IS_VALID(kbd) */ s = spltty(); sc = dev->si_drv1; kbd = kbd_get_keyboard(KBD_INDEX(dev)); if ((sc == NULL) || (kbd == NULL) || !KBD_IS_VALID(kbd)) { /* XXX: we shall be forgiving and don't report error... 
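genkbd_putc() and genkbd_getc() above implement a fixed 512-byte ring: writers append at (start + length) % KB_QSIZE, the reader always takes a contiguous run starting at start, so a wrapped queue drains in at most two reads. The same arithmetic as a standalone, userland-compilable sketch:

#include <stddef.h>
#include <string.h>

#define EX_QSIZE	512

struct ex_ring {
	char		q[EX_QSIZE];
	unsigned int	start;
	unsigned int	length;
};

static int
ex_put(struct ex_ring *r, char c)
{
	if (r->length == EX_QSIZE)
		return (0);			/* full: drop, as the driver does */
	r->q[(r->start + r->length) % EX_QSIZE] = c;
	r->length++;
	return (1);
}

static size_t
ex_get(struct ex_ring *r, char *buf, size_t len)
{
	if (r->length == 0)
		return (0);
	if (len > r->length)
		len = r->length;
	if (len > EX_QSIZE - r->start)		/* clamp to the contiguous part */
		len = EX_QSIZE - r->start;
	memcpy(buf, r->q + r->start, len);
	r->start = (r->start + len) % EX_QSIZE;
	r->length -= len;
	return (len);
}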
*/ } else { kbd_release(kbd, (void *)sc); } splx(s); return (0); } static int genkbdread(struct cdev *dev, struct uio *uio, int flag) { keyboard_t *kbd; genkbd_softc_t *sc; u_char buffer[KB_BUFSIZE]; int len; int error; int s; /* wait for input */ s = spltty(); sc = dev->si_drv1; kbd = kbd_get_keyboard(KBD_INDEX(dev)); if ((sc == NULL) || (kbd == NULL) || !KBD_IS_VALID(kbd)) { splx(s); return (ENXIO); } while (sc->gkb_q_length == 0) { if (flag & O_NONBLOCK) { splx(s); return (EWOULDBLOCK); } sc->gkb_flags |= KB_ASLEEP; error = tsleep(sc, PZERO | PCATCH, "kbdrea", 0); kbd = kbd_get_keyboard(KBD_INDEX(dev)); if ((kbd == NULL) || !KBD_IS_VALID(kbd)) { splx(s); return (ENXIO); /* our keyboard has gone... */ } if (error) { sc->gkb_flags &= ~KB_ASLEEP; splx(s); return (error); } } splx(s); /* copy as much input as possible */ error = 0; while (uio->uio_resid > 0) { len = imin(uio->uio_resid, sizeof(buffer)); len = genkbd_getc(sc, buffer, len); if (len <= 0) break; error = uiomove(buffer, len, uio); if (error) break; } return (error); } static int genkbdwrite(struct cdev *dev, struct uio *uio, int flag) { keyboard_t *kbd; kbd = kbd_get_keyboard(KBD_INDEX(dev)); if ((kbd == NULL) || !KBD_IS_VALID(kbd)) return (ENXIO); return (ENODEV); } static int genkbdioctl(struct cdev *dev, u_long cmd, caddr_t arg, int flag, struct thread *td) { keyboard_t *kbd; int error; kbd = kbd_get_keyboard(KBD_INDEX(dev)); if ((kbd == NULL) || !KBD_IS_VALID(kbd)) return (ENXIO); error = kbdd_ioctl(kbd, cmd, arg); if (error == ENOIOCTL) error = ENODEV; return (error); } static int genkbdpoll(struct cdev *dev, int events, struct thread *td) { keyboard_t *kbd; genkbd_softc_t *sc; int revents; int s; revents = 0; s = spltty(); sc = dev->si_drv1; kbd = kbd_get_keyboard(KBD_INDEX(dev)); if ((sc == NULL) || (kbd == NULL) || !KBD_IS_VALID(kbd)) { revents = POLLHUP; /* the keyboard has gone */ } else if (events & (POLLIN | POLLRDNORM)) { if (sc->gkb_q_length > 0) revents = events & (POLLIN | POLLRDNORM); else selrecord(td, &sc->gkb_rsel); } splx(s); return (revents); } static int genkbd_event(keyboard_t *kbd, int event, void *arg) { genkbd_softc_t *sc; size_t len; u_char *cp; int mode; u_int c; /* assert(KBD_IS_VALID(kbd)) */ sc = (genkbd_softc_t *)arg; switch (event) { case KBDIO_KEYINPUT: break; case KBDIO_UNLOADING: /* the keyboard is going... */ kbd_release(kbd, (void *)sc); if (sc->gkb_flags & KB_ASLEEP) { sc->gkb_flags &= ~KB_ASLEEP; wakeup(sc); } selwakeuppri(&sc->gkb_rsel, PZERO); return (0); default: return (EINVAL); } /* obtain the current key input mode */ if (kbdd_ioctl(kbd, KDGKBMODE, (caddr_t)&mode)) mode = K_XLATE; /* read all pending input */ while (kbdd_check_char(kbd)) { c = kbdd_read_char(kbd, FALSE); if (c == NOKEY) continue; if (c == ERRKEY) /* XXX: ring bell? */ continue; if (!KBD_IS_BUSY(kbd)) /* the device is not open, discard the input */ continue; /* store the byte as is for K_RAW and K_CODE modes */ if (mode != K_XLATE) { genkbd_putc(sc, KEYCHAR(c)); continue; } /* K_XLATE */ if (c & RELKEY) /* key release is ignored */ continue; /* process special keys; most of them are just ignored... */ if (c & SPCLKEY) { switch (KEYCHAR(c)) { default: /* ignore them... 
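In K_XLATE mode the special-key handling that continues below expands backtab into the three-byte sequence ESC [ Z by pushing one byte at a time with genkbd_putc(). A tiny hypothetical helper expressing the same idiom; it is not part of the driver.

static void
example_putstr(genkbd_softc_t *sc, const char *s)
{
	while (*s != '\0')
		genkbd_putc(sc, *s++);
}

/* usage: example_putstr(sc, "\033[Z");	-- the backtab escape sequence */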
*/ continue; case BTAB: /* a backtab: ESC [ Z */ genkbd_putc(sc, 0x1b); genkbd_putc(sc, '['); genkbd_putc(sc, 'Z'); continue; } } /* normal chars, normal chars with the META, function keys */ switch (KEYFLAGS(c)) { case 0: /* a normal char */ genkbd_putc(sc, KEYCHAR(c)); break; case MKEY: /* the META flag: prepend ESC */ genkbd_putc(sc, 0x1b); genkbd_putc(sc, KEYCHAR(c)); break; case FKEY | SPCLKEY: /* a function key, return string */ cp = kbdd_get_fkeystr(kbd, KEYCHAR(c), &len); if (cp != NULL) { while (len-- > 0) genkbd_putc(sc, *cp++); } break; } } /* wake up sleeping/polling processes */ if (sc->gkb_q_length > 0) { if (sc->gkb_flags & KB_ASLEEP) { sc->gkb_flags &= ~KB_ASLEEP; wakeup(sc); } selwakeuppri(&sc->gkb_rsel, PZERO); } return (0); } #endif /* KBD_INSTALL_CDEV */ /* * Generic low-level keyboard functions * The low-level functions in the keyboard subdriver may use these * functions. */ #ifndef KBD_DISABLE_KEYMAP_LOAD static int key_change_ok(struct keyent_t *, struct keyent_t *, struct thread *); static int keymap_change_ok(keymap_t *, keymap_t *, struct thread *); static int accent_change_ok(accentmap_t *, accentmap_t *, struct thread *); static int fkey_change_ok(fkeytab_t *, fkeyarg_t *, struct thread *); #endif int genkbd_commonioctl(keyboard_t *kbd, u_long cmd, caddr_t arg) { keymap_t *mapp; okeymap_t *omapp; keyarg_t *keyp; fkeyarg_t *fkeyp; int s; int i, j; int error; s = spltty(); switch (cmd) { case KDGKBINFO: /* get keyboard information */ ((keyboard_info_t *)arg)->kb_index = kbd->kb_index; i = imin(strlen(kbd->kb_name) + 1, sizeof(((keyboard_info_t *)arg)->kb_name)); bcopy(kbd->kb_name, ((keyboard_info_t *)arg)->kb_name, i); ((keyboard_info_t *)arg)->kb_unit = kbd->kb_unit; ((keyboard_info_t *)arg)->kb_type = kbd->kb_type; ((keyboard_info_t *)arg)->kb_config = kbd->kb_config; ((keyboard_info_t *)arg)->kb_flags = kbd->kb_flags; break; case KDGKBTYPE: /* get keyboard type */ *(int *)arg = kbd->kb_type; break; case KDGETREPEAT: /* get keyboard repeat rate */ ((int *)arg)[0] = kbd->kb_delay1; ((int *)arg)[1] = kbd->kb_delay2; break; case GIO_KEYMAP: /* get keyboard translation table */ error = copyout(kbd->kb_keymap, *(void **)arg, sizeof(keymap_t)); splx(s); return (error); case OGIO_KEYMAP: /* get keyboard translation table (compat) */ mapp = kbd->kb_keymap; omapp = (okeymap_t *)arg; omapp->n_keys = mapp->n_keys; for (i = 0; i < NUM_KEYS; i++) { for (j = 0; j < NUM_STATES; j++) omapp->key[i].map[j] = mapp->key[i].map[j]; omapp->key[i].spcl = mapp->key[i].spcl; omapp->key[i].flgs = mapp->key[i].flgs; } break; case PIO_KEYMAP: /* set keyboard translation table */ case OPIO_KEYMAP: /* set keyboard translation table (compat) */ #ifndef KBD_DISABLE_KEYMAP_LOAD mapp = malloc(sizeof *mapp, M_TEMP, M_WAITOK); if (cmd == OPIO_KEYMAP) { omapp = (okeymap_t *)arg; mapp->n_keys = omapp->n_keys; for (i = 0; i < NUM_KEYS; i++) { for (j = 0; j < NUM_STATES; j++) mapp->key[i].map[j] = omapp->key[i].map[j]; mapp->key[i].spcl = omapp->key[i].spcl; mapp->key[i].flgs = omapp->key[i].flgs; } } else { error = copyin(*(void **)arg, mapp, sizeof *mapp); if (error != 0) { splx(s); free(mapp, M_TEMP); return (error); } } error = keymap_change_ok(kbd->kb_keymap, mapp, curthread); if (error != 0) { splx(s); free(mapp, M_TEMP); return (error); } bzero(kbd->kb_accentmap, sizeof(*kbd->kb_accentmap)); bcopy(mapp, kbd->kb_keymap, sizeof(*kbd->kb_keymap)); free(mapp, M_TEMP); break; #else splx(s); return (ENODEV); #endif case GIO_KEYMAPENT: /* get keyboard translation table entry */ keyp = (keyarg_t 
*)arg; if (keyp->keynum >= sizeof(kbd->kb_keymap->key) / sizeof(kbd->kb_keymap->key[0])) { splx(s); return (EINVAL); } bcopy(&kbd->kb_keymap->key[keyp->keynum], &keyp->key, sizeof(keyp->key)); break; case PIO_KEYMAPENT: /* set keyboard translation table entry */ #ifndef KBD_DISABLE_KEYMAP_LOAD keyp = (keyarg_t *)arg; if (keyp->keynum >= sizeof(kbd->kb_keymap->key) / sizeof(kbd->kb_keymap->key[0])) { splx(s); return (EINVAL); } error = key_change_ok(&kbd->kb_keymap->key[keyp->keynum], &keyp->key, curthread); if (error != 0) { splx(s); return (error); } bcopy(&keyp->key, &kbd->kb_keymap->key[keyp->keynum], sizeof(keyp->key)); break; #else splx(s); return (ENODEV); #endif case GIO_DEADKEYMAP: /* get accent key translation table */ bcopy(kbd->kb_accentmap, arg, sizeof(*kbd->kb_accentmap)); break; case PIO_DEADKEYMAP: /* set accent key translation table */ #ifndef KBD_DISABLE_KEYMAP_LOAD error = accent_change_ok(kbd->kb_accentmap, (accentmap_t *)arg, curthread); if (error != 0) { splx(s); return (error); } bcopy(arg, kbd->kb_accentmap, sizeof(*kbd->kb_accentmap)); break; #else splx(s); return (ENODEV); #endif case GETFKEY: /* get functionkey string */ fkeyp = (fkeyarg_t *)arg; if (fkeyp->keynum >= kbd->kb_fkeytab_size) { splx(s); return (EINVAL); } bcopy(kbd->kb_fkeytab[fkeyp->keynum].str, fkeyp->keydef, kbd->kb_fkeytab[fkeyp->keynum].len); fkeyp->flen = kbd->kb_fkeytab[fkeyp->keynum].len; break; case SETFKEY: /* set functionkey string */ #ifndef KBD_DISABLE_KEYMAP_LOAD fkeyp = (fkeyarg_t *)arg; if (fkeyp->keynum >= kbd->kb_fkeytab_size) { splx(s); return (EINVAL); } error = fkey_change_ok(&kbd->kb_fkeytab[fkeyp->keynum], fkeyp, curthread); if (error != 0) { splx(s); return (error); } kbd->kb_fkeytab[fkeyp->keynum].len = min(fkeyp->flen, MAXFK); bcopy(fkeyp->keydef, kbd->kb_fkeytab[fkeyp->keynum].str, kbd->kb_fkeytab[fkeyp->keynum].len); break; #else splx(s); return (ENODEV); #endif default: splx(s); return (ENOIOCTL); } splx(s); return (0); } #ifndef KBD_DISABLE_KEYMAP_LOAD #define RESTRICTED_KEY(key, i) \ ((key->spcl & (0x80 >> i)) && \ (key->map[i] == RBT || key->map[i] == SUSP || \ key->map[i] == STBY || key->map[i] == DBG || \ key->map[i] == PNC || key->map[i] == HALT || \ key->map[i] == PDWN)) static int key_change_ok(struct keyent_t *oldkey, struct keyent_t *newkey, struct thread *td) { int i; /* Low keymap_restrict_change means any changes are OK. */ if (keymap_restrict_change <= 0) return (0); /* High keymap_restrict_change means only root can change the keymap. */ if (keymap_restrict_change >= 2) { for (i = 0; i < NUM_STATES; i++) if (oldkey->map[i] != newkey->map[i]) return priv_check(td, PRIV_KEYBOARD); if (oldkey->spcl != newkey->spcl) return priv_check(td, PRIV_KEYBOARD); if (oldkey->flgs != newkey->flgs) return priv_check(td, PRIV_KEYBOARD); return (0); } /* Otherwise we have to see if any special keys are being changed. */ for (i = 0; i < NUM_STATES; i++) { /* * If either the oldkey or the newkey action is restricted * then we must make sure that the action doesn't change. 
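The checks around here key off the hw.kbd.keymap_restrict_change sysctl: at 0 any keymap change is allowed, at 1 only remapping the restricted actions named in RESTRICTED_KEY (reboot, suspend, debugger, halt, power-down, ...) needs privilege, and at 2 or more any change needs PRIV_KEYBOARD. A userland usage sketch that raises the level (setting it requires the appropriate privilege):

#include <sys/types.h>
#include <sys/sysctl.h>

static int
example_lock_down_keymap(void)
{
	int level = 2;		/* require privilege for any keymap change */

	return (sysctlbyname("hw.kbd.keymap_restrict_change",
	    NULL, NULL, &level, sizeof(level)));
}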
*/ if (!RESTRICTED_KEY(oldkey, i) && !RESTRICTED_KEY(newkey, i)) continue; if ((oldkey->spcl & (0x80 >> i)) == (newkey->spcl & (0x80 >> i)) && oldkey->map[i] == newkey->map[i]) continue; return priv_check(td, PRIV_KEYBOARD); } return (0); } static int keymap_change_ok(keymap_t *oldmap, keymap_t *newmap, struct thread *td) { int keycode, error; for (keycode = 0; keycode < NUM_KEYS; keycode++) { if ((error = key_change_ok(&oldmap->key[keycode], &newmap->key[keycode], td)) != 0) return (error); } return (0); } static int accent_change_ok(accentmap_t *oldmap, accentmap_t *newmap, struct thread *td) { struct acc_t *oldacc, *newacc; int accent, i; if (keymap_restrict_change <= 2) return (0); if (oldmap->n_accs != newmap->n_accs) return priv_check(td, PRIV_KEYBOARD); for (accent = 0; accent < oldmap->n_accs; accent++) { oldacc = &oldmap->acc[accent]; newacc = &newmap->acc[accent]; if (oldacc->accchar != newacc->accchar) return priv_check(td, PRIV_KEYBOARD); for (i = 0; i < NUM_ACCENTCHARS; ++i) { if (oldacc->map[i][0] != newacc->map[i][0]) return priv_check(td, PRIV_KEYBOARD); if (oldacc->map[i][0] == 0) /* end of table */ break; if (oldacc->map[i][1] != newacc->map[i][1]) return priv_check(td, PRIV_KEYBOARD); } } return (0); } static int fkey_change_ok(fkeytab_t *oldkey, fkeyarg_t *newkey, struct thread *td) { if (keymap_restrict_change <= 3) return (0); if (oldkey->len != newkey->flen || bcmp(oldkey->str, newkey->keydef, oldkey->len) != 0) return priv_check(td, PRIV_KEYBOARD); return (0); } #endif /* get a pointer to the string associated with the given function key */ u_char * genkbd_get_fkeystr(keyboard_t *kbd, int fkey, size_t *len) { if (kbd == NULL) return (NULL); fkey -= F_FN; if (fkey > kbd->kb_fkeytab_size) return (NULL); *len = kbd->kb_fkeytab[fkey].len; return (kbd->kb_fkeytab[fkey].str); } /* diagnostic dump */ static char * get_kbd_type_name(int type) { static struct { int type; char *name; } name_table[] = { { KB_84, "AT 84" }, { KB_101, "AT 101/102" }, { KB_OTHER, "generic" }, }; int i; for (i = 0; i < nitems(name_table); ++i) { if (type == name_table[i].type) return (name_table[i].name); } return ("unknown"); } void genkbd_diag(keyboard_t *kbd, int level) { if (level > 0) { printf("kbd%d: %s%d, %s (%d), config:0x%x, flags:0x%x", kbd->kb_index, kbd->kb_name, kbd->kb_unit, get_kbd_type_name(kbd->kb_type), kbd->kb_type, kbd->kb_config, kbd->kb_flags); if (kbd->kb_io_base > 0) printf(", port:0x%x-0x%x", kbd->kb_io_base, kbd->kb_io_base + kbd->kb_io_size - 1); printf("\n"); } } #define set_lockkey_state(k, s, l) \ if (!((s) & l ## DOWN)) { \ int i; \ (s) |= l ## DOWN; \ (s) ^= l ## ED; \ i = (s) & LOCK_MASK; \ (void)kbdd_ioctl((k), KDSETLED, (caddr_t)&i); \ } static u_int save_accent_key(keyboard_t *kbd, u_int key, int *accents) { int i; /* make an index into the accent map */ i = key - F_ACC + 1; if ((i > kbd->kb_accentmap->n_accs) || (kbd->kb_accentmap->acc[i - 1].accchar == 0)) { /* the index is out of range or pointing to an empty entry */ *accents = 0; return (ERRKEY); } /* * If the same accent key has been hit twice, produce the accent * char itself. */ if (i == *accents) { key = kbd->kb_accentmap->acc[i - 1].accchar; *accents = 0; return (key); } /* remember the index and wait for the next key */ *accents = i; return (NOKEY); } static u_int make_accent_char(keyboard_t *kbd, u_int ch, int *accents) { struct acc_t *acc; int i; acc = &kbd->kb_accentmap->acc[*accents - 1]; *accents = 0; /* * If the accent key is followed by the space key, * produce the accent char itself. 
*/ if (ch == ' ') return (acc->accchar); /* scan the accent map */ for (i = 0; i < NUM_ACCENTCHARS; ++i) { if (acc->map[i][0] == 0) /* end of table */ break; if (acc->map[i][0] == ch) return (acc->map[i][1]); } /* this char cannot be accented... */ return (ERRKEY); } int genkbd_keyaction(keyboard_t *kbd, int keycode, int up, int *shiftstate, int *accents) { struct keyent_t *key; int state = *shiftstate; int action; int f; int i; i = keycode; f = state & (AGRS | ALKED); if ((f == AGRS1) || (f == AGRS2) || (f == ALKED)) i += ALTGR_OFFSET; key = &kbd->kb_keymap->key[i]; i = ((state & SHIFTS) ? 1 : 0) | ((state & CTLS) ? 2 : 0) | ((state & ALTS) ? 4 : 0); if (((key->flgs & FLAG_LOCK_C) && (state & CLKED)) || ((key->flgs & FLAG_LOCK_N) && (state & NLKED)) ) i ^= 1; if (up) { /* break: key released */ action = kbd->kb_lastact[keycode]; kbd->kb_lastact[keycode] = NOP; switch (action) { case LSHA: if (state & SHIFTAON) { set_lockkey_state(kbd, state, ALK); state &= ~ALKDOWN; } action = LSH; /* FALL THROUGH */ case LSH: state &= ~SHIFTS1; break; case RSHA: if (state & SHIFTAON) { set_lockkey_state(kbd, state, ALK); state &= ~ALKDOWN; } action = RSH; /* FALL THROUGH */ case RSH: state &= ~SHIFTS2; break; case LCTRA: if (state & SHIFTAON) { set_lockkey_state(kbd, state, ALK); state &= ~ALKDOWN; } action = LCTR; /* FALL THROUGH */ case LCTR: state &= ~CTLS1; break; case RCTRA: if (state & SHIFTAON) { set_lockkey_state(kbd, state, ALK); state &= ~ALKDOWN; } action = RCTR; /* FALL THROUGH */ case RCTR: state &= ~CTLS2; break; case LALTA: if (state & SHIFTAON) { set_lockkey_state(kbd, state, ALK); state &= ~ALKDOWN; } action = LALT; /* FALL THROUGH */ case LALT: state &= ~ALTS1; break; case RALTA: if (state & SHIFTAON) { set_lockkey_state(kbd, state, ALK); state &= ~ALKDOWN; } action = RALT; /* FALL THROUGH */ case RALT: state &= ~ALTS2; break; case ASH: state &= ~AGRS1; break; case META: state &= ~METAS1; break; case NLK: state &= ~NLKDOWN; break; case CLK: state &= ~CLKDOWN; break; case SLK: state &= ~SLKDOWN; break; case ALK: state &= ~ALKDOWN; break; case NOP: /* release events of regular keys are not reported */ *shiftstate &= ~SHIFTAON; return (NOKEY); } *shiftstate = state & ~SHIFTAON; return (SPCLKEY | RELKEY | action); } else { /* make: key pressed */ action = key->map[i]; state &= ~SHIFTAON; if (key->spcl & (0x80 >> i)) { /* special keys */ if (kbd->kb_lastact[keycode] == NOP) kbd->kb_lastact[keycode] = action; if (kbd->kb_lastact[keycode] != action) action = NOP; switch (action) { /* LOCKING KEYS */ case NLK: set_lockkey_state(kbd, state, NLK); break; case CLK: set_lockkey_state(kbd, state, CLK); break; case SLK: set_lockkey_state(kbd, state, SLK); break; case ALK: set_lockkey_state(kbd, state, ALK); break; /* NON-LOCKING KEYS */ case SPSC: case RBT: case SUSP: case STBY: case DBG: case NEXT: case PREV: case PNC: case HALT: case PDWN: *accents = 0; break; case BTAB: *accents = 0; action |= BKEY; break; case LSHA: state |= SHIFTAON; action = LSH; /* FALL THROUGH */ case LSH: state |= SHIFTS1; break; case RSHA: state |= SHIFTAON; action = RSH; /* FALL THROUGH */ case RSH: state |= SHIFTS2; break; case LCTRA: state |= SHIFTAON; action = LCTR; /* FALL THROUGH */ case LCTR: state |= CTLS1; break; case RCTRA: state |= SHIFTAON; action = RCTR; /* FALL THROUGH */ case RCTR: state |= CTLS2; break; case LALTA: state |= SHIFTAON; action = LALT; /* FALL THROUGH */ case LALT: state |= ALTS1; break; case RALTA: state |= SHIFTAON; action = RALT; /* FALL THROUGH */ case RALT: state |= ALTS2; break; case ASH: 
state |= AGRS1; break; case META: state |= METAS1; break; case NOP: *shiftstate = state; return (NOKEY); default: /* is this an accent (dead) key? */ *shiftstate = state; if (action >= F_ACC && action <= L_ACC) { action = save_accent_key(kbd, action, accents); switch (action) { case NOKEY: case ERRKEY: return (action); default: if (state & METAS) return (action | MKEY); else return (action); } /* NOT REACHED */ } /* other special keys */ if (*accents > 0) { *accents = 0; return (ERRKEY); } if (action >= F_FN && action <= L_FN) action |= FKEY; /* XXX: return fkey string for the FKEY? */ return (SPCLKEY | action); } *shiftstate = state; return (SPCLKEY | action); } else { /* regular keys */ kbd->kb_lastact[keycode] = NOP; *shiftstate = state; if (*accents > 0) { /* make an accented char */ action = make_accent_char(kbd, action, accents); if (action == ERRKEY) return (action); } if (state & METAS) action |= MKEY; return (action); } } /* NOT REACHED */ } Index: head/sys/dev/liquidio/base/lio_request_manager.c =================================================================== --- head/sys/dev/liquidio/base/lio_request_manager.c (revision 327948) +++ head/sys/dev/liquidio/base/lio_request_manager.c (revision 327949) @@ -1,858 +1,858 @@ /* * BSD LICENSE * * Copyright(c) 2017 Cavium, Inc.. All rights reserved. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * Neither the name of Cavium, Inc. nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER(S) OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/ /*$FreeBSD$*/ #include "lio_bsd.h" #include "lio_common.h" #include "lio_droq.h" #include "lio_iq.h" #include "lio_response_manager.h" #include "lio_device.h" #include "lio_main.h" #include "lio_network.h" #include "cn23xx_pf_device.h" #include "lio_rxtx.h" struct lio_iq_post_status { int status; int index; }; static void lio_check_db_timeout(void *arg, int pending); static void __lio_check_db_timeout(struct octeon_device *oct, uint64_t iq_no); /* Return 0 on success, 1 on failure */ int lio_init_instr_queue(struct octeon_device *oct, union octeon_txpciq txpciq, uint32_t num_descs) { struct lio_instr_queue *iq; struct lio_iq_config *conf = NULL; struct lio_tq *db_tq; struct lio_request_list *request_buf; bus_size_t max_size; uint32_t iq_no = (uint32_t)txpciq.s.q_no; uint32_t q_size; int error, i; if (LIO_CN23XX_PF(oct)) conf = &(LIO_GET_IQ_CFG(LIO_CHIP_CONF(oct, cn23xx_pf))); if (conf == NULL) { lio_dev_err(oct, "Unsupported Chip %x\n", oct->chip_id); return (1); } q_size = (uint32_t)conf->instr_type * num_descs; iq = oct->instr_queue[iq_no]; iq->oct_dev = oct; max_size = LIO_CN23XX_PKI_MAX_FRAME_SIZE * num_descs; error = bus_dma_tag_create(bus_get_dma_tag(oct->device), /* parent */ 1, 0, /* alignment, bounds */ BUS_SPACE_MAXADDR, /* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filter, filterarg */ max_size, /* maxsize */ LIO_MAX_SG, /* nsegments */ PAGE_SIZE, /* maxsegsize */ 0, /* flags */ NULL, /* lockfunc */ NULL, /* lockfuncarg */ &iq->txtag); if (error) { lio_dev_err(oct, "Cannot allocate memory for instr queue %d\n", iq_no); return (1); } iq->base_addr = lio_dma_alloc(q_size, (vm_paddr_t *)&iq->base_addr_dma); if (!iq->base_addr) { lio_dev_err(oct, "Cannot allocate memory for instr queue %d\n", iq_no); return (1); } iq->max_count = num_descs; /* * Initialize a list to holds requests that have been posted to * Octeon but has yet to be fetched by octeon */ - iq->request_list = malloc(sizeof(*iq->request_list) * num_descs, + iq->request_list = mallocarray(num_descs, sizeof(*iq->request_list), M_DEVBUF, M_NOWAIT | M_ZERO); if (iq->request_list == NULL) { lio_dev_err(oct, "Alloc failed for IQ[%d] nr free list\n", iq_no); return (1); } lio_dev_dbg(oct, "IQ[%d]: base: %p basedma: %llx count: %d\n", iq_no, iq->base_addr, LIO_CAST64(iq->base_addr_dma), iq->max_count); /* Create the descriptor buffer dma maps */ request_buf = iq->request_list; for (i = 0; i < num_descs; i++, request_buf++) { error = bus_dmamap_create(iq->txtag, 0, &request_buf->map); if (error) { lio_dev_err(oct, "Unable to create TX DMA map\n"); return (1); } } iq->txpciq.txpciq64 = txpciq.txpciq64; iq->fill_cnt = 0; iq->host_write_index = 0; iq->octeon_read_index = 0; iq->flush_index = 0; iq->last_db_time = 0; iq->db_timeout = (uint32_t)conf->db_timeout; atomic_store_rel_int(&iq->instr_pending, 0); /* Initialize the lock for this instruction queue */ mtx_init(&iq->lock, "Tx_lock", NULL, MTX_DEF); mtx_init(&iq->post_lock, "iq_post_lock", NULL, MTX_DEF); mtx_init(&iq->enq_lock, "enq_lock", NULL, MTX_DEF); mtx_init(&iq->iq_flush_running_lock, "iq_flush_running_lock", NULL, MTX_DEF); oct->io_qmask.iq |= BIT_ULL(iq_no); /* Set the 32B/64B mode for each input queue */ oct->io_qmask.iq64B |= ((conf->instr_type == 64) << iq_no); iq->iqcmd_64B = (conf->instr_type == 64); oct->fn_list.setup_iq_regs(oct, iq_no); db_tq = &oct->check_db_tq[iq_no]; db_tq->tq = taskqueue_create("lio_check_db_timeout", M_WAITOK, taskqueue_thread_enqueue, &db_tq->tq); if (db_tq->tq == NULL) { lio_dev_err(oct, "check db wq create failed for 
iq %d\n", iq_no); return (1); } TIMEOUT_TASK_INIT(db_tq->tq, &db_tq->work, 0, lio_check_db_timeout, (void *)db_tq); db_tq->ctxul = iq_no; db_tq->ctxptr = oct; taskqueue_start_threads(&db_tq->tq, 1, PI_NET, "lio%d_check_db_timeout:%d", oct->octeon_id, iq_no); taskqueue_enqueue_timeout(db_tq->tq, &db_tq->work, 1); /* Allocate a buf ring */ oct->instr_queue[iq_no]->br = buf_ring_alloc(LIO_BR_SIZE, M_DEVBUF, M_WAITOK, &oct->instr_queue[iq_no]->enq_lock); if (oct->instr_queue[iq_no]->br == NULL) { lio_dev_err(oct, "Critical Failure setting up buf ring\n"); return (1); } return (0); } int lio_delete_instr_queue(struct octeon_device *oct, uint32_t iq_no) { struct lio_instr_queue *iq = oct->instr_queue[iq_no]; struct lio_request_list *request_buf; struct lio_mbuf_free_info *finfo; uint64_t desc_size = 0, q_size; int i; lio_dev_dbg(oct, "%s[%d]\n", __func__, iq_no); if (oct->check_db_tq[iq_no].tq != NULL) { while (taskqueue_cancel_timeout(oct->check_db_tq[iq_no].tq, &oct->check_db_tq[iq_no].work, NULL)) taskqueue_drain_timeout(oct->check_db_tq[iq_no].tq, &oct->check_db_tq[iq_no].work); taskqueue_free(oct->check_db_tq[iq_no].tq); oct->check_db_tq[iq_no].tq = NULL; } if (LIO_CN23XX_PF(oct)) desc_size = LIO_GET_IQ_INSTR_TYPE_CFG(LIO_CHIP_CONF(oct, cn23xx_pf)); request_buf = iq->request_list; for (i = 0; i < iq->max_count; i++, request_buf++) { if ((request_buf->reqtype == LIO_REQTYPE_NORESP_NET) || (request_buf->reqtype == LIO_REQTYPE_NORESP_NET_SG)) { if (request_buf->buf != NULL) { finfo = request_buf->buf; bus_dmamap_sync(iq->txtag, request_buf->map, BUS_DMASYNC_POSTWRITE); bus_dmamap_unload(iq->txtag, request_buf->map); m_freem(finfo->mb); request_buf->buf = NULL; if (request_buf->map != NULL) { bus_dmamap_destroy(iq->txtag, request_buf->map); request_buf->map = NULL; } } else if (request_buf->map != NULL) { bus_dmamap_unload(iq->txtag, request_buf->map); bus_dmamap_destroy(iq->txtag, request_buf->map); request_buf->map = NULL; } } } if (iq->br != NULL) { buf_ring_free(iq->br, M_DEVBUF); iq->br = NULL; } if (iq->request_list != NULL) { free(iq->request_list, M_DEVBUF); iq->request_list = NULL; } if (iq->txtag != NULL) { bus_dma_tag_destroy(iq->txtag); iq->txtag = NULL; } if (iq->base_addr) { q_size = iq->max_count * desc_size; lio_dma_free((uint32_t)q_size, iq->base_addr); oct->io_qmask.iq &= ~(1ULL << iq_no); bzero(oct->instr_queue[iq_no], sizeof(struct lio_instr_queue)); oct->num_iqs--; return (0); } return (1); } /* Return 0 on success, 1 on failure */ int lio_setup_iq(struct octeon_device *oct, int ifidx, int q_index, union octeon_txpciq txpciq, uint32_t num_descs) { uint32_t iq_no = (uint32_t)txpciq.s.q_no; if (oct->instr_queue[iq_no]->oct_dev != NULL) { lio_dev_dbg(oct, "IQ is in use. 
Cannot create the IQ: %d again\n", iq_no); oct->instr_queue[iq_no]->txpciq.txpciq64 = txpciq.txpciq64; return (0); } oct->instr_queue[iq_no]->q_index = q_index; oct->instr_queue[iq_no]->ifidx = ifidx; if (lio_init_instr_queue(oct, txpciq, num_descs)) { lio_delete_instr_queue(oct, iq_no); return (1); } oct->num_iqs++; if (oct->fn_list.enable_io_queues(oct)) return (1); return (0); } int lio_wait_for_instr_fetch(struct octeon_device *oct) { int i, retry = 1000, pending, instr_cnt = 0; do { instr_cnt = 0; for (i = 0; i < LIO_MAX_INSTR_QUEUES(oct); i++) { if (!(oct->io_qmask.iq & BIT_ULL(i))) continue; pending = atomic_load_acq_int( &oct->instr_queue[i]->instr_pending); if (pending) __lio_check_db_timeout(oct, i); instr_cnt += pending; } if (instr_cnt == 0) break; lio_sleep_timeout(1); } while (retry-- && instr_cnt); return (instr_cnt); } static inline void lio_ring_doorbell(struct octeon_device *oct, struct lio_instr_queue *iq) { if (atomic_load_acq_int(&oct->status) == LIO_DEV_RUNNING) { lio_write_csr32(oct, iq->doorbell_reg, iq->fill_cnt); /* make sure doorbell write goes through */ __compiler_membar(); iq->fill_cnt = 0; iq->last_db_time = ticks; return; } } static inline void __lio_copy_cmd_into_iq(struct lio_instr_queue *iq, uint8_t *cmd) { uint8_t *iqptr, cmdsize; cmdsize = ((iq->iqcmd_64B) ? 64 : 32); iqptr = iq->base_addr + (cmdsize * iq->host_write_index); memcpy(iqptr, cmd, cmdsize); } static inline struct lio_iq_post_status __lio_post_command2(struct lio_instr_queue *iq, uint8_t *cmd) { struct lio_iq_post_status st; st.status = LIO_IQ_SEND_OK; /* * This ensures that the read index does not wrap around to the same * position if queue gets full before Octeon could fetch any instr. */ if (atomic_load_acq_int(&iq->instr_pending) >= (int32_t)(iq->max_count - 1)) { st.status = LIO_IQ_SEND_FAILED; st.index = -1; return (st); } if (atomic_load_acq_int(&iq->instr_pending) >= (int32_t)(iq->max_count - 2)) st.status = LIO_IQ_SEND_STOP; __lio_copy_cmd_into_iq(iq, cmd); /* "index" is returned, host_write_index is modified. */ st.index = iq->host_write_index; iq->host_write_index = lio_incr_index(iq->host_write_index, 1, iq->max_count); iq->fill_cnt++; /* * Flush the command into memory. We need to be sure the data is in * memory before indicating that the instruction is pending. */ wmb(); atomic_add_int(&iq->instr_pending, 1); return (st); } static inline void __lio_add_to_request_list(struct lio_instr_queue *iq, int idx, void *buf, int reqtype) { iq->request_list[idx].buf = buf; iq->request_list[idx].reqtype = reqtype; } /* Can only run in process context */ int lio_process_iq_request_list(struct octeon_device *oct, struct lio_instr_queue *iq, uint32_t budget) { struct lio_soft_command *sc; struct octeon_instr_irh *irh = NULL; struct lio_mbuf_free_info *finfo; void *buf; uint32_t inst_count = 0; uint32_t old = iq->flush_index; int reqtype; while (old != iq->octeon_read_index) { reqtype = iq->request_list[old].reqtype; buf = iq->request_list[old].buf; finfo = buf; if (reqtype == LIO_REQTYPE_NONE) goto skip_this; switch (reqtype) { case LIO_REQTYPE_NORESP_NET: lio_free_mbuf(iq, buf); break; case LIO_REQTYPE_NORESP_NET_SG: lio_free_sgmbuf(iq, buf); break; case LIO_REQTYPE_RESP_NET: case LIO_REQTYPE_SOFT_COMMAND: sc = buf; if (LIO_CN23XX_PF(oct)) irh = (struct octeon_instr_irh *) &sc->cmd.cmd3.irh; if (irh->rflag) { /* * We're expecting a response from Octeon. * It's up to lio_process_ordered_list() to * process sc. 
Add sc to the ordered soft * command response list because we expect * a response from Octeon. */ mtx_lock(&oct->response_list [LIO_ORDERED_SC_LIST].lock); atomic_add_int(&oct->response_list [LIO_ORDERED_SC_LIST]. pending_req_count, 1); STAILQ_INSERT_TAIL(&oct->response_list [LIO_ORDERED_SC_LIST]. head, &sc->node, entries); mtx_unlock(&oct->response_list [LIO_ORDERED_SC_LIST].lock); } else { if (sc->callback != NULL) { /* This callback must not sleep */ sc->callback(oct, LIO_REQUEST_DONE, sc->callback_arg); } } break; default: lio_dev_err(oct, "%s Unknown reqtype: %d buf: %p at idx %d\n", __func__, reqtype, buf, old); } iq->request_list[old].buf = NULL; iq->request_list[old].reqtype = 0; skip_this: inst_count++; old = lio_incr_index(old, 1, iq->max_count); if ((budget) && (inst_count >= budget)) break; } iq->flush_index = old; return (inst_count); } /* Can only be called from process context */ int lio_flush_iq(struct octeon_device *oct, struct lio_instr_queue *iq, uint32_t budget) { uint32_t inst_processed = 0; uint32_t tot_inst_processed = 0; int tx_done = 1; if (!mtx_trylock(&iq->iq_flush_running_lock)) return (tx_done); mtx_lock(&iq->lock); iq->octeon_read_index = oct->fn_list.update_iq_read_idx(iq); do { /* Process any outstanding IQ packets. */ if (iq->flush_index == iq->octeon_read_index) break; if (budget) inst_processed = lio_process_iq_request_list(oct, iq, budget - tot_inst_processed); else inst_processed = lio_process_iq_request_list(oct, iq, 0); if (inst_processed) { atomic_subtract_int(&iq->instr_pending, inst_processed); iq->stats.instr_processed += inst_processed; } tot_inst_processed += inst_processed; inst_processed = 0; } while (tot_inst_processed < budget); if (budget && (tot_inst_processed >= budget)) tx_done = 0; iq->last_db_time = ticks; mtx_unlock(&iq->lock); mtx_unlock(&iq->iq_flush_running_lock); return (tx_done); } /* * Process instruction queue after timeout. * This routine gets called from a taskqueue or when removing the module. */ static void __lio_check_db_timeout(struct octeon_device *oct, uint64_t iq_no) { struct lio_instr_queue *iq; uint64_t next_time; if (oct == NULL) return; iq = oct->instr_queue[iq_no]; if (iq == NULL) return; if (atomic_load_acq_int(&iq->instr_pending)) { /* If ticks - last_db_time < db_timeout do nothing */ next_time = iq->last_db_time + lio_ms_to_ticks(iq->db_timeout); if (!lio_check_timeout(ticks, next_time)) return; iq->last_db_time = ticks; /* Flush the instruction queue */ lio_flush_iq(oct, iq, 0); lio_enable_irq(NULL, iq); } if (oct->props.ifp != NULL && iq->br != NULL) { if (mtx_trylock(&iq->enq_lock)) { if (!drbr_empty(oct->props.ifp, iq->br)) lio_mq_start_locked(oct->props.ifp, iq); mtx_unlock(&iq->enq_lock); } } } /* * Called by the Poll thread at regular intervals to check the instruction * queue for commands to be posted and for commands that were fetched by Octeon. */ static void lio_check_db_timeout(void *arg, int pending) { struct lio_tq *db_tq = (struct lio_tq *)arg; struct octeon_device *oct = db_tq->ctxptr; uint64_t iq_no = db_tq->ctxul; uint32_t delay = 10; __lio_check_db_timeout(oct, iq_no); taskqueue_enqueue_timeout(db_tq->tq, &db_tq->work, lio_ms_to_ticks(delay)); } int lio_send_command(struct octeon_device *oct, uint32_t iq_no, uint32_t force_db, void *cmd, void *buf, uint32_t datasize, uint32_t reqtype) { struct lio_iq_post_status st; struct lio_instr_queue *iq = oct->instr_queue[iq_no]; /* * Get the lock and prevent other tasks and tx interrupt handler * from running. 
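__lio_post_command2() above admits a command only while the ring has slack: the last slot is never used, so a full queue cannot make the read index collide with the write index, and reaching the second-to-last slot returns LIO_IQ_SEND_STOP, which makes lio_send_command() ring the doorbell even without force_db. The threshold logic reduced to a sketch:

static int
example_iq_admission(int32_t instr_pending, uint32_t max_count)
{
	if (instr_pending >= (int32_t)(max_count - 1))
		return (LIO_IQ_SEND_FAILED);	/* queue full: do not post */
	if (instr_pending >= (int32_t)(max_count - 2))
		return (LIO_IQ_SEND_STOP);	/* post, then ring the doorbell */
	return (LIO_IQ_SEND_OK);
}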
*/ mtx_lock(&iq->post_lock); st = __lio_post_command2(iq, cmd); if (st.status != LIO_IQ_SEND_FAILED) { __lio_add_to_request_list(iq, st.index, buf, reqtype); LIO_INCR_INSTRQUEUE_PKT_COUNT(oct, iq_no, bytes_sent, datasize); LIO_INCR_INSTRQUEUE_PKT_COUNT(oct, iq_no, instr_posted, 1); if (force_db || (st.status == LIO_IQ_SEND_STOP)) lio_ring_doorbell(oct, iq); } else { LIO_INCR_INSTRQUEUE_PKT_COUNT(oct, iq_no, instr_dropped, 1); } mtx_unlock(&iq->post_lock); /* * This is only done here to expedite packets being flushed for * cases where there are no IQ completion interrupts. */ return (st.status); } void lio_prepare_soft_command(struct octeon_device *oct, struct lio_soft_command *sc, uint8_t opcode, uint8_t subcode, uint32_t irh_ossp, uint64_t ossp0, uint64_t ossp1) { struct lio_config *lio_cfg; struct octeon_instr_ih3 *ih3; struct octeon_instr_pki_ih3 *pki_ih3; struct octeon_instr_irh *irh; struct octeon_instr_rdp *rdp; KASSERT(opcode <= 15, ("%s, %d, opcode > 15", __func__, __LINE__)); KASSERT(subcode <= 127, ("%s, %d, opcode > 127", __func__, __LINE__)); lio_cfg = lio_get_conf(oct); if (LIO_CN23XX_PF(oct)) { ih3 = (struct octeon_instr_ih3 *)&sc->cmd.cmd3.ih3; ih3->pkind = oct->instr_queue[sc->iq_no]->txpciq.s.pkind; pki_ih3 = (struct octeon_instr_pki_ih3 *)&sc->cmd.cmd3.pki_ih3; pki_ih3->w = 1; pki_ih3->raw = 1; pki_ih3->utag = 1; pki_ih3->uqpg = oct->instr_queue[sc->iq_no]->txpciq.s.use_qpg; pki_ih3->utt = 1; pki_ih3->tag = LIO_CONTROL; pki_ih3->tagtype = LIO_ATOMIC_TAG; pki_ih3->qpg = oct->instr_queue[sc->iq_no]->txpciq.s.qpg; pki_ih3->pm = 0x7; pki_ih3->sl = 8; if (sc->datasize) ih3->dlengsz = sc->datasize; irh = (struct octeon_instr_irh *)&sc->cmd.cmd3.irh; irh->opcode = opcode; irh->subcode = subcode; /* opcode/subcode specific parameters (ossp) */ irh->ossp = irh_ossp; sc->cmd.cmd3.ossp[0] = ossp0; sc->cmd.cmd3.ossp[1] = ossp1; if (sc->rdatasize) { rdp = (struct octeon_instr_rdp *)&sc->cmd.cmd3.rdp; rdp->pcie_port = oct->pcie_port; rdp->rlen = sc->rdatasize; irh->rflag = 1; /* PKI IH3 */ /* pki_ih3 irh+ossp[0]+ossp[1]+rdp+rptr = 48 bytes */ ih3->fsz = LIO_SOFTCMDRESP_IH3; } else { irh->rflag = 0; /* PKI IH3 */ /* pki_h3 + irh + ossp[0] + ossp[1] = 32 bytes */ ih3->fsz = LIO_PCICMD_O3; } } } int lio_send_soft_command(struct octeon_device *oct, struct lio_soft_command *sc) { struct octeon_instr_ih3 *ih3; struct octeon_instr_irh *irh; uint32_t len = 0; if (LIO_CN23XX_PF(oct)) { ih3 = (struct octeon_instr_ih3 *)&sc->cmd.cmd3.ih3; if (ih3->dlengsz) { KASSERT(sc->dmadptr, ("%s, %d, sc->dmadptr is NULL", __func__, __LINE__)); sc->cmd.cmd3.dptr = sc->dmadptr; } irh = (struct octeon_instr_irh *)&sc->cmd.cmd3.irh; if (irh->rflag) { KASSERT(sc->dmarptr, ("%s, %d, sc->dmarptr is NULL", __func__, __LINE__)); KASSERT(sc->status_word, ("%s, %d, sc->status_word is NULL", __func__, __LINE__)); *sc->status_word = COMPLETION_WORD_INIT; sc->cmd.cmd3.rptr = sc->dmarptr; } len = (uint32_t)ih3->dlengsz; } if (sc->wait_time) sc->timeout = ticks + lio_ms_to_ticks(sc->wait_time); return (lio_send_command(oct, sc->iq_no, 1, &sc->cmd, sc, len, LIO_REQTYPE_SOFT_COMMAND)); } int lio_setup_sc_buffer_pool(struct octeon_device *oct) { struct lio_soft_command *sc; uint64_t dma_addr; int i; STAILQ_INIT(&oct->sc_buf_pool.head); mtx_init(&oct->sc_buf_pool.lock, "sc_pool_lock", NULL, MTX_DEF); atomic_store_rel_int(&oct->sc_buf_pool.alloc_buf_count, 0); for (i = 0; i < LIO_MAX_SOFT_COMMAND_BUFFERS; i++) { sc = (struct lio_soft_command *) lio_dma_alloc(LIO_SOFT_COMMAND_BUFFER_SIZE, (vm_paddr_t *)&dma_addr); if (sc == NULL) 
{ lio_free_sc_buffer_pool(oct); return (1); } sc->dma_addr = dma_addr; sc->size = LIO_SOFT_COMMAND_BUFFER_SIZE; STAILQ_INSERT_TAIL(&oct->sc_buf_pool.head, &sc->node, entries); } return (0); } int lio_free_sc_buffer_pool(struct octeon_device *oct) { struct lio_stailq_node *tmp, *tmp2; struct lio_soft_command *sc; mtx_lock(&oct->sc_buf_pool.lock); STAILQ_FOREACH_SAFE(tmp, &oct->sc_buf_pool.head, entries, tmp2) { sc = LIO_STAILQ_FIRST_ENTRY(&oct->sc_buf_pool.head, struct lio_soft_command, node); STAILQ_REMOVE_HEAD(&oct->sc_buf_pool.head, entries); lio_dma_free(sc->size, sc); } STAILQ_INIT(&oct->sc_buf_pool.head); mtx_unlock(&oct->sc_buf_pool.lock); return (0); } struct lio_soft_command * lio_alloc_soft_command(struct octeon_device *oct, uint32_t datasize, uint32_t rdatasize, uint32_t ctxsize) { struct lio_soft_command *sc = NULL; struct lio_stailq_node *tmp; uint64_t dma_addr; uint32_t size; uint32_t offset = sizeof(struct lio_soft_command); KASSERT((offset + datasize + rdatasize + ctxsize) <= LIO_SOFT_COMMAND_BUFFER_SIZE, ("%s, %d, offset + datasize + rdatasize + ctxsize > LIO_SOFT_COMMAND_BUFFER_SIZE", __func__, __LINE__)); mtx_lock(&oct->sc_buf_pool.lock); if (STAILQ_EMPTY(&oct->sc_buf_pool.head)) { mtx_unlock(&oct->sc_buf_pool.lock); return (NULL); } tmp = STAILQ_LAST(&oct->sc_buf_pool.head, lio_stailq_node, entries); STAILQ_REMOVE(&oct->sc_buf_pool.head, tmp, lio_stailq_node, entries); atomic_add_int(&oct->sc_buf_pool.alloc_buf_count, 1); mtx_unlock(&oct->sc_buf_pool.lock); sc = (struct lio_soft_command *)tmp; dma_addr = sc->dma_addr; size = sc->size; bzero(sc, sc->size); sc->dma_addr = dma_addr; sc->size = size; if (ctxsize) { sc->ctxptr = (uint8_t *)sc + offset; sc->ctxsize = ctxsize; } /* Start data at 128 byte boundary */ offset = (offset + ctxsize + 127) & 0xffffff80; if (datasize) { sc->virtdptr = (uint8_t *)sc + offset; sc->dmadptr = dma_addr + offset; sc->datasize = datasize; } /* Start rdata at 128 byte boundary */ offset = (offset + datasize + 127) & 0xffffff80; if (rdatasize) { KASSERT(rdatasize >= 16, ("%s, %d, rdatasize < 16", __func__, __LINE__)); sc->virtrptr = (uint8_t *)sc + offset; sc->dmarptr = dma_addr + offset; sc->rdatasize = rdatasize; sc->status_word = (uint64_t *)((uint8_t *)(sc->virtrptr) + rdatasize - 8); } return (sc); } void lio_free_soft_command(struct octeon_device *oct, struct lio_soft_command *sc) { mtx_lock(&oct->sc_buf_pool.lock); STAILQ_INSERT_TAIL(&oct->sc_buf_pool.head, &sc->node, entries); atomic_subtract_int(&oct->sc_buf_pool.alloc_buf_count, 1); mtx_unlock(&oct->sc_buf_pool.lock); } Index: head/sys/dev/liquidio/lio_main.c =================================================================== --- head/sys/dev/liquidio/lio_main.c (revision 327948) +++ head/sys/dev/liquidio/lio_main.c (revision 327949) @@ -1,2310 +1,2310 @@ /* * BSD LICENSE * * Copyright(c) 2017 Cavium, Inc.. All rights reserved. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * Neither the name of Cavium, Inc. 
nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER(S) OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ /*$FreeBSD$*/ #include "lio_bsd.h" #include "lio_common.h" #include "lio_droq.h" #include "lio_iq.h" #include "lio_response_manager.h" #include "lio_device.h" #include "lio_ctrl.h" #include "lio_main.h" #include "lio_network.h" #include "cn23xx_pf_device.h" #include "lio_image.h" #include "lio_ioctl.h" #include "lio_rxtx.h" #include "lio_rss.h" /* Number of milliseconds to wait for DDR initialization */ #define LIO_DDR_TIMEOUT 10000 #define LIO_MAX_FW_TYPE_LEN 8 static char fw_type[LIO_MAX_FW_TYPE_LEN]; TUNABLE_STR("hw.lio.fw_type", fw_type, sizeof(fw_type)); /* * Integers that specify number of queues per PF. * Valid range is 0 to 64. * Use 0 to derive from CPU count. */ static int num_queues_per_pf0; static int num_queues_per_pf1; TUNABLE_INT("hw.lio.num_queues_per_pf0", &num_queues_per_pf0); TUNABLE_INT("hw.lio.num_queues_per_pf1", &num_queues_per_pf1); #ifdef RSS static int lio_rss = 1; TUNABLE_INT("hw.lio.rss", &lio_rss); #endif /* RSS */ /* Hardware LRO */ unsigned int lio_hwlro = 0; TUNABLE_INT("hw.lio.hwlro", &lio_hwlro); /* * Bitmask indicating which consoles have debug * output redirected to syslog. */ static unsigned long console_bitmask; TUNABLE_ULONG("hw.lio.console_bitmask", &console_bitmask); /* * \brief determines if a given console has debug enabled. * @param console console to check * @returns 1 = enabled. 
0 otherwise */ int lio_console_debug_enabled(uint32_t console) { return (console_bitmask >> (console)) & 0x1; } static int lio_detach(device_t dev); static int lio_device_init(struct octeon_device *octeon_dev); static int lio_chip_specific_setup(struct octeon_device *oct); static void lio_watchdog(void *param); static int lio_load_firmware(struct octeon_device *oct); static int lio_nic_starter(struct octeon_device *oct); static int lio_init_nic_module(struct octeon_device *oct); static int lio_setup_nic_devices(struct octeon_device *octeon_dev); static int lio_link_info(struct lio_recv_info *recv_info, void *ptr); static void lio_if_cfg_callback(struct octeon_device *oct, uint32_t status, void *buf); static int lio_set_rxcsum_command(struct ifnet *ifp, int command, uint8_t rx_cmd); static int lio_setup_glists(struct octeon_device *oct, struct lio *lio, int num_iqs); static void lio_destroy_nic_device(struct octeon_device *oct, int ifidx); static inline void lio_update_link_status(struct ifnet *ifp, union octeon_link_status *ls); static void lio_send_rx_ctrl_cmd(struct lio *lio, int start_stop); static int lio_stop_nic_module(struct octeon_device *oct); static void lio_destroy_resources(struct octeon_device *oct); static int lio_setup_rx_oom_poll_fn(struct ifnet *ifp); static void lio_vlan_rx_add_vid(void *arg, struct ifnet *ifp, uint16_t vid); static void lio_vlan_rx_kill_vid(void *arg, struct ifnet *ifp, uint16_t vid); static struct octeon_device * lio_get_other_octeon_device(struct octeon_device *oct); static int lio_wait_for_oq_pkts(struct octeon_device *oct); int lio_send_rss_param(struct lio *lio); static int lio_dbg_console_print(struct octeon_device *oct, uint32_t console_num, char *prefix, char *suffix); /* Polling interval for determining when NIC application is alive */ #define LIO_STARTER_POLL_INTERVAL_MS 100 /* * vendor_info_array. * This array contains the list of IDs on which the driver should load. 
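The loader tunables above (hw.lio.fw_type, hw.lio.num_queues_per_pf0, hw.lio.num_queues_per_pf1, hw.lio.hwlro and hw.lio.console_bitmask) are fetched once at module load, so they are normally set in /boot/loader.conf before the driver attaches. As a minimal user-space sketch of the bit test done by lio_console_debug_enabled() above (the helper name and sample value below are illustrative only), a bitmask of 0x5 selects consoles 0 and 2 for debug output:

/* Illustrative only: mirrors the (console_bitmask >> console) & 0x1 test. */
#include <stdio.h>

static int
console_debug_enabled(unsigned long mask, unsigned int console)
{
        return ((mask >> console) & 0x1);
}

int
main(void)
{
        unsigned long mask = 0x5;       /* e.g. hw.lio.console_bitmask="5" */
        unsigned int c;

        for (c = 0; c < 4; c++)
                printf("console %u: %s\n", c,
                    console_debug_enabled(mask, c) ? "debug" : "quiet");
        return (0);
}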
*/ struct lio_vendor_info { uint16_t vendor_id; uint16_t device_id; uint16_t subdevice_id; uint8_t revision_id; uint8_t index; }; static struct lio_vendor_info lio_pci_tbl[] = { /* CN2350 10G */ {PCI_VENDOR_ID_CAVIUM, LIO_CN23XX_PF_VID, LIO_CN2350_10G_SUBDEVICE, 0x02, 0}, /* CN2350 10G */ {PCI_VENDOR_ID_CAVIUM, LIO_CN23XX_PF_VID, LIO_CN2350_10G_SUBDEVICE1, 0x02, 0}, /* CN2360 10G */ {PCI_VENDOR_ID_CAVIUM, LIO_CN23XX_PF_VID, LIO_CN2360_10G_SUBDEVICE, 0x02, 1}, /* CN2350 25G */ {PCI_VENDOR_ID_CAVIUM, LIO_CN23XX_PF_VID, LIO_CN2350_25G_SUBDEVICE, 0x02, 2}, /* CN2360 25G */ {PCI_VENDOR_ID_CAVIUM, LIO_CN23XX_PF_VID, LIO_CN2360_25G_SUBDEVICE, 0x02, 3}, {0, 0, 0, 0, 0} }; static char *lio_strings[] = { "LiquidIO 2350 10GbE Server Adapter", "LiquidIO 2360 10GbE Server Adapter", "LiquidIO 2350 25GbE Server Adapter", "LiquidIO 2360 25GbE Server Adapter", }; struct lio_if_cfg_resp { uint64_t rh; struct octeon_if_cfg_info cfg_info; uint64_t status; }; struct lio_if_cfg_context { int octeon_id; volatile int cond; }; struct lio_rx_ctl_context { int octeon_id; volatile int cond; }; static int lio_probe(device_t dev) { struct lio_vendor_info *tbl; uint16_t vendor_id; uint16_t device_id; uint16_t subdevice_id; uint8_t revision_id; char device_ver[256]; vendor_id = pci_get_vendor(dev); if (vendor_id != PCI_VENDOR_ID_CAVIUM) return (ENXIO); device_id = pci_get_device(dev); subdevice_id = pci_get_subdevice(dev); revision_id = pci_get_revid(dev); tbl = lio_pci_tbl; while (tbl->vendor_id) { if ((vendor_id == tbl->vendor_id) && (device_id == tbl->device_id) && (subdevice_id == tbl->subdevice_id) && (revision_id == tbl->revision_id)) { sprintf(device_ver, "%s, Version - %s", lio_strings[tbl->index], LIO_VERSION); device_set_desc_copy(dev, device_ver); return (BUS_PROBE_DEFAULT); } tbl++; } return (ENXIO); } static int lio_attach(device_t device) { struct octeon_device *oct_dev = NULL; uint64_t scratch1; uint32_t error; int timeout, ret = 1; uint8_t bus, dev, function; oct_dev = lio_allocate_device(device); if (oct_dev == NULL) { device_printf(device, "Error: Unable to allocate device\n"); return (-ENOMEM); } oct_dev->tx_budget = LIO_DEFAULT_TX_PKTS_PROCESS_BUDGET; oct_dev->rx_budget = LIO_DEFAULT_RX_PKTS_PROCESS_BUDGET; oct_dev->msix_on = LIO_FLAG_MSIX_ENABLED; oct_dev->device = device; bus = pci_get_bus(device); dev = pci_get_slot(device); function = pci_get_function(device); lio_dev_info(oct_dev, "Initializing device %x:%x %02x:%02x.%01x\n", pci_get_vendor(device), pci_get_device(device), bus, dev, function); if (lio_device_init(oct_dev)) { lio_dev_err(oct_dev, "Failed to init device\n"); lio_detach(device); return (-ENOMEM); } scratch1 = lio_read_csr64(oct_dev, LIO_CN23XX_SLI_SCRATCH1); if (!(scratch1 & 4ULL)) { /* * Bit 2 of SLI_SCRATCH_1 is a flag that indicates that * the lio watchdog kernel thread is running for this * NIC. Each NIC gets one watchdog kernel thread. */ scratch1 |= 4ULL; lio_write_csr64(oct_dev, LIO_CN23XX_SLI_SCRATCH1, scratch1); error = kproc_create(lio_watchdog, oct_dev, &oct_dev->watchdog_task, 0, 0, "liowd/%02hhx:%02hhx.%hhx", bus, dev, function); if (!error) { kproc_resume(oct_dev->watchdog_task); } else { oct_dev->watchdog_task = NULL; lio_dev_err(oct_dev, "failed to create kernel_thread\n"); lio_detach(device); return (-1); } } oct_dev->rx_pause = 1; oct_dev->tx_pause = 1; timeout = 0; while (timeout < LIO_NIC_STARTER_TIMEOUT) { lio_mdelay(LIO_STARTER_POLL_INTERVAL_MS); timeout += LIO_STARTER_POLL_INTERVAL_MS; /* * During the boot process interrupts are not available. 
* So polling for first control message from FW. */ if (cold) lio_droq_bh(oct_dev->droq[0], 0); if (atomic_load_acq_int(&oct_dev->status) == LIO_DEV_CORE_OK) { ret = lio_nic_starter(oct_dev); break; } } if (ret) { lio_dev_err(oct_dev, "Firmware failed to start\n"); lio_detach(device); return (-EIO); } lio_dev_dbg(oct_dev, "Device is ready\n"); return (0); } static int lio_detach(device_t dev) { struct octeon_device *oct_dev = device_get_softc(dev); lio_dev_dbg(oct_dev, "Stopping device\n"); if (oct_dev->watchdog_task) { uint64_t scratch1; kproc_suspend(oct_dev->watchdog_task, 0); scratch1 = lio_read_csr64(oct_dev, LIO_CN23XX_SLI_SCRATCH1); scratch1 &= ~4ULL; lio_write_csr64(oct_dev, LIO_CN23XX_SLI_SCRATCH1, scratch1); } if (oct_dev->app_mode && (oct_dev->app_mode == LIO_DRV_NIC_APP)) lio_stop_nic_module(oct_dev); /* * Reset the octeon device and cleanup all memory allocated for * the octeon device by driver. */ lio_destroy_resources(oct_dev); lio_dev_info(oct_dev, "Device removed\n"); /* * This octeon device has been removed. Update the global * data structure to reflect this. Free the device structure. */ lio_free_device_mem(oct_dev); return (0); } static int lio_shutdown(device_t dev) { struct octeon_device *oct_dev = device_get_softc(dev); struct lio *lio = if_getsoftc(oct_dev->props.ifp); lio_send_rx_ctrl_cmd(lio, 0); return (0); } static int lio_suspend(device_t dev) { return (ENXIO); } static int lio_resume(device_t dev) { return (ENXIO); } static int lio_event(struct module *mod, int event, void *junk) { switch (event) { case MOD_LOAD: lio_init_device_list(LIO_CFG_TYPE_DEFAULT); break; default: break; } return (0); } /********************************************************************* * FreeBSD Device Interface Entry Points * *******************************************************************/ static device_method_t lio_methods[] = { /* Device interface */ DEVMETHOD(device_probe, lio_probe), DEVMETHOD(device_attach, lio_attach), DEVMETHOD(device_detach, lio_detach), DEVMETHOD(device_shutdown, lio_shutdown), DEVMETHOD(device_suspend, lio_suspend), DEVMETHOD(device_resume, lio_resume), DEVMETHOD_END }; static driver_t lio_driver = { LIO_DRV_NAME, lio_methods, sizeof(struct octeon_device), }; devclass_t lio_devclass; DRIVER_MODULE(lio, pci, lio_driver, lio_devclass, lio_event, 0); MODULE_DEPEND(lio, pci, 1, 1, 1); MODULE_DEPEND(lio, ether, 1, 1, 1); MODULE_DEPEND(lio, firmware, 1, 1, 1); static bool fw_type_is_none(void) { return strncmp(fw_type, LIO_FW_NAME_TYPE_NONE, sizeof(LIO_FW_NAME_TYPE_NONE)) == 0; } /* * \brief Device initialization for each Octeon device that is probed * @param octeon_dev octeon device */ static int lio_device_init(struct octeon_device *octeon_dev) { unsigned long ddr_timeout = LIO_DDR_TIMEOUT; char *dbg_enb = NULL; int fw_loaded = 0; int i, j, ret; uint8_t bus, dev, function; char bootcmd[] = "\n"; bus = pci_get_bus(octeon_dev->device); dev = pci_get_slot(octeon_dev->device); function = pci_get_function(octeon_dev->device); atomic_store_rel_int(&octeon_dev->status, LIO_DEV_BEGIN_STATE); /* Enable access to the octeon device */ if (pci_enable_busmaster(octeon_dev->device)) { lio_dev_err(octeon_dev, "pci_enable_device failed\n"); return (1); } atomic_store_rel_int(&octeon_dev->status, LIO_DEV_PCI_ENABLE_DONE); /* Identify the Octeon type and map the BAR address space. 
*/ if (lio_chip_specific_setup(octeon_dev)) { lio_dev_err(octeon_dev, "Chip specific setup failed\n"); return (1); } atomic_store_rel_int(&octeon_dev->status, LIO_DEV_PCI_MAP_DONE); /* * Only add a reference after setting status 'OCT_DEV_PCI_MAP_DONE', * since that is what is required for the reference to be removed * during de-initialization (see 'octeon_destroy_resources'). */ lio_register_device(octeon_dev, bus, dev, function, true); octeon_dev->app_mode = LIO_DRV_INVALID_APP; if (!lio_cn23xx_pf_fw_loaded(octeon_dev) && !fw_type_is_none()) { fw_loaded = 0; /* Do a soft reset of the Octeon device. */ if (octeon_dev->fn_list.soft_reset(octeon_dev)) return (1); /* things might have changed */ if (!lio_cn23xx_pf_fw_loaded(octeon_dev)) fw_loaded = 0; else fw_loaded = 1; } else { fw_loaded = 1; } /* * Initialize the dispatch mechanism used to push packets arriving on * Octeon Output queues. */ if (lio_init_dispatch_list(octeon_dev)) return (1); lio_register_dispatch_fn(octeon_dev, LIO_OPCODE_NIC, LIO_OPCODE_NIC_CORE_DRV_ACTIVE, lio_core_drv_init, octeon_dev); atomic_store_rel_int(&octeon_dev->status, LIO_DEV_DISPATCH_INIT_DONE); ret = octeon_dev->fn_list.setup_device_regs(octeon_dev); if (ret) { lio_dev_err(octeon_dev, "Failed to configure device registers\n"); return (ret); } /* Initialize soft command buffer pool */ if (lio_setup_sc_buffer_pool(octeon_dev)) { lio_dev_err(octeon_dev, "sc buffer pool allocation failed\n"); return (1); } atomic_store_rel_int(&octeon_dev->status, LIO_DEV_SC_BUFF_POOL_INIT_DONE); if (lio_allocate_ioq_vector(octeon_dev)) { lio_dev_err(octeon_dev, "IOQ vector allocation failed\n"); return (1); } atomic_store_rel_int(&octeon_dev->status, LIO_DEV_MSIX_ALLOC_VECTOR_DONE); for (i = 0; i < LIO_MAX_POSSIBLE_INSTR_QUEUES; i++) { octeon_dev->instr_queue[i] = malloc(sizeof(struct lio_instr_queue), M_DEVBUF, M_NOWAIT | M_ZERO); if (octeon_dev->instr_queue[i] == NULL) return (1); } /* Setup the data structures that manage this Octeon's Input queues. */ if (lio_setup_instr_queue0(octeon_dev)) { lio_dev_err(octeon_dev, "Instruction queue initialization failed\n"); return (1); } atomic_store_rel_int(&octeon_dev->status, LIO_DEV_INSTR_QUEUE_INIT_DONE); /* * Initialize lists to manage the requests of different types that * arrive from user & kernel applications for this octeon device. */ if (lio_setup_response_list(octeon_dev)) { lio_dev_err(octeon_dev, "Response list allocation failed\n"); return (1); } atomic_store_rel_int(&octeon_dev->status, LIO_DEV_RESP_LIST_INIT_DONE); for (i = 0; i < LIO_MAX_POSSIBLE_OUTPUT_QUEUES; i++) { octeon_dev->droq[i] = malloc(sizeof(*octeon_dev->droq[i]), M_DEVBUF, M_NOWAIT | M_ZERO); if (octeon_dev->droq[i] == NULL) return (1); } if (lio_setup_output_queue0(octeon_dev)) { lio_dev_err(octeon_dev, "Output queue initialization failed\n"); return (1); } atomic_store_rel_int(&octeon_dev->status, LIO_DEV_DROQ_INIT_DONE); /* * Setup the interrupt handler and record the INT SUM register address */ if (lio_setup_interrupt(octeon_dev, octeon_dev->sriov_info.num_pf_rings)) return (1); /* Enable Octeon device interrupts */ octeon_dev->fn_list.enable_interrupt(octeon_dev, OCTEON_ALL_INTR); atomic_store_rel_int(&octeon_dev->status, LIO_DEV_INTR_SET_DONE); /* * Send Credit for Octeon Output queues. Credits are always sent BEFORE * the output queue is enabled. * This ensures that we'll receive the f/w CORE DRV_ACTIVE message in * case we've configured CN23XX_SLI_GBL_CONTROL[NOPTR_D] = 0. 
* Otherwise, it is possible that the DRV_ACTIVE message will be sent * before any credits have been issued, causing the ring to be reset * (and the f/w appear to never have started). */ for (j = 0; j < octeon_dev->num_oqs; j++) lio_write_csr32(octeon_dev, octeon_dev->droq[j]->pkts_credit_reg, octeon_dev->droq[j]->max_count); /* Enable the input and output queues for this Octeon device */ ret = octeon_dev->fn_list.enable_io_queues(octeon_dev); if (ret) { lio_dev_err(octeon_dev, "Failed to enable input/output queues"); return (ret); } atomic_store_rel_int(&octeon_dev->status, LIO_DEV_IO_QUEUES_DONE); if (!fw_loaded) { lio_dev_dbg(octeon_dev, "Waiting for DDR initialization...\n"); if (!ddr_timeout) { lio_dev_info(octeon_dev, "WAITING. Set ddr_timeout to non-zero value to proceed with initialization.\n"); } lio_sleep_timeout(LIO_RESET_MSECS); /* * Wait for the octeon to initialize DDR after the * soft-reset. */ while (!ddr_timeout) { if (pause("-", lio_ms_to_ticks(100))) { /* user probably pressed Control-C */ return (1); } } ret = lio_wait_for_ddr_init(octeon_dev, &ddr_timeout); if (ret) { lio_dev_err(octeon_dev, "DDR not initialized. Please confirm that board is configured to boot from Flash, ret: %d\n", ret); return (1); } if (lio_wait_for_bootloader(octeon_dev, 1100)) { lio_dev_err(octeon_dev, "Board not responding\n"); return (1); } /* Divert uboot to take commands from host instead. */ ret = lio_console_send_cmd(octeon_dev, bootcmd, 50); lio_dev_dbg(octeon_dev, "Initializing consoles\n"); ret = lio_init_consoles(octeon_dev); if (ret) { lio_dev_err(octeon_dev, "Could not access board consoles\n"); return (1); } /* * If console debug enabled, specify empty string to * use default enablement ELSE specify NULL string for * 'disabled'. */ dbg_enb = lio_console_debug_enabled(0) ? "" : NULL; ret = lio_add_console(octeon_dev, 0, dbg_enb); if (ret) { lio_dev_err(octeon_dev, "Could not access board console\n"); return (1); } else if (lio_console_debug_enabled(0)) { /* * If console was added AND we're logging console output * then set our console print function. */ octeon_dev->console[0].print = lio_dbg_console_print; } atomic_store_rel_int(&octeon_dev->status, LIO_DEV_CONSOLE_INIT_DONE); lio_dev_dbg(octeon_dev, "Loading firmware\n"); ret = lio_load_firmware(octeon_dev); if (ret) { lio_dev_err(octeon_dev, "Could not load firmware to board\n"); return (1); } } atomic_store_rel_int(&octeon_dev->status, LIO_DEV_HOST_OK); return (0); } /* * \brief PCI FLR for each Octeon device. 
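lio_device_init() above records a LIO_DEV_*_DONE checkpoint in octeon_dev->status after every step that succeeds, and lio_destroy_resources() later in this file tears down by switching on the last checkpoint reached and falling through, so only work that was actually completed gets undone. A simplified, self-contained sketch of that pattern (the step names here are hypothetical, not the driver's):

#include <stdio.h>

enum init_step { STEP_BEGIN, STEP_PCI_DONE, STEP_QUEUES_DONE, STEP_OK };

static enum init_step status = STEP_BEGIN;

/* Undo only what the recorded checkpoint says was completed. */
static void
teardown(void)
{
        switch (status) {
        case STEP_OK:
        case STEP_QUEUES_DONE:
                printf("freeing queues\n");
                /* FALLTHROUGH */
        case STEP_PCI_DONE:
                printf("disabling PCI access\n");
                /* FALLTHROUGH */
        case STEP_BEGIN:
                break;
        }
}

int
main(void)
{
        status = STEP_PCI_DONE;         /* pretend queue setup failed */
        teardown();                     /* undoes the PCI step only */
        return (0);
}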
* @param oct octeon device */ static void lio_pci_flr(struct octeon_device *oct) { uint32_t exppos, status; pci_find_cap(oct->device, PCIY_EXPRESS, &exppos); pci_save_state(oct->device); /* Quiesce the device completely */ pci_write_config(oct->device, PCIR_COMMAND, PCIM_CMD_INTxDIS, 2); /* Wait for Transaction Pending bit clean */ lio_mdelay(100); status = pci_read_config(oct->device, exppos + PCIER_DEVICE_STA, 2); if (status & PCIEM_STA_TRANSACTION_PND) { lio_dev_info(oct, "Function reset incomplete after 100ms, sleeping for 5 seconds\n"); lio_mdelay(5); status = pci_read_config(oct->device, exppos + PCIER_DEVICE_STA, 2); if (status & PCIEM_STA_TRANSACTION_PND) lio_dev_info(oct, "Function reset still incomplete after 5s, reset anyway\n"); } pci_write_config(oct->device, exppos + PCIER_DEVICE_CTL, PCIEM_CTL_INITIATE_FLR, 2); lio_mdelay(100); pci_restore_state(oct->device); } /* * \brief Debug console print function * @param octeon_dev octeon device * @param console_num console number * @param prefix first portion of line to display * @param suffix second portion of line to display * * The OCTEON debug console outputs entire lines (excluding '\n'). * Normally, the line will be passed in the 'prefix' parameter. * However, due to buffering, it is possible for a line to be split into two * parts, in which case they will be passed as the 'prefix' parameter and * 'suffix' parameter. */ static int lio_dbg_console_print(struct octeon_device *oct, uint32_t console_num, char *prefix, char *suffix) { if (prefix != NULL && suffix != NULL) lio_dev_info(oct, "%u: %s%s\n", console_num, prefix, suffix); else if (prefix != NULL) lio_dev_info(oct, "%u: %s\n", console_num, prefix); else if (suffix != NULL) lio_dev_info(oct, "%u: %s\n", console_num, suffix); return (0); } static void lio_watchdog(void *param) { int core_num; uint16_t mask_of_crashed_or_stuck_cores = 0; struct octeon_device *oct = param; bool err_msg_was_printed[12]; bzero(err_msg_was_printed, sizeof(err_msg_was_printed)); while (1) { kproc_suspend_check(oct->watchdog_task); mask_of_crashed_or_stuck_cores = (uint16_t)lio_read_csr64(oct, LIO_CN23XX_SLI_SCRATCH2); if (mask_of_crashed_or_stuck_cores) { struct octeon_device *other_oct; oct->cores_crashed = true; other_oct = lio_get_other_octeon_device(oct); if (other_oct != NULL) other_oct->cores_crashed = true; for (core_num = 0; core_num < LIO_MAX_CORES; core_num++) { bool core_crashed_or_got_stuck; core_crashed_or_got_stuck = (mask_of_crashed_or_stuck_cores >> core_num) & 1; if (core_crashed_or_got_stuck && !err_msg_was_printed[core_num]) { lio_dev_err(oct, "ERROR: Octeon core %d crashed or got stuck! 
See oct-fwdump for details.\n", core_num); err_msg_was_printed[core_num] = true; } } } /* sleep for two seconds */ pause("-", lio_ms_to_ticks(2000)); } } static int lio_chip_specific_setup(struct octeon_device *oct) { char *s; uint32_t dev_id, rev_id; int ret = 1; dev_id = lio_read_pci_cfg(oct, 0); rev_id = pci_get_revid(oct->device); oct->subdevice_id = pci_get_subdevice(oct->device); switch (dev_id) { case LIO_CN23XX_PF_PCIID: oct->chip_id = LIO_CN23XX_PF_VID; if (pci_get_function(oct->device) == 0) { if (num_queues_per_pf0 < 0) { lio_dev_info(oct, "Invalid num_queues_per_pf0: %d, Setting it to default\n", num_queues_per_pf0); num_queues_per_pf0 = 0; } oct->sriov_info.num_pf_rings = num_queues_per_pf0; } else { if (num_queues_per_pf1 < 0) { lio_dev_info(oct, "Invalid num_queues_per_pf1: %d, Setting it to default\n", num_queues_per_pf1); num_queues_per_pf1 = 0; } oct->sriov_info.num_pf_rings = num_queues_per_pf1; } ret = lio_cn23xx_pf_setup_device(oct); s = "CN23XX"; break; default: s = "?"; lio_dev_err(oct, "Unknown device found (dev_id: %x)\n", dev_id); } if (!ret) lio_dev_info(oct, "%s PASS%d.%d %s Version: %s\n", s, OCTEON_MAJOR_REV(oct), OCTEON_MINOR_REV(oct), lio_get_conf(oct)->card_name, LIO_VERSION); return (ret); } static struct octeon_device * lio_get_other_octeon_device(struct octeon_device *oct) { struct octeon_device *other_oct; other_oct = lio_get_device(oct->octeon_id + 1); if ((other_oct != NULL) && other_oct->device) { int oct_busnum, other_oct_busnum; oct_busnum = pci_get_bus(oct->device); other_oct_busnum = pci_get_bus(other_oct->device); if (oct_busnum == other_oct_busnum) { int oct_slot, other_oct_slot; oct_slot = pci_get_slot(oct->device); other_oct_slot = pci_get_slot(other_oct->device); if (oct_slot == other_oct_slot) return (other_oct); } } return (NULL); } /* * \brief Load firmware to device * @param oct octeon device * * Maps device to firmware filename, requests firmware, and downloads it */ static int lio_load_firmware(struct octeon_device *oct) { const struct firmware *fw; char *tmp_fw_type = NULL; int ret = 0; char fw_name[LIO_MAX_FW_FILENAME_LEN]; if (fw_type[0] == '\0') tmp_fw_type = LIO_FW_NAME_TYPE_NIC; else tmp_fw_type = fw_type; sprintf(fw_name, "%s%s_%s%s", LIO_FW_BASE_NAME, lio_get_conf(oct)->card_name, tmp_fw_type, LIO_FW_NAME_SUFFIX); fw = firmware_get(fw_name); if (fw == NULL) { lio_dev_err(oct, "Request firmware failed. Could not find file %s.\n", fw_name); return (EINVAL); } ret = lio_download_firmware(oct, fw->data, fw->datasize); firmware_put(fw, FIRMWARE_UNLOAD); return (ret); } static int lio_nic_starter(struct octeon_device *oct) { int ret = 0; atomic_store_rel_int(&oct->status, LIO_DEV_RUNNING); if (oct->app_mode && oct->app_mode == LIO_DRV_NIC_APP) { if (lio_init_nic_module(oct)) { lio_dev_err(oct, "NIC initialization failed\n"); ret = -1; #ifdef CAVIUM_ONiLY_23XX_VF } else { if (octeon_enable_sriov(oct) < 0) ret = -1; #endif } } else { lio_dev_err(oct, "Unexpected application running on NIC (%d). 
Check firmware.\n", oct->app_mode); ret = -1; } return (ret); } static int lio_init_nic_module(struct octeon_device *oct) { int num_nic_ports = LIO_GET_NUM_NIC_PORTS_CFG(lio_get_conf(oct)); int retval = 0; lio_dev_dbg(oct, "Initializing network interfaces\n"); /* * only default iq and oq were initialized * initialize the rest as well */ /* run port_config command for each port */ oct->ifcount = num_nic_ports; bzero(&oct->props, sizeof(struct lio_if_props)); oct->props.gmxport = -1; retval = lio_setup_nic_devices(oct); if (retval) { lio_dev_err(oct, "Setup NIC devices failed\n"); goto lio_init_failure; } lio_dev_dbg(oct, "Network interfaces ready\n"); return (retval); lio_init_failure: oct->ifcount = 0; return (retval); } static int lio_ifmedia_update(struct ifnet *ifp) { struct lio *lio = if_getsoftc(ifp); struct ifmedia *ifm; ifm = &lio->ifmedia; /* We only support Ethernet media type. */ if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER) return (EINVAL); switch (IFM_SUBTYPE(ifm->ifm_media)) { case IFM_AUTO: break; case IFM_10G_CX4: case IFM_10G_SR: case IFM_10G_T: case IFM_10G_TWINAX: default: /* We don't support changing the media type. */ lio_dev_err(lio->oct_dev, "Invalid media type (%d)\n", IFM_SUBTYPE(ifm->ifm_media)); return (EINVAL); } return (0); } static int lio_get_media_subtype(struct octeon_device *oct) { switch(oct->subdevice_id) { case LIO_CN2350_10G_SUBDEVICE: case LIO_CN2350_10G_SUBDEVICE1: case LIO_CN2360_10G_SUBDEVICE: return (IFM_10G_SR); case LIO_CN2350_25G_SUBDEVICE: case LIO_CN2360_25G_SUBDEVICE: return (IFM_25G_SR); } return (IFM_10G_SR); } static uint64_t lio_get_baudrate(struct octeon_device *oct) { switch(oct->subdevice_id) { case LIO_CN2350_10G_SUBDEVICE: case LIO_CN2350_10G_SUBDEVICE1: case LIO_CN2360_10G_SUBDEVICE: return (IF_Gbps(10)); case LIO_CN2350_25G_SUBDEVICE: case LIO_CN2360_25G_SUBDEVICE: return (IF_Gbps(25)); } return (IF_Gbps(10)); } static void lio_ifmedia_status(struct ifnet *ifp, struct ifmediareq *ifmr) { struct lio *lio = if_getsoftc(ifp); /* Report link down if the driver isn't running. */ if (!lio_ifstate_check(lio, LIO_IFSTATE_RUNNING)) { ifmr->ifm_active |= IFM_NONE; return; } /* Setup the default interface info. 
*/ ifmr->ifm_status = IFM_AVALID; ifmr->ifm_active = IFM_ETHER; if (lio->linfo.link.s.link_up) { ifmr->ifm_status |= IFM_ACTIVE; } else { ifmr->ifm_active |= IFM_NONE; return; } ifmr->ifm_active |= lio_get_media_subtype(lio->oct_dev); if (lio->linfo.link.s.duplex) ifmr->ifm_active |= IFM_FDX; else ifmr->ifm_active |= IFM_HDX; } static uint64_t lio_get_counter(if_t ifp, ift_counter cnt) { struct lio *lio = if_getsoftc(ifp); struct octeon_device *oct = lio->oct_dev; uint64_t counter = 0; int i, q_no; switch (cnt) { case IFCOUNTER_IPACKETS: for (i = 0; i < oct->num_oqs; i++) { q_no = lio->linfo.rxpciq[i].s.q_no; counter += oct->droq[q_no]->stats.rx_pkts_received; } break; case IFCOUNTER_OPACKETS: for (i = 0; i < oct->num_iqs; i++) { q_no = lio->linfo.txpciq[i].s.q_no; counter += oct->instr_queue[q_no]->stats.tx_done; } break; case IFCOUNTER_IBYTES: for (i = 0; i < oct->num_oqs; i++) { q_no = lio->linfo.rxpciq[i].s.q_no; counter += oct->droq[q_no]->stats.rx_bytes_received; } break; case IFCOUNTER_OBYTES: for (i = 0; i < oct->num_iqs; i++) { q_no = lio->linfo.txpciq[i].s.q_no; counter += oct->instr_queue[q_no]->stats.tx_tot_bytes; } break; case IFCOUNTER_IQDROPS: for (i = 0; i < oct->num_oqs; i++) { q_no = lio->linfo.rxpciq[i].s.q_no; counter += oct->droq[q_no]->stats.rx_dropped; } break; case IFCOUNTER_OQDROPS: for (i = 0; i < oct->num_iqs; i++) { q_no = lio->linfo.txpciq[i].s.q_no; counter += oct->instr_queue[q_no]->stats.tx_dropped; } break; case IFCOUNTER_IMCASTS: counter = oct->link_stats.fromwire.total_mcst; break; case IFCOUNTER_OMCASTS: counter = oct->link_stats.fromhost.mcast_pkts_sent; break; case IFCOUNTER_COLLISIONS: counter = oct->link_stats.fromhost.total_collisions; break; case IFCOUNTER_IERRORS: counter = oct->link_stats.fromwire.fcs_err + oct->link_stats.fromwire.l2_err + oct->link_stats.fromwire.frame_err; break; default: return (if_get_counter_default(ifp, cnt)); } return (counter); } static int lio_init_ifnet(struct lio *lio) { struct octeon_device *oct = lio->oct_dev; if_t ifp = lio->ifp; /* ifconfig entrypoint for media type/status reporting */ ifmedia_init(&lio->ifmedia, IFM_IMASK, lio_ifmedia_update, lio_ifmedia_status); /* set the default interface values */ ifmedia_add(&lio->ifmedia, (IFM_ETHER | IFM_FDX | lio_get_media_subtype(oct)), 0, NULL); ifmedia_add(&lio->ifmedia, (IFM_ETHER | IFM_AUTO), 0, NULL); ifmedia_set(&lio->ifmedia, (IFM_ETHER | IFM_AUTO)); lio->ifmedia.ifm_media = lio->ifmedia.ifm_cur->ifm_media; lio_dev_dbg(oct, "IFMEDIA flags : %x\n", lio->ifmedia.ifm_media); if_initname(ifp, device_get_name(oct->device), device_get_unit(oct->device)); if_setflags(ifp, (IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST)); if_setioctlfn(ifp, lio_ioctl); if_setgetcounterfn(ifp, lio_get_counter); if_settransmitfn(ifp, lio_mq_start); if_setqflushfn(ifp, lio_qflush); if_setinitfn(ifp, lio_open); if_setmtu(ifp, lio->linfo.link.s.mtu); lio->mtu = lio->linfo.link.s.mtu; if_sethwassist(ifp, (CSUM_IP | CSUM_TCP | CSUM_UDP | CSUM_TSO | CSUM_TCP_IPV6 | CSUM_UDP_IPV6)); if_setcapabilitiesbit(ifp, (IFCAP_HWCSUM | IFCAP_HWCSUM_IPV6 | IFCAP_TSO | IFCAP_LRO | IFCAP_JUMBO_MTU | IFCAP_HWSTATS | IFCAP_LINKSTATE | IFCAP_VLAN_HWFILTER | IFCAP_VLAN_HWCSUM | IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_HWTSO | IFCAP_VLAN_MTU), 0); if_setcapenable(ifp, if_getcapabilities(ifp)); if_setbaudrate(ifp, lio_get_baudrate(oct)); return (0); } static void lio_tcp_lro_free(struct octeon_device *octeon_dev, struct ifnet *ifp) { struct lio *lio = if_getsoftc(ifp); struct lio_droq *droq; int q_no; int i; for (i = 0; i < 
octeon_dev->num_oqs; i++) { q_no = lio->linfo.rxpciq[i].s.q_no; droq = octeon_dev->droq[q_no]; if (droq->lro.ifp) { tcp_lro_free(&droq->lro); droq->lro.ifp = NULL; } } } static int lio_tcp_lro_init(struct octeon_device *octeon_dev, struct ifnet *ifp) { struct lio *lio = if_getsoftc(ifp); struct lio_droq *droq; struct lro_ctrl *lro; int i, q_no, ret = 0; for (i = 0; i < octeon_dev->num_oqs; i++) { q_no = lio->linfo.rxpciq[i].s.q_no; droq = octeon_dev->droq[q_no]; lro = &droq->lro; ret = tcp_lro_init(lro); if (ret) { lio_dev_err(octeon_dev, "LRO Initialization failed ret %d\n", ret); goto lro_init_failed; } lro->ifp = ifp; } return (ret); lro_init_failed: lio_tcp_lro_free(octeon_dev, ifp); return (ret); } static int lio_setup_nic_devices(struct octeon_device *octeon_dev) { union octeon_if_cfg if_cfg; struct lio *lio = NULL; struct ifnet *ifp = NULL; struct lio_version *vdata; struct lio_soft_command *sc; struct lio_if_cfg_context *ctx; struct lio_if_cfg_resp *resp; struct lio_if_props *props; int num_iqueues, num_oqueues, retval; unsigned int base_queue; unsigned int gmx_port_id; uint32_t ctx_size, data_size; uint32_t ifidx_or_pfnum, resp_size; uint8_t mac[ETHER_HDR_LEN], i, j; /* This is to handle link status changes */ lio_register_dispatch_fn(octeon_dev, LIO_OPCODE_NIC, LIO_OPCODE_NIC_INFO, lio_link_info, octeon_dev); for (i = 0; i < octeon_dev->ifcount; i++) { resp_size = sizeof(struct lio_if_cfg_resp); ctx_size = sizeof(struct lio_if_cfg_context); data_size = sizeof(struct lio_version); sc = lio_alloc_soft_command(octeon_dev, data_size, resp_size, ctx_size); if (sc == NULL) return (ENOMEM); resp = (struct lio_if_cfg_resp *)sc->virtrptr; ctx = (struct lio_if_cfg_context *)sc->ctxptr; vdata = (struct lio_version *)sc->virtdptr; *((uint64_t *)vdata) = 0; vdata->major = htobe16(LIO_BASE_MAJOR_VERSION); vdata->minor = htobe16(LIO_BASE_MINOR_VERSION); vdata->micro = htobe16(LIO_BASE_MICRO_VERSION); num_iqueues = octeon_dev->sriov_info.num_pf_rings; num_oqueues = octeon_dev->sriov_info.num_pf_rings; base_queue = octeon_dev->sriov_info.pf_srn; gmx_port_id = octeon_dev->pf_num; ifidx_or_pfnum = octeon_dev->pf_num; lio_dev_dbg(octeon_dev, "requesting config for interface %d, iqs %d, oqs %d\n", ifidx_or_pfnum, num_iqueues, num_oqueues); ctx->cond = 0; ctx->octeon_id = lio_get_device_id(octeon_dev); if_cfg.if_cfg64 = 0; if_cfg.s.num_iqueues = num_iqueues; if_cfg.s.num_oqueues = num_oqueues; if_cfg.s.base_queue = base_queue; if_cfg.s.gmx_port_id = gmx_port_id; sc->iq_no = 0; lio_prepare_soft_command(octeon_dev, sc, LIO_OPCODE_NIC, LIO_OPCODE_NIC_IF_CFG, 0, if_cfg.if_cfg64, 0); sc->callback = lio_if_cfg_callback; sc->callback_arg = sc; sc->wait_time = 3000; retval = lio_send_soft_command(octeon_dev, sc); if (retval == LIO_IQ_SEND_FAILED) { lio_dev_err(octeon_dev, "iq/oq config failed status: %x\n", retval); /* Soft instr is freed by driver in case of failure. */ goto setup_nic_dev_fail; } /* * Sleep on a wait queue till the cond flag indicates that the * response arrived or timed-out. 
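The wait noted above is one half of the soft-command handshake used throughout this file: the caller prepares and sends the command, then sleeps until ctx->cond is set, while lio_if_cfg_callback() stores the response, issues a write barrier and finally raises the flag. A rough user-space analogue, with C11 atomics standing in for the driver's volatile flag plus wmb() (all names below are illustrative):

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>
#include <unistd.h>

struct cmd_ctx {
        atomic_int cond;        /* 0 = waiting, 1 = response arrived */
        int status;             /* written by the "callback" before cond */
};

static void *
callback(void *arg)
{
        struct cmd_ctx *c = arg;

        usleep(1000);           /* pretend the firmware is working */
        c->status = 0;          /* publish the response payload first... */
        atomic_store_explicit(&c->cond, 1, memory_order_release);
        return (NULL);          /* ...then raise the completion flag */
}

int
main(void)
{
        struct cmd_ctx c = { .cond = 0, .status = -1 };
        pthread_t t;

        pthread_create(&t, NULL, callback, &c);
        while (atomic_load_explicit(&c.cond, memory_order_acquire) == 0)
                usleep(100);    /* the driver sleeps or polls here */
        printf("command status %d\n", c.status);
        pthread_join(t, NULL);
        return (0);
}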
*/ lio_sleep_cond(octeon_dev, &ctx->cond); retval = resp->status; if (retval) { lio_dev_err(octeon_dev, "iq/oq config failed\n"); goto setup_nic_dev_fail; } lio_swap_8B_data((uint64_t *)(&resp->cfg_info), (sizeof(struct octeon_if_cfg_info)) >> 3); num_iqueues = bitcount64(resp->cfg_info.iqmask); num_oqueues = bitcount64(resp->cfg_info.oqmask); if (!(num_iqueues) || !(num_oqueues)) { lio_dev_err(octeon_dev, "Got bad iqueues (%016llX) or oqueues (%016llX) from firmware.\n", LIO_CAST64(resp->cfg_info.iqmask), LIO_CAST64(resp->cfg_info.oqmask)); goto setup_nic_dev_fail; } lio_dev_dbg(octeon_dev, "interface %d, iqmask %016llx, oqmask %016llx, numiqueues %d, numoqueues %d\n", i, LIO_CAST64(resp->cfg_info.iqmask), LIO_CAST64(resp->cfg_info.oqmask), num_iqueues, num_oqueues); ifp = if_alloc(IFT_ETHER); if (ifp == NULL) { lio_dev_err(octeon_dev, "Device allocation failed\n"); goto setup_nic_dev_fail; } lio = malloc(sizeof(struct lio), M_DEVBUF, M_NOWAIT | M_ZERO); if (lio == NULL) { lio_dev_err(octeon_dev, "Lio allocation failed\n"); goto setup_nic_dev_fail; } if_setsoftc(ifp, lio); ifp->if_hw_tsomax = LIO_MAX_FRAME_SIZE; ifp->if_hw_tsomaxsegcount = LIO_MAX_SG; ifp->if_hw_tsomaxsegsize = PAGE_SIZE; lio->ifidx = ifidx_or_pfnum; props = &octeon_dev->props; props->gmxport = resp->cfg_info.linfo.gmxport; props->ifp = ifp; lio->linfo.num_rxpciq = num_oqueues; lio->linfo.num_txpciq = num_iqueues; for (j = 0; j < num_oqueues; j++) { lio->linfo.rxpciq[j].rxpciq64 = resp->cfg_info.linfo.rxpciq[j].rxpciq64; } for (j = 0; j < num_iqueues; j++) { lio->linfo.txpciq[j].txpciq64 = resp->cfg_info.linfo.txpciq[j].txpciq64; } lio->linfo.hw_addr = resp->cfg_info.linfo.hw_addr; lio->linfo.gmxport = resp->cfg_info.linfo.gmxport; lio->linfo.link.link_status64 = resp->cfg_info.linfo.link.link_status64; /* * Point to the properties for octeon device to which this * interface belongs. 
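Just below, the 64-bit hw_addr reported by the firmware is byte-swapped for little-endian hosts and the station address is copied out of bytes 2 through 7. A small user-space sketch of that extraction; the swap helper is an assumption based on the "64-bit swap required on LE machines" comment and is not the driver's actual lio_swap_8B_data():

#include <stdint.h>
#include <stdio.h>

/* Assumed behaviour: byte-swap each 64-bit word received from the firmware. */
static void
swap_8b_data(uint64_t *data, uint32_t nwords)
{
        uint32_t i;

        for (i = 0; i < nwords; i++)
                data[i] = __builtin_bswap64(data[i]);
}

int
main(void)
{
        uint64_t hw_addr = 0x0011223344556677ULL;       /* sample value */
        uint8_t mac[6];
        int j;

        swap_8b_data(&hw_addr, 1);
        /* The MAC sits in bytes 2..7 of the swapped field, mirroring the attach code. */
        for (j = 0; j < 6; j++)
                mac[j] = ((uint8_t *)&hw_addr)[2 + j];
        printf("%02x:%02x:%02x:%02x:%02x:%02x\n",
            mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
        return (0);
}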
*/ lio->oct_dev = octeon_dev; lio->ifp = ifp; lio_dev_dbg(octeon_dev, "if%d gmx: %d hw_addr: 0x%llx\n", i, lio->linfo.gmxport, LIO_CAST64(lio->linfo.hw_addr)); lio_init_ifnet(lio); /* 64-bit swap required on LE machines */ lio_swap_8B_data(&lio->linfo.hw_addr, 1); for (j = 0; j < 6; j++) mac[j] = *((uint8_t *)( ((uint8_t *)&lio->linfo.hw_addr) + 2 + j)); ether_ifattach(ifp, mac); /* * By default all interfaces on a single Octeon uses the same * tx and rx queues */ lio->txq = lio->linfo.txpciq[0].s.q_no; lio->rxq = lio->linfo.rxpciq[0].s.q_no; if (lio_setup_io_queues(octeon_dev, i, lio->linfo.num_txpciq, lio->linfo.num_rxpciq)) { lio_dev_err(octeon_dev, "I/O queues creation failed\n"); goto setup_nic_dev_fail; } lio_ifstate_set(lio, LIO_IFSTATE_DROQ_OPS); lio->tx_qsize = lio_get_tx_qsize(octeon_dev, lio->txq); lio->rx_qsize = lio_get_rx_qsize(octeon_dev, lio->rxq); if (lio_setup_glists(octeon_dev, lio, num_iqueues)) { lio_dev_err(octeon_dev, "Gather list allocation failed\n"); goto setup_nic_dev_fail; } if ((lio_hwlro == 0) && lio_tcp_lro_init(octeon_dev, ifp)) goto setup_nic_dev_fail; if (lio_hwlro && (if_getcapenable(ifp) & IFCAP_LRO) && (if_getcapenable(ifp) & IFCAP_RXCSUM) && (if_getcapenable(ifp) & IFCAP_RXCSUM_IPV6)) lio_set_feature(ifp, LIO_CMD_LRO_ENABLE, LIO_LROIPV4 | LIO_LROIPV6); if ((if_getcapenable(ifp) & IFCAP_VLAN_HWFILTER)) lio_set_feature(ifp, LIO_CMD_VLAN_FILTER_CTL, 1); else lio_set_feature(ifp, LIO_CMD_VLAN_FILTER_CTL, 0); if (lio_setup_rx_oom_poll_fn(ifp)) goto setup_nic_dev_fail; lio_dev_dbg(octeon_dev, "Setup NIC ifidx:%d mac:%02x%02x%02x%02x%02x%02x\n", i, mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]); lio->link_changes++; lio_ifstate_set(lio, LIO_IFSTATE_REGISTERED); /* * Sending command to firmware to enable Rx checksum offload * by default at the time of setup of Liquidio driver for * this device */ lio_set_rxcsum_command(ifp, LIO_CMD_TNL_RX_CSUM_CTL, LIO_CMD_RXCSUM_ENABLE); lio_set_feature(ifp, LIO_CMD_TNL_TX_CSUM_CTL, LIO_CMD_TXCSUM_ENABLE); #ifdef RSS if (lio_rss) { if (lio_send_rss_param(lio)) goto setup_nic_dev_fail; } else #endif /* RSS */ lio_set_feature(ifp, LIO_CMD_SET_FNV, LIO_CMD_FNV_ENABLE); lio_dev_dbg(octeon_dev, "NIC ifidx:%d Setup successful\n", i); lio_free_soft_command(octeon_dev, sc); lio->vlan_attach = EVENTHANDLER_REGISTER(vlan_config, lio_vlan_rx_add_vid, lio, EVENTHANDLER_PRI_FIRST); lio->vlan_detach = EVENTHANDLER_REGISTER(vlan_unconfig, lio_vlan_rx_kill_vid, lio, EVENTHANDLER_PRI_FIRST); /* Update stats periodically */ callout_init(&lio->stats_timer, 0); lio->stats_interval = LIO_DEFAULT_STATS_INTERVAL; lio_add_hw_stats(lio); } return (0); setup_nic_dev_fail: lio_free_soft_command(octeon_dev, sc); while (i--) { lio_dev_err(octeon_dev, "NIC ifidx:%d Setup failed\n", i); lio_destroy_nic_device(octeon_dev, i); } return (ENODEV); } static int lio_link_info(struct lio_recv_info *recv_info, void *ptr) { struct octeon_device *oct = (struct octeon_device *)ptr; struct lio_recv_pkt *recv_pkt = recv_info->recv_pkt; union octeon_link_status *ls; int gmxport = 0, i; lio_dev_dbg(oct, "%s Called\n", __func__); if (recv_pkt->buffer_size[0] != (sizeof(*ls) + LIO_DROQ_INFO_SIZE)) { lio_dev_err(oct, "Malformed NIC_INFO, len=%d, ifidx=%d\n", recv_pkt->buffer_size[0], recv_pkt->rh.r_nic_info.gmxport); goto nic_info_err; } gmxport = recv_pkt->rh.r_nic_info.gmxport; ls = (union octeon_link_status *)(recv_pkt->buffer_ptr[0]->m_data + LIO_DROQ_INFO_SIZE); lio_swap_8B_data((uint64_t *)ls, (sizeof(union octeon_link_status)) >> 3); if (oct->props.gmxport == 
gmxport) lio_update_link_status(oct->props.ifp, ls); nic_info_err: for (i = 0; i < recv_pkt->buffer_count; i++) lio_recv_buffer_free(recv_pkt->buffer_ptr[i]); lio_free_recv_info(recv_info); return (0); } void lio_free_mbuf(struct lio_instr_queue *iq, struct lio_mbuf_free_info *finfo) { bus_dmamap_sync(iq->txtag, finfo->map, BUS_DMASYNC_POSTWRITE); bus_dmamap_unload(iq->txtag, finfo->map); m_freem(finfo->mb); } void lio_free_sgmbuf(struct lio_instr_queue *iq, struct lio_mbuf_free_info *finfo) { struct lio_gather *g; struct octeon_device *oct; struct lio *lio; int iq_no; g = finfo->g; iq_no = iq->txpciq.s.q_no; oct = iq->oct_dev; lio = if_getsoftc(oct->props.ifp); mtx_lock(&lio->glist_lock[iq_no]); STAILQ_INSERT_TAIL(&lio->ghead[iq_no], &g->node, entries); mtx_unlock(&lio->glist_lock[iq_no]); bus_dmamap_sync(iq->txtag, finfo->map, BUS_DMASYNC_POSTWRITE); bus_dmamap_unload(iq->txtag, finfo->map); m_freem(finfo->mb); } static void lio_if_cfg_callback(struct octeon_device *oct, uint32_t status, void *buf) { struct lio_soft_command *sc = (struct lio_soft_command *)buf; struct lio_if_cfg_resp *resp; struct lio_if_cfg_context *ctx; resp = (struct lio_if_cfg_resp *)sc->virtrptr; ctx = (struct lio_if_cfg_context *)sc->ctxptr; oct = lio_get_device(ctx->octeon_id); if (resp->status) lio_dev_err(oct, "nic if cfg instruction failed. Status: %llx (0x%08x)\n", LIO_CAST64(resp->status), status); ctx->cond = 1; snprintf(oct->fw_info.lio_firmware_version, 32, "%s", resp->cfg_info.lio_firmware_version); /* * This barrier is required to be sure that the response has been * written fully before waking up the handler */ wmb(); } static int lio_is_mac_changed(uint8_t *new, uint8_t *old) { return ((new[0] != old[0]) || (new[1] != old[1]) || (new[2] != old[2]) || (new[3] != old[3]) || (new[4] != old[4]) || (new[5] != old[5])); } void lio_open(void *arg) { struct lio *lio = arg; struct ifnet *ifp = lio->ifp; struct octeon_device *oct = lio->oct_dev; uint8_t *mac_new, mac_old[ETHER_HDR_LEN]; int ret = 0; lio_ifstate_set(lio, LIO_IFSTATE_RUNNING); /* Ready for link status updates */ lio->intf_open = 1; lio_dev_info(oct, "Interface Open, ready for traffic\n"); /* tell Octeon to start forwarding packets to host */ lio_send_rx_ctrl_cmd(lio, 1); mac_new = IF_LLADDR(ifp); memcpy(mac_old, ((uint8_t *)&lio->linfo.hw_addr) + 2, ETHER_HDR_LEN); if (lio_is_mac_changed(mac_new, mac_old)) { ret = lio_set_mac(ifp, mac_new); if (ret) lio_dev_err(oct, "MAC change failed, error: %d\n", ret); } /* Now inform the stack we're ready */ if_setdrvflagbits(ifp, IFF_DRV_RUNNING, 0); lio_dev_info(oct, "Interface is opened\n"); } static int lio_set_rxcsum_command(struct ifnet *ifp, int command, uint8_t rx_cmd) { struct lio_ctrl_pkt nctrl; struct lio *lio = if_getsoftc(ifp); struct octeon_device *oct = lio->oct_dev; int ret = 0; nctrl.ncmd.cmd64 = 0; nctrl.ncmd.s.cmd = command; nctrl.ncmd.s.param1 = rx_cmd; nctrl.iq_no = lio->linfo.txpciq[0].s.q_no; nctrl.wait_time = 100; nctrl.lio = lio; nctrl.cb_fn = lio_ctrl_cmd_completion; ret = lio_send_ctrl_pkt(lio->oct_dev, &nctrl); if (ret < 0) { lio_dev_err(oct, "DEVFLAGS RXCSUM change failed in core(ret:0x%x)\n", ret); } return (ret); } static int lio_stop_nic_module(struct octeon_device *oct) { int i, j; struct lio *lio; lio_dev_dbg(oct, "Stopping network interfaces\n"); if (!oct->ifcount) { lio_dev_err(oct, "Init for Octeon was not completed\n"); return (1); } mtx_lock(&oct->cmd_resp_wqlock); oct->cmd_resp_state = LIO_DRV_OFFLINE; mtx_unlock(&oct->cmd_resp_wqlock); for (i = 0; i < oct->ifcount; i++) 
{ lio = if_getsoftc(oct->props.ifp); for (j = 0; j < oct->num_oqs; j++) lio_unregister_droq_ops(oct, lio->linfo.rxpciq[j].s.q_no); } callout_drain(&lio->stats_timer); for (i = 0; i < oct->ifcount; i++) lio_destroy_nic_device(oct, i); lio_dev_dbg(oct, "Network interface stopped\n"); return (0); } static void lio_delete_glists(struct octeon_device *oct, struct lio *lio) { struct lio_gather *g; int i; if (lio->glist_lock != NULL) { free((void *)lio->glist_lock, M_DEVBUF); lio->glist_lock = NULL; } if (lio->ghead == NULL) return; for (i = 0; i < lio->linfo.num_txpciq; i++) { do { g = (struct lio_gather *) lio_delete_first_node(&lio->ghead[i]); free(g, M_DEVBUF); } while (g); if ((lio->glists_virt_base != NULL) && (lio->glists_virt_base[i] != NULL)) { lio_dma_free(lio->glist_entry_size * lio->tx_qsize, lio->glists_virt_base[i]); } } free(lio->glists_virt_base, M_DEVBUF); lio->glists_virt_base = NULL; free(lio->glists_dma_base, M_DEVBUF); lio->glists_dma_base = NULL; free(lio->ghead, M_DEVBUF); lio->ghead = NULL; } static int lio_setup_glists(struct octeon_device *oct, struct lio *lio, int num_iqs) { struct lio_gather *g; int i, j; - lio->glist_lock = malloc(num_iqs * sizeof(*lio->glist_lock), M_DEVBUF, - M_NOWAIT | M_ZERO); + lio->glist_lock = mallocarray(num_iqs, sizeof(*lio->glist_lock), + M_DEVBUF, M_NOWAIT | M_ZERO); if (lio->glist_lock == NULL) return (1); - lio->ghead = malloc(num_iqs * sizeof(*lio->ghead), M_DEVBUF, + lio->ghead = mallocarray(num_iqs, sizeof(*lio->ghead), M_DEVBUF, M_NOWAIT | M_ZERO); if (lio->ghead == NULL) { free((void *)lio->glist_lock, M_DEVBUF); lio->glist_lock = NULL; return (1); } lio->glist_entry_size = ROUNDUP8((ROUNDUP4(LIO_MAX_SG) >> 2) * LIO_SG_ENTRY_SIZE); /* * allocate memory to store virtual and dma base address of * per glist consistent memory */ - lio->glists_virt_base = malloc(num_iqs * sizeof(void *), M_DEVBUF, + lio->glists_virt_base = mallocarray(num_iqs, sizeof(void *), M_DEVBUF, M_NOWAIT | M_ZERO); - lio->glists_dma_base = malloc(num_iqs * sizeof(vm_paddr_t), M_DEVBUF, - M_NOWAIT | M_ZERO); + lio->glists_dma_base = mallocarray(num_iqs, sizeof(vm_paddr_t), + M_DEVBUF, M_NOWAIT | M_ZERO); if ((lio->glists_virt_base == NULL) || (lio->glists_dma_base == NULL)) { lio_delete_glists(oct, lio); return (1); } for (i = 0; i < num_iqs; i++) { mtx_init(&lio->glist_lock[i], "glist_lock", NULL, MTX_DEF); STAILQ_INIT(&lio->ghead[i]); lio->glists_virt_base[i] = lio_dma_alloc(lio->glist_entry_size * lio->tx_qsize, (vm_paddr_t *)&lio->glists_dma_base[i]); if (lio->glists_virt_base[i] == NULL) { lio_delete_glists(oct, lio); return (1); } for (j = 0; j < lio->tx_qsize; j++) { g = malloc(sizeof(*g), M_DEVBUF, M_NOWAIT | M_ZERO); if (g == NULL) break; g->sg = (struct lio_sg_entry *) ((uint64_t)lio->glists_virt_base[i] + (j * lio->glist_entry_size)); g->sg_dma_ptr = (uint64_t)lio->glists_dma_base[i] + (j * lio->glist_entry_size); STAILQ_INSERT_TAIL(&lio->ghead[i], &g->node, entries); } if (j != lio->tx_qsize) { lio_delete_glists(oct, lio); return (1); } } return (0); } void lio_stop(struct ifnet *ifp) { struct lio *lio = if_getsoftc(ifp); struct octeon_device *oct = lio->oct_dev; lio_ifstate_reset(lio, LIO_IFSTATE_RUNNING); if_link_state_change(ifp, LINK_STATE_DOWN); lio->intf_open = 0; lio->linfo.link.s.link_up = 0; lio->link_changes++; lio_send_rx_ctrl_cmd(lio, 0); /* Tell the stack that the interface is no longer active */ if_setdrvflagbits(ifp, 0, IFF_DRV_RUNNING); lio_dev_info(oct, "Interface is stopped\n"); } static void lio_check_rx_oom_status(struct lio *lio) { 
struct lio_droq *droq; struct octeon_device *oct = lio->oct_dev; int desc_refilled; int q, q_no = 0; for (q = 0; q < oct->num_oqs; q++) { q_no = lio->linfo.rxpciq[q].s.q_no; droq = oct->droq[q_no]; if (droq == NULL) continue; if (lio_read_csr32(oct, droq->pkts_credit_reg) <= 0x40) { mtx_lock(&droq->lock); desc_refilled = lio_droq_refill(oct, droq); /* * Flush the droq descriptor data to memory to be sure * that when we update the credits the data in memory * is accurate. */ wmb(); lio_write_csr32(oct, droq->pkts_credit_reg, desc_refilled); /* make sure mmio write completes */ __compiler_membar(); mtx_unlock(&droq->lock); } } } static void lio_poll_check_rx_oom_status(void *arg, int pending __unused) { struct lio_tq *rx_status_tq = arg; struct lio *lio = rx_status_tq->ctxptr; if (lio_ifstate_check(lio, LIO_IFSTATE_RUNNING)) lio_check_rx_oom_status(lio); taskqueue_enqueue_timeout(rx_status_tq->tq, &rx_status_tq->work, lio_ms_to_ticks(50)); } static int lio_setup_rx_oom_poll_fn(struct ifnet *ifp) { struct lio *lio = if_getsoftc(ifp); struct octeon_device *oct = lio->oct_dev; struct lio_tq *rx_status_tq; rx_status_tq = &lio->rx_status_tq; rx_status_tq->tq = taskqueue_create("lio_rx_oom_status", M_WAITOK, taskqueue_thread_enqueue, &rx_status_tq->tq); if (rx_status_tq->tq == NULL) { lio_dev_err(oct, "unable to create lio rx oom status tq\n"); return (-1); } TIMEOUT_TASK_INIT(rx_status_tq->tq, &rx_status_tq->work, 0, lio_poll_check_rx_oom_status, (void *)rx_status_tq); rx_status_tq->ctxptr = lio; taskqueue_start_threads(&rx_status_tq->tq, 1, PI_NET, "lio%d_rx_oom_status", oct->octeon_id); taskqueue_enqueue_timeout(rx_status_tq->tq, &rx_status_tq->work, lio_ms_to_ticks(50)); return (0); } static void lio_cleanup_rx_oom_poll_fn(struct ifnet *ifp) { struct lio *lio = if_getsoftc(ifp); if (lio->rx_status_tq.tq != NULL) { while (taskqueue_cancel_timeout(lio->rx_status_tq.tq, &lio->rx_status_tq.work, NULL)) taskqueue_drain_timeout(lio->rx_status_tq.tq, &lio->rx_status_tq.work); taskqueue_free(lio->rx_status_tq.tq); lio->rx_status_tq.tq = NULL; } } static void lio_destroy_nic_device(struct octeon_device *oct, int ifidx) { struct ifnet *ifp = oct->props.ifp; struct lio *lio; if (ifp == NULL) { lio_dev_err(oct, "%s No ifp ptr for index %d\n", __func__, ifidx); return; } lio = if_getsoftc(ifp); lio_ifstate_set(lio, LIO_IFSTATE_DETACH); lio_dev_dbg(oct, "NIC device cleanup\n"); if (atomic_load_acq_int(&lio->ifstate) & LIO_IFSTATE_RUNNING) lio_stop(ifp); if (lio_wait_for_pending_requests(oct)) lio_dev_err(oct, "There were pending requests\n"); if (lio_wait_for_instr_fetch(oct)) lio_dev_err(oct, "IQ had pending instructions\n"); if (lio_wait_for_oq_pkts(oct)) lio_dev_err(oct, "OQ had pending packets\n"); if (atomic_load_acq_int(&lio->ifstate) & LIO_IFSTATE_REGISTERED) ether_ifdetach(ifp); lio_tcp_lro_free(oct, ifp); lio_cleanup_rx_oom_poll_fn(ifp); lio_delete_glists(oct, lio); EVENTHANDLER_DEREGISTER(vlan_config, lio->vlan_attach); EVENTHANDLER_DEREGISTER(vlan_unconfig, lio->vlan_detach); free(lio, M_DEVBUF); if_free(ifp); oct->props.gmxport = -1; oct->props.ifp = NULL; } static void print_link_info(struct ifnet *ifp) { struct lio *lio = if_getsoftc(ifp); if (!lio_ifstate_check(lio, LIO_IFSTATE_RESETTING) && lio_ifstate_check(lio, LIO_IFSTATE_REGISTERED)) { struct octeon_link_info *linfo = &lio->linfo; if (linfo->link.s.link_up) { lio_dev_info(lio->oct_dev, "%d Mbps %s Duplex UP\n", linfo->link.s.speed, (linfo->link.s.duplex) ? 
"Full" : "Half"); } else { lio_dev_info(lio->oct_dev, "Link Down\n"); } } } static inline void lio_update_link_status(struct ifnet *ifp, union octeon_link_status *ls) { struct lio *lio = if_getsoftc(ifp); int changed = (lio->linfo.link.link_status64 != ls->link_status64); lio->linfo.link.link_status64 = ls->link_status64; if ((lio->intf_open) && (changed)) { print_link_info(ifp); lio->link_changes++; if (lio->linfo.link.s.link_up) if_link_state_change(ifp, LINK_STATE_UP); else if_link_state_change(ifp, LINK_STATE_DOWN); } } /* * \brief Callback for rx ctrl * @param status status of request * @param buf pointer to resp structure */ static void lio_rx_ctl_callback(struct octeon_device *oct, uint32_t status, void *buf) { struct lio_soft_command *sc = (struct lio_soft_command *)buf; struct lio_rx_ctl_context *ctx; ctx = (struct lio_rx_ctl_context *)sc->ctxptr; oct = lio_get_device(ctx->octeon_id); if (status) lio_dev_err(oct, "rx ctl instruction failed. Status: %llx\n", LIO_CAST64(status)); ctx->cond = 1; /* * This barrier is required to be sure that the response has been * written fully before waking up the handler */ wmb(); } static void lio_send_rx_ctrl_cmd(struct lio *lio, int start_stop) { struct lio_soft_command *sc; struct lio_rx_ctl_context *ctx; union octeon_cmd *ncmd; struct octeon_device *oct = (struct octeon_device *)lio->oct_dev; int ctx_size = sizeof(struct lio_rx_ctl_context); int retval; if (oct->props.rx_on == start_stop) return; sc = lio_alloc_soft_command(oct, OCTEON_CMD_SIZE, 16, ctx_size); if (sc == NULL) return; ncmd = (union octeon_cmd *)sc->virtdptr; ctx = (struct lio_rx_ctl_context *)sc->ctxptr; ctx->cond = 0; ctx->octeon_id = lio_get_device_id(oct); ncmd->cmd64 = 0; ncmd->s.cmd = LIO_CMD_RX_CTL; ncmd->s.param1 = start_stop; lio_swap_8B_data((uint64_t *)ncmd, (OCTEON_CMD_SIZE >> 3)); sc->iq_no = lio->linfo.txpciq[0].s.q_no; lio_prepare_soft_command(oct, sc, LIO_OPCODE_NIC, LIO_OPCODE_NIC_CMD, 0, 0, 0); sc->callback = lio_rx_ctl_callback; sc->callback_arg = sc; sc->wait_time = 5000; retval = lio_send_soft_command(oct, sc); if (retval == LIO_IQ_SEND_FAILED) { lio_dev_err(oct, "Failed to send RX Control message\n"); } else { /* * Sleep on a wait queue till the cond flag indicates that the * response arrived or timed-out. 
*/ lio_sleep_cond(oct, &ctx->cond); oct->props.rx_on = start_stop; } lio_free_soft_command(oct, sc); } static void lio_vlan_rx_add_vid(void *arg, struct ifnet *ifp, uint16_t vid) { struct lio_ctrl_pkt nctrl; struct lio *lio = if_getsoftc(ifp); struct octeon_device *oct = lio->oct_dev; int ret = 0; if (if_getsoftc(ifp) != arg) /* Not our event */ return; if ((vid == 0) || (vid > 4095)) /* Invalid */ return; bzero(&nctrl, sizeof(struct lio_ctrl_pkt)); nctrl.ncmd.cmd64 = 0; nctrl.ncmd.s.cmd = LIO_CMD_ADD_VLAN_FILTER; nctrl.ncmd.s.param1 = vid; nctrl.iq_no = lio->linfo.txpciq[0].s.q_no; nctrl.wait_time = 100; nctrl.lio = lio; nctrl.cb_fn = lio_ctrl_cmd_completion; ret = lio_send_ctrl_pkt(lio->oct_dev, &nctrl); if (ret < 0) { lio_dev_err(oct, "Add VLAN filter failed in core (ret: 0x%x)\n", ret); } } static void lio_vlan_rx_kill_vid(void *arg, struct ifnet *ifp, uint16_t vid) { struct lio_ctrl_pkt nctrl; struct lio *lio = if_getsoftc(ifp); struct octeon_device *oct = lio->oct_dev; int ret = 0; if (if_getsoftc(ifp) != arg) /* Not our event */ return; if ((vid == 0) || (vid > 4095)) /* Invalid */ return; bzero(&nctrl, sizeof(struct lio_ctrl_pkt)); nctrl.ncmd.cmd64 = 0; nctrl.ncmd.s.cmd = LIO_CMD_DEL_VLAN_FILTER; nctrl.ncmd.s.param1 = vid; nctrl.iq_no = lio->linfo.txpciq[0].s.q_no; nctrl.wait_time = 100; nctrl.lio = lio; nctrl.cb_fn = lio_ctrl_cmd_completion; ret = lio_send_ctrl_pkt(lio->oct_dev, &nctrl); if (ret < 0) { lio_dev_err(oct, "Kill VLAN filter failed in core (ret: 0x%x)\n", ret); } } static int lio_wait_for_oq_pkts(struct octeon_device *oct) { int i, pending_pkts, pkt_cnt = 0, retry = 100; do { pending_pkts = 0; for (i = 0; i < LIO_MAX_OUTPUT_QUEUES(oct); i++) { if (!(oct->io_qmask.oq & BIT_ULL(i))) continue; pkt_cnt = lio_droq_check_hw_for_pkts(oct->droq[i]); if (pkt_cnt > 0) { pending_pkts += pkt_cnt; taskqueue_enqueue(oct->droq[i]->droq_taskqueue, &oct->droq[i]->droq_task); } } pkt_cnt = 0; lio_sleep_timeout(1); } while (retry-- && pending_pkts); return (pkt_cnt); } static void lio_destroy_resources(struct octeon_device *oct) { int i, refcount; switch (atomic_load_acq_int(&oct->status)) { case LIO_DEV_RUNNING: case LIO_DEV_CORE_OK: /* No more instructions will be forwarded. */ atomic_store_rel_int(&oct->status, LIO_DEV_IN_RESET); oct->app_mode = LIO_DRV_INVALID_APP; lio_dev_dbg(oct, "Device state is now %s\n", lio_get_state_string(&oct->status)); lio_sleep_timeout(100); /* fallthrough */ case LIO_DEV_HOST_OK: /* fallthrough */ case LIO_DEV_CONSOLE_INIT_DONE: /* Remove any consoles */ lio_remove_consoles(oct); /* fallthrough */ case LIO_DEV_IO_QUEUES_DONE: if (lio_wait_for_pending_requests(oct)) lio_dev_err(oct, "There were pending requests\n"); if (lio_wait_for_instr_fetch(oct)) lio_dev_err(oct, "IQ had pending instructions\n"); /* * Disable the input and output queues now. No more packets will * arrive from Octeon, but we should wait for all packet * processing to finish. 
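The functional change in this revision is in lio_setup_glists() above: the per-queue allocations now go through mallocarray(nitems, size) instead of malloc(nitems * size), so the count-times-size multiplication is checked for overflow rather than being left to wrap silently and return an undersized buffer. A user-space sketch of one way such a guard can look (illustrative only; the kernel's mallocarray() may treat overflow differently, for example as a fatal error):

#include <stdint.h>
#include <stdlib.h>

/* Refuse the request if nitems * size cannot be represented in a size_t. */
static void *
checked_array_alloc(size_t nitems, size_t size)
{
        if (size != 0 && nitems > SIZE_MAX / size)
                return (NULL);
        return (calloc(nitems, size));  /* calloc() also zeroes, like M_ZERO */
}

int
main(void)
{
        void *ok = checked_array_alloc(8, sizeof(uint64_t));    /* fine */
        void *bad = checked_array_alloc(SIZE_MAX, 2);           /* rejected */

        free(ok);
        return (bad == NULL ? 0 : 1);
}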
*/ oct->fn_list.disable_io_queues(oct); if (lio_wait_for_oq_pkts(oct)) lio_dev_err(oct, "OQ had pending packets\n"); /* fallthrough */ case LIO_DEV_INTR_SET_DONE: /* Disable interrupts */ oct->fn_list.disable_interrupt(oct, OCTEON_ALL_INTR); if (oct->msix_on) { for (i = 0; i < oct->num_msix_irqs - 1; i++) { if (oct->ioq_vector[i].tag != NULL) { bus_teardown_intr(oct->device, oct->ioq_vector[i].msix_res, oct->ioq_vector[i].tag); oct->ioq_vector[i].tag = NULL; } if (oct->ioq_vector[i].msix_res != NULL) { bus_release_resource(oct->device, SYS_RES_IRQ, oct->ioq_vector[i].vector, oct->ioq_vector[i].msix_res); oct->ioq_vector[i].msix_res = NULL; } } /* non-iov vector's argument is oct struct */ if (oct->tag != NULL) { bus_teardown_intr(oct->device, oct->msix_res, oct->tag); oct->tag = NULL; } if (oct->msix_res != NULL) { bus_release_resource(oct->device, SYS_RES_IRQ, oct->aux_vector, oct->msix_res); oct->msix_res = NULL; } pci_release_msi(oct->device); } /* fallthrough */ case LIO_DEV_IN_RESET: case LIO_DEV_DROQ_INIT_DONE: /* Wait for any pending operations */ lio_mdelay(100); for (i = 0; i < LIO_MAX_OUTPUT_QUEUES(oct); i++) { if (!(oct->io_qmask.oq & BIT_ULL(i))) continue; lio_delete_droq(oct, i); } /* fallthrough */ case LIO_DEV_RESP_LIST_INIT_DONE: for (i = 0; i < LIO_MAX_POSSIBLE_OUTPUT_QUEUES; i++) { if (oct->droq[i] != NULL) { free(oct->droq[i], M_DEVBUF); oct->droq[i] = NULL; } } lio_delete_response_list(oct); /* fallthrough */ case LIO_DEV_INSTR_QUEUE_INIT_DONE: for (i = 0; i < LIO_MAX_INSTR_QUEUES(oct); i++) { if (!(oct->io_qmask.iq & BIT_ULL(i))) continue; lio_delete_instr_queue(oct, i); } /* fallthrough */ case LIO_DEV_MSIX_ALLOC_VECTOR_DONE: for (i = 0; i < LIO_MAX_POSSIBLE_INSTR_QUEUES; i++) { if (oct->instr_queue[i] != NULL) { free(oct->instr_queue[i], M_DEVBUF); oct->instr_queue[i] = NULL; } } lio_free_ioq_vector(oct); /* fallthrough */ case LIO_DEV_SC_BUFF_POOL_INIT_DONE: lio_free_sc_buffer_pool(oct); /* fallthrough */ case LIO_DEV_DISPATCH_INIT_DONE: lio_delete_dispatch_list(oct); /* fallthrough */ case LIO_DEV_PCI_MAP_DONE: refcount = lio_deregister_device(oct); if (fw_type_is_none()) lio_pci_flr(oct); if (!refcount) oct->fn_list.soft_reset(oct); lio_unmap_pci_barx(oct, 0); lio_unmap_pci_barx(oct, 1); /* fallthrough */ case LIO_DEV_PCI_ENABLE_DONE: /* Disable the device, releasing the PCI INT */ pci_disable_busmaster(oct->device); /* fallthrough */ case LIO_DEV_BEGIN_STATE: break; } /* end switch (oct->status) */ } Index: head/sys/dev/mpr/mpr.c =================================================================== --- head/sys/dev/mpr/mpr.c (revision 327948) +++ head/sys/dev/mpr/mpr.c (revision 327949) @@ -1,3831 +1,3831 @@ /*- * Copyright (c) 2009 Yahoo! Inc. * Copyright (c) 2011-2015 LSI Corp. * Copyright (c) 2013-2016 Avago Technologies * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. 
* * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * Avago Technologies (LSI) MPT-Fusion Host Adapter FreeBSD * */ #include __FBSDID("$FreeBSD$"); /* Communications core for Avago Technologies (LSI) MPT3 */ /* TODO Move headers to mprvar */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include static int mpr_diag_reset(struct mpr_softc *sc, int sleep_flag); static int mpr_init_queues(struct mpr_softc *sc); static void mpr_resize_queues(struct mpr_softc *sc); static int mpr_message_unit_reset(struct mpr_softc *sc, int sleep_flag); static int mpr_transition_operational(struct mpr_softc *sc); static int mpr_iocfacts_allocate(struct mpr_softc *sc, uint8_t attaching); static void mpr_iocfacts_free(struct mpr_softc *sc); static void mpr_startup(void *arg); static int mpr_send_iocinit(struct mpr_softc *sc); static int mpr_alloc_queues(struct mpr_softc *sc); static int mpr_alloc_hw_queues(struct mpr_softc *sc); static int mpr_alloc_replies(struct mpr_softc *sc); static int mpr_alloc_requests(struct mpr_softc *sc); static int mpr_alloc_nvme_prp_pages(struct mpr_softc *sc); static int mpr_attach_log(struct mpr_softc *sc); static __inline void mpr_complete_command(struct mpr_softc *sc, struct mpr_command *cm); static void mpr_dispatch_event(struct mpr_softc *sc, uintptr_t data, MPI2_EVENT_NOTIFICATION_REPLY *reply); static void mpr_config_complete(struct mpr_softc *sc, struct mpr_command *cm); static void mpr_periodic(void *); static int mpr_reregister_events(struct mpr_softc *sc); static void mpr_enqueue_request(struct mpr_softc *sc, struct mpr_command *cm); static int mpr_get_iocfacts(struct mpr_softc *sc, MPI2_IOC_FACTS_REPLY *facts); static int mpr_wait_db_ack(struct mpr_softc *sc, int timeout, int sleep_flag); static int mpr_debug_sysctl(SYSCTL_HANDLER_ARGS); static void mpr_parse_debug(struct mpr_softc *sc, char *list); SYSCTL_NODE(_hw, OID_AUTO, mpr, CTLFLAG_RD, 0, "MPR Driver Parameters"); MALLOC_DEFINE(M_MPR, "mpr", "mpr driver memory"); /* * Do a "Diagnostic Reset" aka a hard reset. This should get the chip out of * any state and back to its initialization state machine. */ static char mpt2_reset_magic[] = { 0x00, 0x0f, 0x04, 0x0b, 0x02, 0x07, 0x0d }; /* * Added this union to smoothly convert le64toh cm->cm_desc.Words. * Compiler only supports uint64_t to be passed as an argument. 
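The typedef that follows is the conversion the comment above describes: the same 64-bit reply descriptor can be handed around as a single word (so it can be passed through le64toh()) or read back as two 32-bit halves where the code needs them separately. A stand-alone illustration (user-space, names illustrative):

#include <stdint.h>
#include <stdio.h>

typedef union {
        uint64_t word;
        struct {
                uint32_t low;
                uint32_t high;
        } u;
} descriptor_t;

int
main(void)
{
        descriptor_t d;

        d.word = 0x1122334455667788ULL;
        /* On a little-endian host, .low holds the low-order 32 bits. */
        printf("word 0x%016jx -> low 0x%08x, high 0x%08x\n",
            (uintmax_t)d.word, d.u.low, d.u.high);
        return (0);
}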
 * Otherwise it will throw this error:
 * "aggregate value used where an integer was expected"
 */
typedef union _reply_descriptor {
	u64 word;
	struct {
		u32 low;
		u32 high;
	} u;
} reply_descriptor, request_descriptor;

/* Rate limit chain-fail messages to 1 per minute */
static struct timeval mpr_chainfail_interval = { 60, 0 };

/*
 * sleep_flag can be either CAN_SLEEP or NO_SLEEP.
 * If this function is called from process context, it can sleep and there is
 * no harm in doing so; if it is called from an interrupt handler, it cannot
 * sleep and the NO_SLEEP flag must be set.
 * Based on the sleep flag, the driver will call either msleep, pause or DELAY.
 * msleep and pause are variants of the same thing, but pause is used when
 * mpr_mtx is not held by the driver.
 */
static int
mpr_diag_reset(struct mpr_softc *sc, int sleep_flag)
{
	uint32_t reg;
	int i, error, tries = 0;
	uint8_t first_wait_done = FALSE;

	mpr_dprint(sc, MPR_INIT, "%s entered\n", __func__);

	/* Clear any pending interrupts */
	mpr_regwrite(sc, MPI2_HOST_INTERRUPT_STATUS_OFFSET, 0x0);

	/*
	 * Force NO_SLEEP for threads that are prohibited from sleeping,
	 * e.g. threads running in an interrupt handler.
	 */
#if __FreeBSD_version >= 1000029
	if (curthread->td_no_sleeping)
#else //__FreeBSD_version < 1000029
	if (curthread->td_pflags & TDP_NOSLEEPING)
#endif //__FreeBSD_version >= 1000029
		sleep_flag = NO_SLEEP;

	mpr_dprint(sc, MPR_INIT, "sequence start, sleep_flag=%d\n", sleep_flag);
	/* Push the magic sequence */
	error = ETIMEDOUT;
	while (tries++ < 20) {
		for (i = 0; i < sizeof(mpt2_reset_magic); i++)
			mpr_regwrite(sc, MPI2_WRITE_SEQUENCE_OFFSET,
			    mpt2_reset_magic[i]);

		/* wait 100 msec */
		if (mtx_owned(&sc->mpr_mtx) && sleep_flag == CAN_SLEEP)
			msleep(&sc->msleep_fake_chan, &sc->mpr_mtx, 0,
			    "mprdiag", hz/10);
		else if (sleep_flag == CAN_SLEEP)
			pause("mprdiag", hz/10);
		else
			DELAY(100 * 1000);

		reg = mpr_regread(sc, MPI2_HOST_DIAGNOSTIC_OFFSET);
		if (reg & MPI2_DIAG_DIAG_WRITE_ENABLE) {
			error = 0;
			break;
		}
	}
	if (error) {
		mpr_dprint(sc, MPR_INIT, "sequence failed, error=%d, exit\n",
		    error);
		return (error);
	}

	/* Send the actual reset.  XXX need to refresh the reg? */
	reg |= MPI2_DIAG_RESET_ADAPTER;
	mpr_dprint(sc, MPR_INIT, "sequence success, sending reset, reg= 0x%x\n",
	    reg);
	mpr_regwrite(sc, MPI2_HOST_DIAGNOSTIC_OFFSET, reg);

	/* Wait up to 300 seconds in 50ms intervals */
	error = ETIMEDOUT;
	for (i = 0; i < 6000; i++) {
		/*
		 * Wait 50 msec.  If this is the first time through, wait 256
		 * msec to satisfy Diag Reset timing requirements.
		 */
		if (first_wait_done) {
			if (mtx_owned(&sc->mpr_mtx) && sleep_flag == CAN_SLEEP)
				msleep(&sc->msleep_fake_chan, &sc->mpr_mtx, 0,
				    "mprdiag", hz/20);
			else if (sleep_flag == CAN_SLEEP)
				pause("mprdiag", hz/20);
			else
				DELAY(50 * 1000);
		} else {
			DELAY(256 * 1000);
			first_wait_done = TRUE;
		}
		/*
		 * Check for the RESET_ADAPTER bit to be cleared first, then
		 * wait for the RESET state to be cleared, which takes a little
		 * longer.
*/ reg = mpr_regread(sc, MPI2_HOST_DIAGNOSTIC_OFFSET); if (reg & MPI2_DIAG_RESET_ADAPTER) { continue; } reg = mpr_regread(sc, MPI2_DOORBELL_OFFSET); if ((reg & MPI2_IOC_STATE_MASK) != MPI2_IOC_STATE_RESET) { error = 0; break; } } if (error) { mpr_dprint(sc, MPR_INIT, "reset failed, error= %d, exit\n", error); return (error); } mpr_regwrite(sc, MPI2_WRITE_SEQUENCE_OFFSET, 0x0); mpr_dprint(sc, MPR_INIT, "diag reset success, exit\n"); return (0); } static int mpr_message_unit_reset(struct mpr_softc *sc, int sleep_flag) { int error; MPR_FUNCTRACE(sc); mpr_dprint(sc, MPR_INIT, "%s entered\n", __func__); error = 0; mpr_regwrite(sc, MPI2_DOORBELL_OFFSET, MPI2_FUNCTION_IOC_MESSAGE_UNIT_RESET << MPI2_DOORBELL_FUNCTION_SHIFT); if (mpr_wait_db_ack(sc, 5, sleep_flag) != 0) { mpr_dprint(sc, MPR_INIT|MPR_FAULT, "Doorbell handshake failed\n"); error = ETIMEDOUT; } mpr_dprint(sc, MPR_INIT, "%s exit\n", __func__); return (error); } static int mpr_transition_ready(struct mpr_softc *sc) { uint32_t reg, state; int error, tries = 0; int sleep_flags; MPR_FUNCTRACE(sc); /* If we are in attach call, do not sleep */ sleep_flags = (sc->mpr_flags & MPR_FLAGS_ATTACH_DONE) ? CAN_SLEEP : NO_SLEEP; error = 0; mpr_dprint(sc, MPR_INIT, "%s entered, sleep_flags= %d\n", __func__, sleep_flags); while (tries++ < 1200) { reg = mpr_regread(sc, MPI2_DOORBELL_OFFSET); mpr_dprint(sc, MPR_INIT, " Doorbell= 0x%x\n", reg); /* * Ensure the IOC is ready to talk. If it's not, try * resetting it. */ if (reg & MPI2_DOORBELL_USED) { mpr_dprint(sc, MPR_INIT, " Not ready, sending diag " "reset\n"); mpr_diag_reset(sc, sleep_flags); DELAY(50000); continue; } /* Is the adapter owned by another peer? */ if ((reg & MPI2_DOORBELL_WHO_INIT_MASK) == (MPI2_WHOINIT_PCI_PEER << MPI2_DOORBELL_WHO_INIT_SHIFT)) { mpr_dprint(sc, MPR_INIT|MPR_FAULT, "IOC is under the " "control of another peer host, aborting " "initialization.\n"); error = ENXIO; break; } state = reg & MPI2_IOC_STATE_MASK; if (state == MPI2_IOC_STATE_READY) { /* Ready to go! */ error = 0; break; } else if (state == MPI2_IOC_STATE_FAULT) { mpr_dprint(sc, MPR_INIT|MPR_FAULT, "IOC in fault " "state 0x%x, resetting\n", state & MPI2_DOORBELL_FAULT_CODE_MASK); mpr_diag_reset(sc, sleep_flags); } else if (state == MPI2_IOC_STATE_OPERATIONAL) { /* Need to take ownership */ mpr_message_unit_reset(sc, sleep_flags); } else if (state == MPI2_IOC_STATE_RESET) { /* Wait a bit, IOC might be in transition */ mpr_dprint(sc, MPR_INIT|MPR_FAULT, "IOC in unexpected reset state\n"); } else { mpr_dprint(sc, MPR_INIT|MPR_FAULT, "IOC in unknown state 0x%x\n", state); error = EINVAL; break; } /* Wait 50ms for things to settle down. */ DELAY(50000); } if (error) mpr_dprint(sc, MPR_INIT|MPR_FAULT, "Cannot transition IOC to ready\n"); mpr_dprint(sc, MPR_INIT, "%s exit\n", __func__); return (error); } static int mpr_transition_operational(struct mpr_softc *sc) { uint32_t reg, state; int error; MPR_FUNCTRACE(sc); error = 0; reg = mpr_regread(sc, MPI2_DOORBELL_OFFSET); mpr_dprint(sc, MPR_INIT, "%s entered, Doorbell= 0x%x\n", __func__, reg); state = reg & MPI2_IOC_STATE_MASK; if (state != MPI2_IOC_STATE_READY) { mpr_dprint(sc, MPR_INIT, "IOC not ready\n"); if ((error = mpr_transition_ready(sc)) != 0) { mpr_dprint(sc, MPR_INIT|MPR_FAULT, "failed to transition ready, exit\n"); return (error); } } error = mpr_send_iocinit(sc); mpr_dprint(sc, MPR_INIT, "%s exit\n", __func__); return (error); } static void mpr_resize_queues(struct mpr_softc *sc) { int reqcr, prireqcr; /* * Size the queues. 
Since the reply queues always need one free * entry, we'll deduct one reply message here. The LSI documents * suggest instead to add a count to the request queue, but I think * that it's better to deduct from reply queue. */ prireqcr = MAX(1, sc->max_prireqframes); prireqcr = MIN(prireqcr, sc->facts->HighPriorityCredit); reqcr = MAX(2, sc->max_reqframes); reqcr = MIN(reqcr, sc->facts->RequestCredit); sc->num_reqs = prireqcr + reqcr; sc->num_replies = MIN(sc->max_replyframes + sc->max_evtframes, sc->facts->MaxReplyDescriptorPostQueueDepth) - 1; /* * Figure out the number of MSIx-based queues. If the firmware or * user has done something crazy and not allowed enough credit for * the queues to be useful then don't enable multi-queue. */ if (sc->facts->MaxMSIxVectors < 2) sc->msi_msgs = 1; if (sc->msi_msgs > 1) { sc->msi_msgs = MIN(sc->msi_msgs, mp_ncpus); sc->msi_msgs = MIN(sc->msi_msgs, sc->facts->MaxMSIxVectors); if (sc->num_reqs / sc->msi_msgs < 2) sc->msi_msgs = 1; } mpr_dprint(sc, MPR_INIT, "Sized queues to q=%d reqs=%d replies=%d\n", sc->msi_msgs, sc->num_reqs, sc->num_replies); } /* * This is called during attach and when re-initializing due to a Diag Reset. * IOC Facts is used to allocate many of the structures needed by the driver. * If called from attach, de-allocation is not required because the driver has * not allocated any structures yet, but if called from a Diag Reset, previously * allocated structures based on IOC Facts will need to be freed and re- * allocated bases on the latest IOC Facts. */ static int mpr_iocfacts_allocate(struct mpr_softc *sc, uint8_t attaching) { int error; Mpi2IOCFactsReply_t saved_facts; uint8_t saved_mode, reallocating; mpr_dprint(sc, MPR_INIT|MPR_TRACE, "%s entered\n", __func__); /* Save old IOC Facts and then only reallocate if Facts have changed */ if (!attaching) { bcopy(sc->facts, &saved_facts, sizeof(MPI2_IOC_FACTS_REPLY)); } /* * Get IOC Facts. In all cases throughout this function, panic if doing * a re-initialization and only return the error if attaching so the OS * can handle it. */ if ((error = mpr_get_iocfacts(sc, sc->facts)) != 0) { if (attaching) { mpr_dprint(sc, MPR_INIT|MPR_FAULT, "Failed to get " "IOC Facts with error %d, exit\n", error); return (error); } else { panic("%s failed to get IOC Facts with error %d\n", __func__, error); } } MPR_DPRINT_PAGE(sc, MPR_XINFO, iocfacts, sc->facts); snprintf(sc->fw_version, sizeof(sc->fw_version), "%02d.%02d.%02d.%02d", sc->facts->FWVersion.Struct.Major, sc->facts->FWVersion.Struct.Minor, sc->facts->FWVersion.Struct.Unit, sc->facts->FWVersion.Struct.Dev); mpr_dprint(sc, MPR_INFO, "Firmware: %s, Driver: %s\n", sc->fw_version, MPR_DRIVER_VERSION); mpr_dprint(sc, MPR_INFO, "IOCCapabilities: %b\n", sc->facts->IOCCapabilities, "\20" "\3ScsiTaskFull" "\4DiagTrace" "\5SnapBuf" "\6ExtBuf" "\7EEDP" "\10BiDirTarg" "\11Multicast" "\14TransRetry" "\15IR" "\16EventReplay" "\17RaidAccel" "\20MSIXIndex" "\21HostDisc" "\22FastPath" "\23RDPQArray" "\24AtomicReqDesc" "\25PCIeSRIOV"); /* * If the chip doesn't support event replay then a hard reset will be * required to trigger a full discovery. Do the reset here then * retransition to Ready. A hard reset might have already been done, * but it doesn't hurt to do it again. Only do this if attaching, not * for a Diag Reset. 
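 */

/*
 * The queue-sizing rules in mpr_resize_queues() above boil down to a few
 * clamps against the IOC Facts credits.  Below is a minimal, standalone
 * restatement of that arithmetic; the struct and function names are
 * invented for the sketch and are not part of the driver.
 */
#include <sys/param.h>		/* MIN()/MAX() */

struct sizing_sketch {
	int max_prireqframes, max_reqframes;	/* driver tunables */
	int hi_credit, req_credit, post_depth;	/* from IOC Facts */
};

static int
sketch_num_reqs(const struct sizing_sketch *s)
{
	int prireqcr, reqcr;

	prireqcr = MIN(MAX(1, s->max_prireqframes), s->hi_credit);
	reqcr = MIN(MAX(2, s->max_reqframes), s->req_credit);
	return (prireqcr + reqcr);		/* becomes sc->num_reqs */
}

static int
sketch_num_replies(const struct sizing_sketch *s, int replyframes,
    int evtframes)
{
	/* one reply descriptor must stay free, hence the "- 1" */
	return (MIN(replyframes + evtframes, s->post_depth) - 1);
}

/*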
*/ if (attaching && ((sc->facts->IOCCapabilities & MPI2_IOCFACTS_CAPABILITY_EVENT_REPLAY) == 0)) { mpr_dprint(sc, MPR_INIT, "No event replay, resetting\n"); mpr_diag_reset(sc, NO_SLEEP); if ((error = mpr_transition_ready(sc)) != 0) { mpr_dprint(sc, MPR_INIT|MPR_FAULT, "Failed to " "transition to ready with error %d, exit\n", error); return (error); } } /* * Set flag if IR Firmware is loaded. If the RAID Capability has * changed from the previous IOC Facts, log a warning, but only if * checking this after a Diag Reset and not during attach. */ saved_mode = sc->ir_firmware; if (sc->facts->IOCCapabilities & MPI2_IOCFACTS_CAPABILITY_INTEGRATED_RAID) sc->ir_firmware = 1; if (!attaching) { if (sc->ir_firmware != saved_mode) { mpr_dprint(sc, MPR_INIT|MPR_FAULT, "new IR/IT mode " "in IOC Facts does not match previous mode\n"); } } /* Only deallocate and reallocate if relevant IOC Facts have changed */ reallocating = FALSE; sc->mpr_flags &= ~MPR_FLAGS_REALLOCATED; if ((!attaching) && ((saved_facts.MsgVersion != sc->facts->MsgVersion) || (saved_facts.HeaderVersion != sc->facts->HeaderVersion) || (saved_facts.MaxChainDepth != sc->facts->MaxChainDepth) || (saved_facts.RequestCredit != sc->facts->RequestCredit) || (saved_facts.ProductID != sc->facts->ProductID) || (saved_facts.IOCCapabilities != sc->facts->IOCCapabilities) || (saved_facts.IOCRequestFrameSize != sc->facts->IOCRequestFrameSize) || (saved_facts.IOCMaxChainSegmentSize != sc->facts->IOCMaxChainSegmentSize) || (saved_facts.MaxTargets != sc->facts->MaxTargets) || (saved_facts.MaxSasExpanders != sc->facts->MaxSasExpanders) || (saved_facts.MaxEnclosures != sc->facts->MaxEnclosures) || (saved_facts.HighPriorityCredit != sc->facts->HighPriorityCredit) || (saved_facts.MaxReplyDescriptorPostQueueDepth != sc->facts->MaxReplyDescriptorPostQueueDepth) || (saved_facts.ReplyFrameSize != sc->facts->ReplyFrameSize) || (saved_facts.MaxVolumes != sc->facts->MaxVolumes) || (saved_facts.MaxPersistentEntries != sc->facts->MaxPersistentEntries))) { reallocating = TRUE; /* Record that we reallocated everything */ sc->mpr_flags |= MPR_FLAGS_REALLOCATED; } /* * Some things should be done if attaching or re-allocating after a Diag * Reset, but are not needed after a Diag Reset if the FW has not * changed. */ if (attaching || reallocating) { /* * Check if controller supports FW diag buffers and set flag to * enable each type. */ if (sc->facts->IOCCapabilities & MPI2_IOCFACTS_CAPABILITY_DIAG_TRACE_BUFFER) sc->fw_diag_buffer_list[MPI2_DIAG_BUF_TYPE_TRACE]. enabled = TRUE; if (sc->facts->IOCCapabilities & MPI2_IOCFACTS_CAPABILITY_SNAPSHOT_BUFFER) sc->fw_diag_buffer_list[MPI2_DIAG_BUF_TYPE_SNAPSHOT]. enabled = TRUE; if (sc->facts->IOCCapabilities & MPI2_IOCFACTS_CAPABILITY_EXTENDED_BUFFER) sc->fw_diag_buffer_list[MPI2_DIAG_BUF_TYPE_EXTENDED]. enabled = TRUE; /* * Set flags for some supported items. 
*/ if (sc->facts->IOCCapabilities & MPI2_IOCFACTS_CAPABILITY_EEDP) sc->eedp_enabled = TRUE; if (sc->facts->IOCCapabilities & MPI2_IOCFACTS_CAPABILITY_TLR) sc->control_TLR = TRUE; if (sc->facts->IOCCapabilities & MPI26_IOCFACTS_CAPABILITY_ATOMIC_REQ) sc->atomic_desc_capable = TRUE; mpr_resize_queues(sc); /* * Initialize all Tail Queues */ TAILQ_INIT(&sc->req_list); TAILQ_INIT(&sc->high_priority_req_list); TAILQ_INIT(&sc->chain_list); TAILQ_INIT(&sc->prp_page_list); TAILQ_INIT(&sc->tm_list); } /* * If doing a Diag Reset and the FW is significantly different * (reallocating will be set above in IOC Facts comparison), then all * buffers based on the IOC Facts will need to be freed before they are * reallocated. */ if (reallocating) { mpr_iocfacts_free(sc); mprsas_realloc_targets(sc, saved_facts.MaxTargets + saved_facts.MaxVolumes); } /* * Any deallocation has been completed. Now start reallocating * if needed. Will only need to reallocate if attaching or if the new * IOC Facts are different from the previous IOC Facts after a Diag * Reset. Targets have already been allocated above if needed. */ error = 0; while (attaching || reallocating) { if ((error = mpr_alloc_hw_queues(sc)) != 0) break; if ((error = mpr_alloc_replies(sc)) != 0) break; if ((error = mpr_alloc_requests(sc)) != 0) break; if ((error = mpr_alloc_queues(sc)) != 0) break; break; } if (error) { mpr_dprint(sc, MPR_INIT|MPR_ERROR, "Failed to alloc queues with error %d\n", error); mpr_free(sc); return (error); } /* Always initialize the queues */ bzero(sc->free_queue, sc->fqdepth * 4); mpr_init_queues(sc); /* * Always get the chip out of the reset state, but only panic if not * attaching. If attaching and there is an error, that is handled by * the OS. */ error = mpr_transition_operational(sc); if (error != 0) { mpr_dprint(sc, MPR_INIT|MPR_FAULT, "Failed to " "transition to operational with error %d\n", error); mpr_free(sc); return (error); } /* * Finish the queue initialization. * These are set here instead of in mpr_init_queues() because the * IOC resets these values during the state transition in * mpr_transition_operational(). The free index is set to 1 * because the corresponding index in the IOC is set to 0, and the * IOC treats the queues as full if both are set to the same value. * Hence the reason that the queue can't hold all of the possible * replies. */ sc->replypostindex = 0; mpr_regwrite(sc, MPI2_REPLY_FREE_HOST_INDEX_OFFSET, sc->replyfreeindex); mpr_regwrite(sc, MPI2_REPLY_POST_HOST_INDEX_OFFSET, 0); /* * Attach the subsystems so they can prepare their event masks. * XXX Should be dynamic so that IM/IR and user modules can attach */ error = 0; while (attaching) { mpr_dprint(sc, MPR_INIT, "Attaching subsystems\n"); if ((error = mpr_attach_log(sc)) != 0) break; if ((error = mpr_attach_sas(sc)) != 0) break; if ((error = mpr_attach_user(sc)) != 0) break; break; } if (error) { mpr_dprint(sc, MPR_INIT|MPR_ERROR, "Failed to attach all subsystems: error %d\n", error); mpr_free(sc); return (error); } /* * XXX If the number of MSI-X vectors changes during re-init, this * won't see it and adjust. */ if (attaching && (error = mpr_pci_setup_interrupts(sc)) != 0) { mpr_dprint(sc, MPR_INIT|MPR_ERROR, "Failed to setup interrupts\n"); mpr_free(sc); return (error); } return (error); } /* * This is called if memory is being free (during detach for example) and when * buffers need to be reallocated due to a Diag Reset. 
*/ static void mpr_iocfacts_free(struct mpr_softc *sc) { struct mpr_command *cm; int i; mpr_dprint(sc, MPR_TRACE, "%s\n", __func__); if (sc->free_busaddr != 0) bus_dmamap_unload(sc->queues_dmat, sc->queues_map); if (sc->free_queue != NULL) bus_dmamem_free(sc->queues_dmat, sc->free_queue, sc->queues_map); if (sc->queues_dmat != NULL) bus_dma_tag_destroy(sc->queues_dmat); if (sc->chain_busaddr != 0) bus_dmamap_unload(sc->chain_dmat, sc->chain_map); if (sc->chain_frames != NULL) bus_dmamem_free(sc->chain_dmat, sc->chain_frames, sc->chain_map); if (sc->chain_dmat != NULL) bus_dma_tag_destroy(sc->chain_dmat); if (sc->sense_busaddr != 0) bus_dmamap_unload(sc->sense_dmat, sc->sense_map); if (sc->sense_frames != NULL) bus_dmamem_free(sc->sense_dmat, sc->sense_frames, sc->sense_map); if (sc->sense_dmat != NULL) bus_dma_tag_destroy(sc->sense_dmat); if (sc->prp_page_busaddr != 0) bus_dmamap_unload(sc->prp_page_dmat, sc->prp_page_map); if (sc->prp_pages != NULL) bus_dmamem_free(sc->prp_page_dmat, sc->prp_pages, sc->prp_page_map); if (sc->prp_page_dmat != NULL) bus_dma_tag_destroy(sc->prp_page_dmat); if (sc->reply_busaddr != 0) bus_dmamap_unload(sc->reply_dmat, sc->reply_map); if (sc->reply_frames != NULL) bus_dmamem_free(sc->reply_dmat, sc->reply_frames, sc->reply_map); if (sc->reply_dmat != NULL) bus_dma_tag_destroy(sc->reply_dmat); if (sc->req_busaddr != 0) bus_dmamap_unload(sc->req_dmat, sc->req_map); if (sc->req_frames != NULL) bus_dmamem_free(sc->req_dmat, sc->req_frames, sc->req_map); if (sc->req_dmat != NULL) bus_dma_tag_destroy(sc->req_dmat); if (sc->chains != NULL) free(sc->chains, M_MPR); if (sc->prps != NULL) free(sc->prps, M_MPR); if (sc->commands != NULL) { for (i = 1; i < sc->num_reqs; i++) { cm = &sc->commands[i]; bus_dmamap_destroy(sc->buffer_dmat, cm->cm_dmamap); } free(sc->commands, M_MPR); } if (sc->buffer_dmat != NULL) bus_dma_tag_destroy(sc->buffer_dmat); mpr_pci_free_interrupts(sc); free(sc->queues, M_MPR); sc->queues = NULL; } /* * The terms diag reset and hard reset are used interchangeably in the MPI * docs to mean resetting the controller chip. In this code diag reset * cleans everything up, and the hard reset function just sends the reset * sequence to the chip. This should probably be refactored so that every * subsystem gets a reset notification of some sort, and can clean up * appropriately. */ int mpr_reinit(struct mpr_softc *sc) { int error; struct mprsas_softc *sassc; sassc = sc->sassc; MPR_FUNCTRACE(sc); mtx_assert(&sc->mpr_mtx, MA_OWNED); mpr_dprint(sc, MPR_INIT|MPR_INFO, "Reinitializing controller\n"); if (sc->mpr_flags & MPR_FLAGS_DIAGRESET) { mpr_dprint(sc, MPR_INIT, "Reset already in progress\n"); return 0; } /* * Make sure the completion callbacks can recognize they're getting * a NULL cm_reply due to a reset. */ sc->mpr_flags |= MPR_FLAGS_DIAGRESET; /* * Mask interrupts here. */ mpr_dprint(sc, MPR_INIT, "Masking interrupts and resetting\n"); mpr_mask_intr(sc); error = mpr_diag_reset(sc, CAN_SLEEP); if (error != 0) { panic("%s hard reset failed with error %d\n", __func__, error); } /* Restore the PCI state, including the MSI-X registers */ mpr_pci_restore(sc); /* Give the I/O subsystem special priority to get itself prepared */ mprsas_handle_reinit(sc); /* * Get IOC Facts and allocate all structures based on this information. * The attach function will also call mpr_iocfacts_allocate at startup. * If relevant values have changed in IOC Facts, this function will free * all of the memory based on IOC Facts and reallocate that memory. 
*/ if ((error = mpr_iocfacts_allocate(sc, FALSE)) != 0) { panic("%s IOC Facts based allocation failed with error %d\n", __func__, error); } /* * Mapping structures will be re-allocated after getting IOC Page8, so * free these structures here. */ mpr_mapping_exit(sc); /* * The static page function currently read is IOC Page8. Others can be * added in future. It's possible that the values in IOC Page8 have * changed after a Diag Reset due to user modification, so always read * these. Interrupts are masked, so unmask them before getting config * pages. */ mpr_unmask_intr(sc); sc->mpr_flags &= ~MPR_FLAGS_DIAGRESET; mpr_base_static_config_pages(sc); /* * Some mapping info is based in IOC Page8 data, so re-initialize the * mapping tables. */ mpr_mapping_initialize(sc); /* * Restart will reload the event masks clobbered by the reset, and * then enable the port. */ mpr_reregister_events(sc); /* the end of discovery will release the simq, so we're done. */ mpr_dprint(sc, MPR_INIT|MPR_XINFO, "Finished sc %p post %u free %u\n", sc, sc->replypostindex, sc->replyfreeindex); mprsas_release_simq_reinit(sassc); mpr_dprint(sc, MPR_INIT, "%s exit error= %d\n", __func__, error); return 0; } /* Wait for the chip to ACK a word that we've put into its FIFO * Wait for seconds. In single loop wait for busy loop * for 500 microseconds. * Total is [ 0.5 * (2000 * ) ] in miliseconds. * */ static int mpr_wait_db_ack(struct mpr_softc *sc, int timeout, int sleep_flag) { u32 cntdn, count; u32 int_status; u32 doorbell; count = 0; cntdn = (sleep_flag == CAN_SLEEP) ? 1000*timeout : 2000*timeout; do { int_status = mpr_regread(sc, MPI2_HOST_INTERRUPT_STATUS_OFFSET); if (!(int_status & MPI2_HIS_SYS2IOC_DB_STATUS)) { mpr_dprint(sc, MPR_TRACE, "%s: successful count(%d), " "timeout(%d)\n", __func__, count, timeout); return 0; } else if (int_status & MPI2_HIS_IOC2SYS_DB_STATUS) { doorbell = mpr_regread(sc, MPI2_DOORBELL_OFFSET); if ((doorbell & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT) { mpr_dprint(sc, MPR_FAULT, "fault_state(0x%04x)!\n", doorbell); return (EFAULT); } } else if (int_status == 0xFFFFFFFF) goto out; /* * If it can sleep, sleep for 1 milisecond, else busy loop for * 0.5 milisecond */ if (mtx_owned(&sc->mpr_mtx) && sleep_flag == CAN_SLEEP) msleep(&sc->msleep_fake_chan, &sc->mpr_mtx, 0, "mprdba", hz/1000); else if (sleep_flag == CAN_SLEEP) pause("mprdba", hz/1000); else DELAY(500); count++; } while (--cntdn); out: mpr_dprint(sc, MPR_FAULT, "%s: failed due to timeout count(%d), " "int_status(%x)!\n", __func__, count, int_status); return (ETIMEDOUT); } /* Wait for the chip to signal that the next word in its FIFO can be fetched */ static int mpr_wait_db_int(struct mpr_softc *sc) { int retry; for (retry = 0; retry < MPR_DB_MAX_WAIT; retry++) { if ((mpr_regread(sc, MPI2_HOST_INTERRUPT_STATUS_OFFSET) & MPI2_HIS_IOC2SYS_DB_STATUS) != 0) return (0); DELAY(2000); } return (ETIMEDOUT); } /* Step through the synchronous command state machine, i.e. 
"Doorbell mode" */ static int mpr_request_sync(struct mpr_softc *sc, void *req, MPI2_DEFAULT_REPLY *reply, int req_sz, int reply_sz, int timeout) { uint32_t *data32; uint16_t *data16; int i, count, ioc_sz, residual; int sleep_flags = CAN_SLEEP; #if __FreeBSD_version >= 1000029 if (curthread->td_no_sleeping) #else //__FreeBSD_version < 1000029 if (curthread->td_pflags & TDP_NOSLEEPING) #endif //__FreeBSD_version >= 1000029 sleep_flags = NO_SLEEP; /* Step 1 */ mpr_regwrite(sc, MPI2_HOST_INTERRUPT_STATUS_OFFSET, 0x0); /* Step 2 */ if (mpr_regread(sc, MPI2_DOORBELL_OFFSET) & MPI2_DOORBELL_USED) return (EBUSY); /* Step 3 * Announce that a message is coming through the doorbell. Messages * are pushed at 32bit words, so round up if needed. */ count = (req_sz + 3) / 4; mpr_regwrite(sc, MPI2_DOORBELL_OFFSET, (MPI2_FUNCTION_HANDSHAKE << MPI2_DOORBELL_FUNCTION_SHIFT) | (count << MPI2_DOORBELL_ADD_DWORDS_SHIFT)); /* Step 4 */ if (mpr_wait_db_int(sc) || (mpr_regread(sc, MPI2_DOORBELL_OFFSET) & MPI2_DOORBELL_USED) == 0) { mpr_dprint(sc, MPR_FAULT, "Doorbell failed to activate\n"); return (ENXIO); } mpr_regwrite(sc, MPI2_HOST_INTERRUPT_STATUS_OFFSET, 0x0); if (mpr_wait_db_ack(sc, 5, sleep_flags) != 0) { mpr_dprint(sc, MPR_FAULT, "Doorbell handshake failed\n"); return (ENXIO); } /* Step 5 */ /* Clock out the message data synchronously in 32-bit dwords*/ data32 = (uint32_t *)req; for (i = 0; i < count; i++) { mpr_regwrite(sc, MPI2_DOORBELL_OFFSET, htole32(data32[i])); if (mpr_wait_db_ack(sc, 5, sleep_flags) != 0) { mpr_dprint(sc, MPR_FAULT, "Timeout while writing doorbell\n"); return (ENXIO); } } /* Step 6 */ /* Clock in the reply in 16-bit words. The total length of the * message is always in the 4th byte, so clock out the first 2 words * manually, then loop the rest. */ data16 = (uint16_t *)reply; if (mpr_wait_db_int(sc) != 0) { mpr_dprint(sc, MPR_FAULT, "Timeout reading doorbell 0\n"); return (ENXIO); } data16[0] = mpr_regread(sc, MPI2_DOORBELL_OFFSET) & MPI2_DOORBELL_DATA_MASK; mpr_regwrite(sc, MPI2_HOST_INTERRUPT_STATUS_OFFSET, 0x0); if (mpr_wait_db_int(sc) != 0) { mpr_dprint(sc, MPR_FAULT, "Timeout reading doorbell 1\n"); return (ENXIO); } data16[1] = mpr_regread(sc, MPI2_DOORBELL_OFFSET) & MPI2_DOORBELL_DATA_MASK; mpr_regwrite(sc, MPI2_HOST_INTERRUPT_STATUS_OFFSET, 0x0); /* Number of 32bit words in the message */ ioc_sz = reply->MsgLength; /* * Figure out how many 16bit words to clock in without overrunning. * The precision loss with dividing reply_sz can safely be * ignored because the messages can only be multiples of 32bits. */ residual = 0; count = MIN((reply_sz / 4), ioc_sz) * 2; if (count < ioc_sz * 2) { residual = ioc_sz * 2 - count; mpr_dprint(sc, MPR_ERROR, "Driver error, throwing away %d " "residual message words\n", residual); } for (i = 2; i < count; i++) { if (mpr_wait_db_int(sc) != 0) { mpr_dprint(sc, MPR_FAULT, "Timeout reading doorbell %d\n", i); return (ENXIO); } data16[i] = mpr_regread(sc, MPI2_DOORBELL_OFFSET) & MPI2_DOORBELL_DATA_MASK; mpr_regwrite(sc, MPI2_HOST_INTERRUPT_STATUS_OFFSET, 0x0); } /* * Pull out residual words that won't fit into the provided buffer. * This keeps the chip from hanging due to a driver programming * error. 
*/ while (residual--) { if (mpr_wait_db_int(sc) != 0) { mpr_dprint(sc, MPR_FAULT, "Timeout reading doorbell\n"); return (ENXIO); } (void)mpr_regread(sc, MPI2_DOORBELL_OFFSET); mpr_regwrite(sc, MPI2_HOST_INTERRUPT_STATUS_OFFSET, 0x0); } /* Step 7 */ if (mpr_wait_db_int(sc) != 0) { mpr_dprint(sc, MPR_FAULT, "Timeout waiting to exit doorbell\n"); return (ENXIO); } if (mpr_regread(sc, MPI2_DOORBELL_OFFSET) & MPI2_DOORBELL_USED) mpr_dprint(sc, MPR_FAULT, "Warning, doorbell still active\n"); mpr_regwrite(sc, MPI2_HOST_INTERRUPT_STATUS_OFFSET, 0x0); return (0); } static void mpr_enqueue_request(struct mpr_softc *sc, struct mpr_command *cm) { request_descriptor rd; MPR_FUNCTRACE(sc); mpr_dprint(sc, MPR_TRACE, "SMID %u cm %p ccb %p\n", cm->cm_desc.Default.SMID, cm, cm->cm_ccb); if (sc->mpr_flags & MPR_FLAGS_ATTACH_DONE && !(sc->mpr_flags & MPR_FLAGS_SHUTDOWN)) mtx_assert(&sc->mpr_mtx, MA_OWNED); if (++sc->io_cmds_active > sc->io_cmds_highwater) sc->io_cmds_highwater++; if (sc->atomic_desc_capable) { rd.u.low = cm->cm_desc.Words.Low; mpr_regwrite(sc, MPI26_ATOMIC_REQUEST_DESCRIPTOR_POST_OFFSET, rd.u.low); } else { rd.u.low = cm->cm_desc.Words.Low; rd.u.high = cm->cm_desc.Words.High; rd.word = htole64(rd.word); mpr_regwrite(sc, MPI2_REQUEST_DESCRIPTOR_POST_LOW_OFFSET, rd.u.low); mpr_regwrite(sc, MPI2_REQUEST_DESCRIPTOR_POST_HIGH_OFFSET, rd.u.high); } } /* * Just the FACTS, ma'am. */ static int mpr_get_iocfacts(struct mpr_softc *sc, MPI2_IOC_FACTS_REPLY *facts) { MPI2_DEFAULT_REPLY *reply; MPI2_IOC_FACTS_REQUEST request; int error, req_sz, reply_sz; MPR_FUNCTRACE(sc); mpr_dprint(sc, MPR_INIT, "%s entered\n", __func__); req_sz = sizeof(MPI2_IOC_FACTS_REQUEST); reply_sz = sizeof(MPI2_IOC_FACTS_REPLY); reply = (MPI2_DEFAULT_REPLY *)facts; bzero(&request, req_sz); request.Function = MPI2_FUNCTION_IOC_FACTS; error = mpr_request_sync(sc, &request, reply, req_sz, reply_sz, 5); mpr_dprint(sc, MPR_INIT, "%s exit, error= %d\n", __func__, error); return (error); } static int mpr_send_iocinit(struct mpr_softc *sc) { MPI2_IOC_INIT_REQUEST init; MPI2_DEFAULT_REPLY reply; int req_sz, reply_sz, error; struct timeval now; uint64_t time_in_msec; MPR_FUNCTRACE(sc); mpr_dprint(sc, MPR_INIT, "%s entered\n", __func__); req_sz = sizeof(MPI2_IOC_INIT_REQUEST); reply_sz = sizeof(MPI2_IOC_INIT_REPLY); bzero(&init, req_sz); bzero(&reply, reply_sz); /* * Fill in the init block. Note that most addresses are * deliberately in the lower 32bits of memory. This is a micro- * optimzation for PCI/PCIX, though it's not clear if it helps PCIe. 
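 */

/*
 * mpr_enqueue_request() above uses the reply_descriptor union to view one
 * 64-bit request descriptor as two 32-bit halves, because the non-atomic
 * register interface only takes 32-bit writes.  A standalone illustration
 * of that split follows; the names and the printf stand-ins for register
 * writes are assumptions for the sketch (the driver additionally
 * byte-swaps the 64-bit value with htole64() before splitting it).
 */
#include <stdint.h>
#include <stdio.h>

union desc_sketch {
	uint64_t word;
	struct {
		uint32_t low;
		uint32_t high;
	} u;
};

static void
post_descriptor_sketch(uint64_t descriptor)
{
	union desc_sketch d;

	d.word = descriptor;
	printf("low register  <- 0x%08x\n", d.u.low);
	printf("high register <- 0x%08x\n", d.u.high);
}

/*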
*/ init.Function = MPI2_FUNCTION_IOC_INIT; init.WhoInit = MPI2_WHOINIT_HOST_DRIVER; init.MsgVersion = htole16(MPI2_VERSION); init.HeaderVersion = htole16(MPI2_HEADER_VERSION); init.SystemRequestFrameSize = htole16(sc->facts->IOCRequestFrameSize); init.ReplyDescriptorPostQueueDepth = htole16(sc->pqdepth); init.ReplyFreeQueueDepth = htole16(sc->fqdepth); init.SenseBufferAddressHigh = 0; init.SystemReplyAddressHigh = 0; init.SystemRequestFrameBaseAddress.High = 0; init.SystemRequestFrameBaseAddress.Low = htole32((uint32_t)sc->req_busaddr); init.ReplyDescriptorPostQueueAddress.High = 0; init.ReplyDescriptorPostQueueAddress.Low = htole32((uint32_t)sc->post_busaddr); init.ReplyFreeQueueAddress.High = 0; init.ReplyFreeQueueAddress.Low = htole32((uint32_t)sc->free_busaddr); getmicrotime(&now); time_in_msec = (now.tv_sec * 1000 + now.tv_usec/1000); init.TimeStamp.High = htole32((time_in_msec >> 32) & 0xFFFFFFFF); init.TimeStamp.Low = htole32(time_in_msec & 0xFFFFFFFF); init.HostPageSize = HOST_PAGE_SIZE_4K; error = mpr_request_sync(sc, &init, &reply, req_sz, reply_sz, 5); if ((reply.IOCStatus & MPI2_IOCSTATUS_MASK) != MPI2_IOCSTATUS_SUCCESS) error = ENXIO; mpr_dprint(sc, MPR_INIT, "IOCInit status= 0x%x\n", reply.IOCStatus); mpr_dprint(sc, MPR_INIT, "%s exit\n", __func__); return (error); } void mpr_memaddr_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error) { bus_addr_t *addr; addr = arg; *addr = segs[0].ds_addr; } static int mpr_alloc_queues(struct mpr_softc *sc) { struct mpr_queue *q; int nq, i; nq = sc->msi_msgs; mpr_dprint(sc, MPR_INIT|MPR_XINFO, "Allocating %d I/O queues\n", nq); - sc->queues = malloc(sizeof(struct mpr_queue) * nq, M_MPR, + sc->queues = mallocarray(nq, sizeof(struct mpr_queue), M_MPR, M_NOWAIT|M_ZERO); if (sc->queues == NULL) return (ENOMEM); for (i = 0; i < nq; i++) { q = &sc->queues[i]; mpr_dprint(sc, MPR_INIT, "Configuring queue %d %p\n", i, q); q->sc = sc; q->qnum = i; } return (0); } static int mpr_alloc_hw_queues(struct mpr_softc *sc) { bus_addr_t queues_busaddr; uint8_t *queues; int qsize, fqsize, pqsize; /* * The reply free queue contains 4 byte entries in multiples of 16 and * aligned on a 16 byte boundary. There must always be an unused entry. * This queue supplies fresh reply frames for the firmware to use. * * The reply descriptor post queue contains 8 byte entries in * multiples of 16 and aligned on a 16 byte boundary. This queue * contains filled-in reply frames sent from the firmware to the host. * * These two queues are allocated together for simplicity. 
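 */

/*
 * The mpr_alloc_queues() change above swaps a hand-rolled "count times size"
 * malloc() for mallocarray(9); the value of the helper is its multiplication
 * overflow check.  The core of that test can be shown in a few lines of
 * standalone C.  This is a sketch of the idea only; how mallocarray(9)
 * itself reacts to an overflow is not reproduced here.
 */
#include <stdint.h>
#include <stdlib.h>

static void *
mallocarray_sketch(size_t nmemb, size_t size)
{
	/* refuse requests where nmemb * size would wrap around */
	if (size != 0 && nmemb > SIZE_MAX / size)
		return (NULL);
	return (malloc(nmemb * size));
}

/*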
*/ sc->fqdepth = roundup2(sc->num_replies + 1, 16); sc->pqdepth = roundup2(sc->num_replies + 1, 16); fqsize= sc->fqdepth * 4; pqsize = sc->pqdepth * 8; qsize = fqsize + pqsize; if (bus_dma_tag_create( sc->mpr_parent_dmat, /* parent */ 16, 0, /* algnmnt, boundary */ BUS_SPACE_MAXADDR_32BIT,/* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filter, filterarg */ qsize, /* maxsize */ 1, /* nsegments */ qsize, /* maxsegsize */ 0, /* flags */ NULL, NULL, /* lockfunc, lockarg */ &sc->queues_dmat)) { mpr_dprint(sc, MPR_ERROR, "Cannot allocate queues DMA tag\n"); return (ENOMEM); } if (bus_dmamem_alloc(sc->queues_dmat, (void **)&queues, BUS_DMA_NOWAIT, &sc->queues_map)) { mpr_dprint(sc, MPR_ERROR, "Cannot allocate queues memory\n"); return (ENOMEM); } bzero(queues, qsize); bus_dmamap_load(sc->queues_dmat, sc->queues_map, queues, qsize, mpr_memaddr_cb, &queues_busaddr, 0); sc->free_queue = (uint32_t *)queues; sc->free_busaddr = queues_busaddr; sc->post_queue = (MPI2_REPLY_DESCRIPTORS_UNION *)(queues + fqsize); sc->post_busaddr = queues_busaddr + fqsize; return (0); } static int mpr_alloc_replies(struct mpr_softc *sc) { int rsize, num_replies; /* * sc->num_replies should be one less than sc->fqdepth. We need to * allocate space for sc->fqdepth replies, but only sc->num_replies * replies can be used at once. */ num_replies = max(sc->fqdepth, sc->num_replies); rsize = sc->facts->ReplyFrameSize * num_replies * 4; if (bus_dma_tag_create( sc->mpr_parent_dmat, /* parent */ 4, 0, /* algnmnt, boundary */ BUS_SPACE_MAXADDR_32BIT,/* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filter, filterarg */ rsize, /* maxsize */ 1, /* nsegments */ rsize, /* maxsegsize */ 0, /* flags */ NULL, NULL, /* lockfunc, lockarg */ &sc->reply_dmat)) { mpr_dprint(sc, MPR_ERROR, "Cannot allocate replies DMA tag\n"); return (ENOMEM); } if (bus_dmamem_alloc(sc->reply_dmat, (void **)&sc->reply_frames, BUS_DMA_NOWAIT, &sc->reply_map)) { mpr_dprint(sc, MPR_ERROR, "Cannot allocate replies memory\n"); return (ENOMEM); } bzero(sc->reply_frames, rsize); bus_dmamap_load(sc->reply_dmat, sc->reply_map, sc->reply_frames, rsize, mpr_memaddr_cb, &sc->reply_busaddr, 0); return (0); } static int mpr_alloc_requests(struct mpr_softc *sc) { struct mpr_command *cm; struct mpr_chain *chain; int i, rsize, nsegs; rsize = sc->facts->IOCRequestFrameSize * sc->num_reqs * 4; if (bus_dma_tag_create( sc->mpr_parent_dmat, /* parent */ 16, 0, /* algnmnt, boundary */ BUS_SPACE_MAXADDR_32BIT,/* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filter, filterarg */ rsize, /* maxsize */ 1, /* nsegments */ rsize, /* maxsegsize */ 0, /* flags */ NULL, NULL, /* lockfunc, lockarg */ &sc->req_dmat)) { mpr_dprint(sc, MPR_ERROR, "Cannot allocate request DMA tag\n"); return (ENOMEM); } if (bus_dmamem_alloc(sc->req_dmat, (void **)&sc->req_frames, BUS_DMA_NOWAIT, &sc->req_map)) { mpr_dprint(sc, MPR_ERROR, "Cannot allocate request memory\n"); return (ENOMEM); } bzero(sc->req_frames, rsize); bus_dmamap_load(sc->req_dmat, sc->req_map, sc->req_frames, rsize, mpr_memaddr_cb, &sc->req_busaddr, 0); /* * Gen3 and beyond uses the IOCMaxChainSegmentSize from IOC Facts to * get the size of a Chain Frame. Previous versions use the size as a * Request Frame for the Chain Frame size. If IOCMaxChainSegmentSize * is 0, use the default value. The IOCMaxChainSegmentSize is the * number of 16-byte elelements that can fit in a Chain Frame, which is * the size of an IEEE Simple SGE. 
*/ if (sc->facts->MsgVersion >= MPI2_VERSION_02_05) { sc->chain_seg_size = htole16(sc->facts->IOCMaxChainSegmentSize); if (sc->chain_seg_size == 0) { sc->chain_frame_size = MPR_DEFAULT_CHAIN_SEG_SIZE * MPR_MAX_CHAIN_ELEMENT_SIZE; } else { sc->chain_frame_size = sc->chain_seg_size * MPR_MAX_CHAIN_ELEMENT_SIZE; } } else { sc->chain_frame_size = sc->facts->IOCRequestFrameSize * 4; } rsize = sc->chain_frame_size * sc->max_chains; if (bus_dma_tag_create( sc->mpr_parent_dmat, /* parent */ 16, 0, /* algnmnt, boundary */ BUS_SPACE_MAXADDR, /* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filter, filterarg */ rsize, /* maxsize */ 1, /* nsegments */ rsize, /* maxsegsize */ 0, /* flags */ NULL, NULL, /* lockfunc, lockarg */ &sc->chain_dmat)) { mpr_dprint(sc, MPR_ERROR, "Cannot allocate chain DMA tag\n"); return (ENOMEM); } if (bus_dmamem_alloc(sc->chain_dmat, (void **)&sc->chain_frames, BUS_DMA_NOWAIT, &sc->chain_map)) { mpr_dprint(sc, MPR_ERROR, "Cannot allocate chain memory\n"); return (ENOMEM); } bzero(sc->chain_frames, rsize); bus_dmamap_load(sc->chain_dmat, sc->chain_map, sc->chain_frames, rsize, mpr_memaddr_cb, &sc->chain_busaddr, 0); rsize = MPR_SENSE_LEN * sc->num_reqs; if (bus_dma_tag_create( sc->mpr_parent_dmat, /* parent */ 1, 0, /* algnmnt, boundary */ BUS_SPACE_MAXADDR_32BIT,/* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filter, filterarg */ rsize, /* maxsize */ 1, /* nsegments */ rsize, /* maxsegsize */ 0, /* flags */ NULL, NULL, /* lockfunc, lockarg */ &sc->sense_dmat)) { mpr_dprint(sc, MPR_ERROR, "Cannot allocate sense DMA tag\n"); return (ENOMEM); } if (bus_dmamem_alloc(sc->sense_dmat, (void **)&sc->sense_frames, BUS_DMA_NOWAIT, &sc->sense_map)) { mpr_dprint(sc, MPR_ERROR, "Cannot allocate sense memory\n"); return (ENOMEM); } bzero(sc->sense_frames, rsize); bus_dmamap_load(sc->sense_dmat, sc->sense_map, sc->sense_frames, rsize, mpr_memaddr_cb, &sc->sense_busaddr, 0); sc->chains = malloc(sizeof(struct mpr_chain) * sc->max_chains, M_MPR, M_WAITOK | M_ZERO); if (!sc->chains) { mpr_dprint(sc, MPR_ERROR, "Cannot allocate chain memory\n"); return (ENOMEM); } for (i = 0; i < sc->max_chains; i++) { chain = &sc->chains[i]; chain->chain = (MPI2_SGE_IO_UNION *)(sc->chain_frames + i * sc->chain_frame_size); chain->chain_busaddr = sc->chain_busaddr + i * sc->chain_frame_size; mpr_free_chain(sc, chain); sc->chain_free_lowwater++; } /* * Allocate NVMe PRP Pages for NVMe SGL support only if the FW supports * these devices. */ if ((sc->facts->MsgVersion >= MPI2_VERSION_02_06) && (sc->facts->ProtocolFlags & MPI2_IOCFACTS_PROTOCOL_NVME_DEVICES)) { if (mpr_alloc_nvme_prp_pages(sc) == ENOMEM) return (ENOMEM); } /* XXX Need to pick a more precise value */ nsegs = (MAXPHYS / PAGE_SIZE) + 1; if (bus_dma_tag_create( sc->mpr_parent_dmat, /* parent */ 1, 0, /* algnmnt, boundary */ BUS_SPACE_MAXADDR, /* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filter, filterarg */ BUS_SPACE_MAXSIZE_32BIT,/* maxsize */ nsegs, /* nsegments */ BUS_SPACE_MAXSIZE_32BIT,/* maxsegsize */ BUS_DMA_ALLOCNOW, /* flags */ busdma_lock_mutex, /* lockfunc */ &sc->mpr_mtx, /* lockarg */ &sc->buffer_dmat)) { mpr_dprint(sc, MPR_ERROR, "Cannot allocate buffer DMA tag\n"); return (ENOMEM); } /* * SMID 0 cannot be used as a free command per the firmware spec. * Just drop that command instead of risking accounting bugs. 
*/ sc->commands = malloc(sizeof(struct mpr_command) * sc->num_reqs, M_MPR, M_WAITOK | M_ZERO); if (!sc->commands) { mpr_dprint(sc, MPR_ERROR, "Cannot allocate command memory\n"); return (ENOMEM); } for (i = 1; i < sc->num_reqs; i++) { cm = &sc->commands[i]; cm->cm_req = sc->req_frames + i * sc->facts->IOCRequestFrameSize * 4; cm->cm_req_busaddr = sc->req_busaddr + i * sc->facts->IOCRequestFrameSize * 4; cm->cm_sense = &sc->sense_frames[i]; cm->cm_sense_busaddr = sc->sense_busaddr + i * MPR_SENSE_LEN; cm->cm_desc.Default.SMID = i; cm->cm_sc = sc; TAILQ_INIT(&cm->cm_chain_list); TAILQ_INIT(&cm->cm_prp_page_list); callout_init_mtx(&cm->cm_callout, &sc->mpr_mtx, 0); /* XXX Is a failure here a critical problem? */ if (bus_dmamap_create(sc->buffer_dmat, 0, &cm->cm_dmamap) == 0) { if (i <= sc->facts->HighPriorityCredit) mpr_free_high_priority_command(sc, cm); else mpr_free_command(sc, cm); } else { panic("failed to allocate command %d\n", i); sc->num_reqs = i; break; } } return (0); } /* * Allocate contiguous buffers for PCIe NVMe devices for building native PRPs, * which are scatter/gather lists for NVMe devices. * * This buffer must be contiguous due to the nature of how NVMe PRPs are built * and translated by FW. * * returns ENOMEM if memory could not be allocated, otherwise returns 0. */ static int mpr_alloc_nvme_prp_pages(struct mpr_softc *sc) { int PRPs_per_page, PRPs_required, pages_required; int rsize, i; struct mpr_prp_page *prp_page; /* * Assuming a MAX_IO_SIZE of 1MB and a PAGE_SIZE of 4k, the max number * of PRPs (NVMe's Scatter/Gather Element) needed per I/O is: * MAX_IO_SIZE / PAGE_SIZE = 256 * * 1 PRP entry in main frame for PRP list pointer still leaves 255 PRPs * required for the remainder of the 1MB I/O. 512 PRPs can fit into one * page (4096 / 8 = 512), so only one page is required for each I/O. * * Each of these buffers will need to be contiguous. For simplicity, * only one buffer is allocated here, which has all of the space * required for the NVMe Queue Depth. If there are problems allocating * this one buffer, this function will need to change to allocate * individual, contiguous NVME_QDEPTH buffers. * * The real calculation will use the real max io size. Above is just an * example. 
* */ PRPs_required = sc->maxio / PAGE_SIZE; PRPs_per_page = (PAGE_SIZE / PRP_ENTRY_SIZE) - 1; pages_required = (PRPs_required / PRPs_per_page) + 1; sc->prp_buffer_size = PAGE_SIZE * pages_required; rsize = sc->prp_buffer_size * NVME_QDEPTH; if (bus_dma_tag_create( sc->mpr_parent_dmat, /* parent */ 4, 0, /* algnmnt, boundary */ BUS_SPACE_MAXADDR_32BIT,/* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filter, filterarg */ rsize, /* maxsize */ 1, /* nsegments */ rsize, /* maxsegsize */ 0, /* flags */ NULL, NULL, /* lockfunc, lockarg */ &sc->prp_page_dmat)) { mpr_dprint(sc, MPR_ERROR, "Cannot allocate NVMe PRP DMA " "tag\n"); return (ENOMEM); } if (bus_dmamem_alloc(sc->prp_page_dmat, (void **)&sc->prp_pages, BUS_DMA_NOWAIT, &sc->prp_page_map)) { mpr_dprint(sc, MPR_ERROR, "Cannot allocate NVMe PRP memory\n"); return (ENOMEM); } bzero(sc->prp_pages, rsize); bus_dmamap_load(sc->prp_page_dmat, sc->prp_page_map, sc->prp_pages, rsize, mpr_memaddr_cb, &sc->prp_page_busaddr, 0); sc->prps = malloc(sizeof(struct mpr_prp_page) * NVME_QDEPTH, M_MPR, M_WAITOK | M_ZERO); for (i = 0; i < NVME_QDEPTH; i++) { prp_page = &sc->prps[i]; prp_page->prp_page = (uint64_t *)(sc->prp_pages + i * sc->prp_buffer_size); prp_page->prp_page_busaddr = (uint64_t)(sc->prp_page_busaddr + i * sc->prp_buffer_size); mpr_free_prp_page(sc, prp_page); sc->prp_pages_free_lowwater++; } return (0); } static int mpr_init_queues(struct mpr_softc *sc) { int i; memset((uint8_t *)sc->post_queue, 0xff, sc->pqdepth * 8); /* * According to the spec, we need to use one less reply than we * have space for on the queue. So sc->num_replies (the number we * use) should be less than sc->fqdepth (allocated size). */ if (sc->num_replies >= sc->fqdepth) return (EINVAL); /* * Initialize all of the free queue entries. */ for (i = 0; i < sc->fqdepth; i++) { sc->free_queue[i] = sc->reply_busaddr + (i * sc->facts->ReplyFrameSize * 4); } sc->replyfreeindex = sc->num_replies; return (0); } /* Get the driver parameter tunables. Lowest priority are the driver defaults. * Next are the global settings, if they exist. Highest are the per-unit * settings, if they exist. */ void mpr_get_tunables(struct mpr_softc *sc) { char tmpstr[80], mpr_debug[80]; /* XXX default to some debugging for now */ sc->mpr_debug = MPR_INFO | MPR_FAULT; sc->disable_msix = 0; sc->disable_msi = 0; sc->max_msix = MPR_MSIX_MAX; sc->max_chains = MPR_CHAIN_FRAMES; sc->max_io_pages = MPR_MAXIO_PAGES; sc->enable_ssu = MPR_SSU_ENABLE_SSD_DISABLE_HDD; sc->spinup_wait_time = DEFAULT_SPINUP_WAIT; sc->use_phynum = 1; sc->max_reqframes = MPR_REQ_FRAMES; sc->max_prireqframes = MPR_PRI_REQ_FRAMES; sc->max_replyframes = MPR_REPLY_FRAMES; sc->max_evtframes = MPR_EVT_REPLY_FRAMES; /* * Grab the global variables. 
*/ bzero(mpr_debug, 80); if (TUNABLE_STR_FETCH("hw.mpr.debug_level", mpr_debug, 80) != 0) mpr_parse_debug(sc, mpr_debug); TUNABLE_INT_FETCH("hw.mpr.disable_msix", &sc->disable_msix); TUNABLE_INT_FETCH("hw.mpr.disable_msi", &sc->disable_msi); TUNABLE_INT_FETCH("hw.mpr.max_msix", &sc->max_msix); TUNABLE_INT_FETCH("hw.mpr.max_chains", &sc->max_chains); TUNABLE_INT_FETCH("hw.mpr.max_io_pages", &sc->max_io_pages); TUNABLE_INT_FETCH("hw.mpr.enable_ssu", &sc->enable_ssu); TUNABLE_INT_FETCH("hw.mpr.spinup_wait_time", &sc->spinup_wait_time); TUNABLE_INT_FETCH("hw.mpr.use_phy_num", &sc->use_phynum); TUNABLE_INT_FETCH("hw.mpr.max_reqframes", &sc->max_reqframes); TUNABLE_INT_FETCH("hw.mpr.max_prireqframes", &sc->max_prireqframes); TUNABLE_INT_FETCH("hw.mpr.max_replyframes", &sc->max_replyframes); TUNABLE_INT_FETCH("hw.mpr.max_evtframes", &sc->max_evtframes); /* Grab the unit-instance variables */ snprintf(tmpstr, sizeof(tmpstr), "dev.mpr.%d.debug_level", device_get_unit(sc->mpr_dev)); bzero(mpr_debug, 80); if (TUNABLE_STR_FETCH(tmpstr, mpr_debug, 80) != 0) mpr_parse_debug(sc, mpr_debug); snprintf(tmpstr, sizeof(tmpstr), "dev.mpr.%d.disable_msix", device_get_unit(sc->mpr_dev)); TUNABLE_INT_FETCH(tmpstr, &sc->disable_msix); snprintf(tmpstr, sizeof(tmpstr), "dev.mpr.%d.disable_msi", device_get_unit(sc->mpr_dev)); TUNABLE_INT_FETCH(tmpstr, &sc->disable_msi); snprintf(tmpstr, sizeof(tmpstr), "dev.mpr.%d.max_msix", device_get_unit(sc->mpr_dev)); TUNABLE_INT_FETCH(tmpstr, &sc->max_msix); snprintf(tmpstr, sizeof(tmpstr), "dev.mpr.%d.max_chains", device_get_unit(sc->mpr_dev)); TUNABLE_INT_FETCH(tmpstr, &sc->max_chains); snprintf(tmpstr, sizeof(tmpstr), "dev.mpr.%d.max_io_pages", device_get_unit(sc->mpr_dev)); TUNABLE_INT_FETCH(tmpstr, &sc->max_io_pages); bzero(sc->exclude_ids, sizeof(sc->exclude_ids)); snprintf(tmpstr, sizeof(tmpstr), "dev.mpr.%d.exclude_ids", device_get_unit(sc->mpr_dev)); TUNABLE_STR_FETCH(tmpstr, sc->exclude_ids, sizeof(sc->exclude_ids)); snprintf(tmpstr, sizeof(tmpstr), "dev.mpr.%d.enable_ssu", device_get_unit(sc->mpr_dev)); TUNABLE_INT_FETCH(tmpstr, &sc->enable_ssu); snprintf(tmpstr, sizeof(tmpstr), "dev.mpr.%d.spinup_wait_time", device_get_unit(sc->mpr_dev)); TUNABLE_INT_FETCH(tmpstr, &sc->spinup_wait_time); snprintf(tmpstr, sizeof(tmpstr), "dev.mpr.%d.use_phy_num", device_get_unit(sc->mpr_dev)); TUNABLE_INT_FETCH(tmpstr, &sc->use_phynum); snprintf(tmpstr, sizeof(tmpstr), "dev.mpr.%d.max_reqframes", device_get_unit(sc->mpr_dev)); TUNABLE_INT_FETCH(tmpstr, &sc->max_reqframes); snprintf(tmpstr, sizeof(tmpstr), "dev.mpr.%d.max_prireqframes", device_get_unit(sc->mpr_dev)); TUNABLE_INT_FETCH(tmpstr, &sc->max_prireqframes); snprintf(tmpstr, sizeof(tmpstr), "dev.mpr.%d.max_replyframes", device_get_unit(sc->mpr_dev)); TUNABLE_INT_FETCH(tmpstr, &sc->max_replyframes); snprintf(tmpstr, sizeof(tmpstr), "dev.mpr.%d.max_evtframes", device_get_unit(sc->mpr_dev)); TUNABLE_INT_FETCH(tmpstr, &sc->max_evtframes); } static void mpr_setup_sysctl(struct mpr_softc *sc) { struct sysctl_ctx_list *sysctl_ctx = NULL; struct sysctl_oid *sysctl_tree = NULL; char tmpstr[80], tmpstr2[80]; /* * Setup the sysctl variable so the user can change the debug level * on the fly. 
*/ snprintf(tmpstr, sizeof(tmpstr), "MPR controller %d", device_get_unit(sc->mpr_dev)); snprintf(tmpstr2, sizeof(tmpstr2), "%d", device_get_unit(sc->mpr_dev)); sysctl_ctx = device_get_sysctl_ctx(sc->mpr_dev); if (sysctl_ctx != NULL) sysctl_tree = device_get_sysctl_tree(sc->mpr_dev); if (sysctl_tree == NULL) { sysctl_ctx_init(&sc->sysctl_ctx); sc->sysctl_tree = SYSCTL_ADD_NODE(&sc->sysctl_ctx, SYSCTL_STATIC_CHILDREN(_hw_mpr), OID_AUTO, tmpstr2, CTLFLAG_RD, 0, tmpstr); if (sc->sysctl_tree == NULL) return; sysctl_ctx = &sc->sysctl_ctx; sysctl_tree = sc->sysctl_tree; } SYSCTL_ADD_PROC(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree), OID_AUTO, "debug_level", CTLTYPE_STRING | CTLFLAG_RW | CTLFLAG_MPSAFE, sc, 0, mpr_debug_sysctl, "A", "mpr debug level"); SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree), OID_AUTO, "disable_msix", CTLFLAG_RD, &sc->disable_msix, 0, "Disable the use of MSI-X interrupts"); SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree), OID_AUTO, "max_msix", CTLFLAG_RD, &sc->max_msix, 0, "User-defined maximum number of MSIX queues"); SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree), OID_AUTO, "msix_msgs", CTLFLAG_RD, &sc->msi_msgs, 0, "Negotiated number of MSIX queues"); SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree), OID_AUTO, "max_reqframes", CTLFLAG_RD, &sc->max_reqframes, 0, "Total number of allocated request frames"); SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree), OID_AUTO, "max_prireqframes", CTLFLAG_RD, &sc->max_prireqframes, 0, "Total number of allocated high priority request frames"); SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree), OID_AUTO, "max_replyframes", CTLFLAG_RD, &sc->max_replyframes, 0, "Total number of allocated reply frames"); SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree), OID_AUTO, "max_evtframes", CTLFLAG_RD, &sc->max_evtframes, 0, "Total number of event frames allocated"); SYSCTL_ADD_STRING(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree), OID_AUTO, "firmware_version", CTLFLAG_RW, sc->fw_version, strlen(sc->fw_version), "firmware version"); SYSCTL_ADD_STRING(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree), OID_AUTO, "driver_version", CTLFLAG_RW, MPR_DRIVER_VERSION, strlen(MPR_DRIVER_VERSION), "driver version"); SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree), OID_AUTO, "io_cmds_active", CTLFLAG_RD, &sc->io_cmds_active, 0, "number of currently active commands"); SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree), OID_AUTO, "io_cmds_highwater", CTLFLAG_RD, &sc->io_cmds_highwater, 0, "maximum active commands seen"); SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree), OID_AUTO, "chain_free", CTLFLAG_RD, &sc->chain_free, 0, "number of free chain elements"); SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree), OID_AUTO, "chain_free_lowwater", CTLFLAG_RD, &sc->chain_free_lowwater, 0,"lowest number of free chain elements"); SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree), OID_AUTO, "max_chains", CTLFLAG_RD, &sc->max_chains, 0,"maximum chain frames that will be allocated"); SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree), OID_AUTO, "max_io_pages", CTLFLAG_RD, &sc->max_io_pages, 0,"maximum pages to allow per I/O (if <1 use " "IOCFacts)"); SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree), OID_AUTO, "enable_ssu", CTLFLAG_RW, &sc->enable_ssu, 0, "enable SSU to SATA SSD/HDD at shutdown"); SYSCTL_ADD_UQUAD(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree), OID_AUTO, "chain_alloc_fail", CTLFLAG_RD, &sc->chain_alloc_fail, "chain allocation failures"); SYSCTL_ADD_INT(sysctl_ctx, 
SYSCTL_CHILDREN(sysctl_tree), OID_AUTO, "spinup_wait_time", CTLFLAG_RD, &sc->spinup_wait_time, DEFAULT_SPINUP_WAIT, "seconds to wait for " "spinup after SATA ID error"); SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree), OID_AUTO, "use_phy_num", CTLFLAG_RD, &sc->use_phynum, 0, "Use the phy number for enumeration"); SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree), OID_AUTO, "prp_pages_free", CTLFLAG_RD, &sc->prp_pages_free, 0, "number of free PRP pages"); SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree), OID_AUTO, "prp_pages_free_lowwater", CTLFLAG_RD, &sc->prp_pages_free_lowwater, 0,"lowest number of free PRP pages"); SYSCTL_ADD_UQUAD(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree), OID_AUTO, "prp_page_alloc_fail", CTLFLAG_RD, &sc->prp_page_alloc_fail, "PRP page allocation failures"); } static struct mpr_debug_string { char *name; int flag; } mpr_debug_strings[] = { {"info", MPR_INFO}, {"fault", MPR_FAULT}, {"event", MPR_EVENT}, {"log", MPR_LOG}, {"recovery", MPR_RECOVERY}, {"error", MPR_ERROR}, {"init", MPR_INIT}, {"xinfo", MPR_XINFO}, {"user", MPR_USER}, {"mapping", MPR_MAPPING}, {"trace", MPR_TRACE} }; enum mpr_debug_level_combiner { COMB_NONE, COMB_ADD, COMB_SUB }; static int mpr_debug_sysctl(SYSCTL_HANDLER_ARGS) { struct mpr_softc *sc; struct mpr_debug_string *string; struct sbuf *sbuf; char *buffer; size_t sz; int i, len, debug, error; sc = (struct mpr_softc *)arg1; error = sysctl_wire_old_buffer(req, 0); if (error != 0) return (error); sbuf = sbuf_new_for_sysctl(NULL, NULL, 128, req); debug = sc->mpr_debug; sbuf_printf(sbuf, "%#x", debug); sz = sizeof(mpr_debug_strings) / sizeof(mpr_debug_strings[0]); for (i = 0; i < sz; i++) { string = &mpr_debug_strings[i]; if (debug & string->flag) sbuf_printf(sbuf, ",%s", string->name); } error = sbuf_finish(sbuf); sbuf_delete(sbuf); if (error || req->newptr == NULL) return (error); len = req->newlen - req->newidx; if (len == 0) return (0); buffer = malloc(len, M_MPR, M_ZERO|M_WAITOK); error = SYSCTL_IN(req, buffer, len); mpr_parse_debug(sc, buffer); free(buffer, M_MPR); return (error); } static void mpr_parse_debug(struct mpr_softc *sc, char *list) { struct mpr_debug_string *string; enum mpr_debug_level_combiner op; char *token, *endtoken; size_t sz; int flags, i; if (list == NULL || *list == '\0') return; if (*list == '+') { op = COMB_ADD; list++; } else if (*list == '-') { op = COMB_SUB; list++; } else op = COMB_NONE; if (*list == '\0') return; flags = 0; sz = sizeof(mpr_debug_strings) / sizeof(mpr_debug_strings[0]); while ((token = strsep(&list, ":,")) != NULL) { /* Handle integer flags */ flags |= strtol(token, &endtoken, 0); if (token != endtoken) continue; /* Handle text flags */ for (i = 0; i < sz; i++) { string = &mpr_debug_strings[i]; if (strcasecmp(token, string->name) == 0) { flags |= string->flag; break; } } } switch (op) { case COMB_NONE: sc->mpr_debug = flags; break; case COMB_ADD: sc->mpr_debug |= flags; break; case COMB_SUB: sc->mpr_debug &= (~flags); break; } return; } int mpr_attach(struct mpr_softc *sc) { int error; MPR_FUNCTRACE(sc); mpr_dprint(sc, MPR_INIT, "%s entered\n", __func__); mtx_init(&sc->mpr_mtx, "MPR lock", NULL, MTX_DEF); callout_init_mtx(&sc->periodic, &sc->mpr_mtx, 0); callout_init_mtx(&sc->device_check_callout, &sc->mpr_mtx, 0); TAILQ_INIT(&sc->event_list); timevalclear(&sc->lastfail); if ((error = mpr_transition_ready(sc)) != 0) { mpr_dprint(sc, MPR_INIT|MPR_FAULT, "Failed to transition ready\n"); return (error); } sc->facts = malloc(sizeof(MPI2_IOC_FACTS_REPLY), M_MPR, M_ZERO|M_NOWAIT); if 
(!sc->facts) { mpr_dprint(sc, MPR_INIT|MPR_FAULT, "Cannot allocate memory, exit\n"); return (ENOMEM); } /* * Get IOC Facts and allocate all structures based on this information. * A Diag Reset will also call mpr_iocfacts_allocate and re-read the IOC * Facts. If relevant values have changed in IOC Facts, this function * will free all of the memory based on IOC Facts and reallocate that * memory. If this fails, any allocated memory should already be freed. */ if ((error = mpr_iocfacts_allocate(sc, TRUE)) != 0) { mpr_dprint(sc, MPR_INIT|MPR_FAULT, "IOC Facts allocation " "failed with error %d\n", error); return (error); } /* Start the periodic watchdog check on the IOC Doorbell */ mpr_periodic(sc); /* * The portenable will kick off discovery events that will drive the * rest of the initialization process. The CAM/SAS module will * hold up the boot sequence until discovery is complete. */ sc->mpr_ich.ich_func = mpr_startup; sc->mpr_ich.ich_arg = sc; if (config_intrhook_establish(&sc->mpr_ich) != 0) { mpr_dprint(sc, MPR_INIT|MPR_ERROR, "Cannot establish MPR config hook\n"); error = EINVAL; } /* * Allow IR to shutdown gracefully when shutdown occurs. */ sc->shutdown_eh = EVENTHANDLER_REGISTER(shutdown_final, mprsas_ir_shutdown, sc, SHUTDOWN_PRI_DEFAULT); if (sc->shutdown_eh == NULL) mpr_dprint(sc, MPR_INIT|MPR_ERROR, "shutdown event registration failed\n"); mpr_setup_sysctl(sc); sc->mpr_flags |= MPR_FLAGS_ATTACH_DONE; mpr_dprint(sc, MPR_INIT, "%s exit error= %d\n", __func__, error); return (error); } /* Run through any late-start handlers. */ static void mpr_startup(void *arg) { struct mpr_softc *sc; sc = (struct mpr_softc *)arg; mpr_dprint(sc, MPR_INIT, "%s entered\n", __func__); mpr_lock(sc); mpr_unmask_intr(sc); /* initialize device mapping tables */ mpr_base_static_config_pages(sc); mpr_mapping_initialize(sc); mprsas_startup(sc); mpr_unlock(sc); mpr_dprint(sc, MPR_INIT, "disestablish config intrhook\n"); config_intrhook_disestablish(&sc->mpr_ich); sc->mpr_ich.ich_arg = NULL; mpr_dprint(sc, MPR_INIT, "%s exit\n", __func__); } /* Periodic watchdog. Is called with the driver lock already held. 
*/ static void mpr_periodic(void *arg) { struct mpr_softc *sc; uint32_t db; sc = (struct mpr_softc *)arg; if (sc->mpr_flags & MPR_FLAGS_SHUTDOWN) return; db = mpr_regread(sc, MPI2_DOORBELL_OFFSET); if ((db & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT) { if ((db & MPI2_DOORBELL_FAULT_CODE_MASK) == IFAULT_IOP_OVER_TEMP_THRESHOLD_EXCEEDED) { panic("TEMPERATURE FAULT: STOPPING."); } mpr_dprint(sc, MPR_FAULT, "IOC Fault 0x%08x, Resetting\n", db); mpr_reinit(sc); } callout_reset(&sc->periodic, MPR_PERIODIC_DELAY * hz, mpr_periodic, sc); } static void mpr_log_evt_handler(struct mpr_softc *sc, uintptr_t data, MPI2_EVENT_NOTIFICATION_REPLY *event) { MPI2_EVENT_DATA_LOG_ENTRY_ADDED *entry; MPR_DPRINT_EVENT(sc, generic, event); switch (event->Event) { case MPI2_EVENT_LOG_DATA: mpr_dprint(sc, MPR_EVENT, "MPI2_EVENT_LOG_DATA:\n"); if (sc->mpr_debug & MPR_EVENT) hexdump(event->EventData, event->EventDataLength, NULL, 0); break; case MPI2_EVENT_LOG_ENTRY_ADDED: entry = (MPI2_EVENT_DATA_LOG_ENTRY_ADDED *)event->EventData; mpr_dprint(sc, MPR_EVENT, "MPI2_EVENT_LOG_ENTRY_ADDED event " "0x%x Sequence %d:\n", entry->LogEntryQualifier, entry->LogSequence); break; default: break; } return; } static int mpr_attach_log(struct mpr_softc *sc) { uint8_t events[16]; bzero(events, 16); setbit(events, MPI2_EVENT_LOG_DATA); setbit(events, MPI2_EVENT_LOG_ENTRY_ADDED); mpr_register_events(sc, events, mpr_log_evt_handler, NULL, &sc->mpr_log_eh); return (0); } static int mpr_detach_log(struct mpr_softc *sc) { if (sc->mpr_log_eh != NULL) mpr_deregister_events(sc, sc->mpr_log_eh); return (0); } /* * Free all of the driver resources and detach submodules. Should be called * without the lock held. */ int mpr_free(struct mpr_softc *sc) { int error; mpr_dprint(sc, MPR_INIT, "%s entered\n", __func__); /* Turn off the watchdog */ mpr_lock(sc); sc->mpr_flags |= MPR_FLAGS_SHUTDOWN; mpr_unlock(sc); /* Lock must not be held for this */ callout_drain(&sc->periodic); callout_drain(&sc->device_check_callout); if (((error = mpr_detach_log(sc)) != 0) || ((error = mpr_detach_sas(sc)) != 0)) { mpr_dprint(sc, MPR_INIT|MPR_FAULT, "failed to detach " "subsystems, error= %d, exit\n", error); return (error); } mpr_detach_user(sc); /* Put the IOC back in the READY state. */ mpr_lock(sc); if ((error = mpr_transition_ready(sc)) != 0) { mpr_unlock(sc); return (error); } mpr_unlock(sc); if (sc->facts != NULL) free(sc->facts, M_MPR); /* * Free all buffers that are based on IOC Facts. A Diag Reset may need * to free these buffers too. 
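 */

/*
 * The periodic watchdog above reduces to a small decision on the doorbell
 * value: no fault means rearm and return, an over-temperature fault is
 * treated as fatal, and any other fault triggers a controller reinit.  As a
 * standalone decision function, with placeholder mask values assumed for
 * the sketch rather than the MPI headers' definitions:
 */
#define SKETCH_STATE_MASK	0xf0000000u
#define SKETCH_STATE_FAULT	0x40000000u
#define SKETCH_FAULT_CODE_MASK	0x0000ffffu
#define SKETCH_FAULT_OVERTEMP	0x2810u

enum watchdog_action { WD_REARM, WD_REINIT, WD_PANIC };

static enum watchdog_action
watchdog_sketch(unsigned doorbell)
{
	if ((doorbell & SKETCH_STATE_MASK) != SKETCH_STATE_FAULT)
		return (WD_REARM);	/* healthy: just reschedule */
	if ((doorbell & SKETCH_FAULT_CODE_MASK) == SKETCH_FAULT_OVERTEMP)
		return (WD_PANIC);	/* thermal fault is not recoverable */
	return (WD_REINIT);		/* any other fault: reset the IOC */
}

/*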
*/ mpr_iocfacts_free(sc); if (sc->sysctl_tree != NULL) sysctl_ctx_free(&sc->sysctl_ctx); /* Deregister the shutdown function */ if (sc->shutdown_eh != NULL) EVENTHANDLER_DEREGISTER(shutdown_final, sc->shutdown_eh); mtx_destroy(&sc->mpr_mtx); mpr_dprint(sc, MPR_INIT, "%s exit\n", __func__); return (0); } static __inline void mpr_complete_command(struct mpr_softc *sc, struct mpr_command *cm) { MPR_FUNCTRACE(sc); if (cm == NULL) { mpr_dprint(sc, MPR_ERROR, "Completing NULL command\n"); return; } if (cm->cm_flags & MPR_CM_FLAGS_POLLED) cm->cm_flags |= MPR_CM_FLAGS_COMPLETE; if (cm->cm_complete != NULL) { mpr_dprint(sc, MPR_TRACE, "%s cm %p calling cm_complete %p data %p reply %p\n", __func__, cm, cm->cm_complete, cm->cm_complete_data, cm->cm_reply); cm->cm_complete(sc, cm); } if (cm->cm_flags & MPR_CM_FLAGS_WAKEUP) { mpr_dprint(sc, MPR_TRACE, "waking up %p\n", cm); wakeup(cm); } if (sc->io_cmds_active != 0) { sc->io_cmds_active--; } else { mpr_dprint(sc, MPR_ERROR, "Warning: io_cmds_active is " "out of sync - resynching to 0\n"); } } static void mpr_sas_log_info(struct mpr_softc *sc , u32 log_info) { union loginfo_type { u32 loginfo; struct { u32 subcode:16; u32 code:8; u32 originator:4; u32 bus_type:4; } dw; }; union loginfo_type sas_loginfo; char *originator_str = NULL; sas_loginfo.loginfo = log_info; if (sas_loginfo.dw.bus_type != 3 /*SAS*/) return; /* each nexus loss loginfo */ if (log_info == 0x31170000) return; /* eat the loginfos associated with task aborts */ if ((log_info == 30050000) || (log_info == 0x31140000) || (log_info == 0x31130000)) return; switch (sas_loginfo.dw.originator) { case 0: originator_str = "IOP"; break; case 1: originator_str = "PL"; break; case 2: originator_str = "IR"; break; } mpr_dprint(sc, MPR_LOG, "log_info(0x%08x): originator(%s), " "code(0x%02x), sub_code(0x%04x)\n", log_info, originator_str, sas_loginfo.dw.code, sas_loginfo.dw.subcode); } static void mpr_display_reply_info(struct mpr_softc *sc, uint8_t *reply) { MPI2DefaultReply_t *mpi_reply; u16 sc_status; mpi_reply = (MPI2DefaultReply_t*)reply; sc_status = le16toh(mpi_reply->IOCStatus); if (sc_status & MPI2_IOCSTATUS_FLAG_LOG_INFO_AVAILABLE) mpr_sas_log_info(sc, le32toh(mpi_reply->IOCLogInfo)); } void mpr_intr(void *data) { struct mpr_softc *sc; uint32_t status; sc = (struct mpr_softc *)data; mpr_dprint(sc, MPR_TRACE, "%s\n", __func__); /* * Check interrupt status register to flush the bus. This is * needed for both INTx interrupts and driver-driven polling */ status = mpr_regread(sc, MPI2_HOST_INTERRUPT_STATUS_OFFSET); if ((status & MPI2_HIS_REPLY_DESCRIPTOR_INTERRUPT) == 0) return; mpr_lock(sc); mpr_intr_locked(data); mpr_unlock(sc); return; } /* * In theory, MSI/MSIX interrupts shouldn't need to read any registers on the * chip. Hopefully this theory is correct. */ void mpr_intr_msi(void *data) { struct mpr_softc *sc; sc = (struct mpr_softc *)data; mpr_dprint(sc, MPR_TRACE, "%s\n", __func__); mpr_lock(sc); mpr_intr_locked(data); mpr_unlock(sc); return; } /* * The locking is overly broad and simplistic, but easy to deal with for now. 
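 *
 * mpr_intr_locked() below drains the reply post queue: it walks a circular
 * array of descriptors, stops at the first entry still marked unused, hands
 * each consumed slot back by writing 0xffffffff into it, and only then
 * publishes the new index to the reply-post host index register.  A minimal
 * sketch of that consumer pattern is shown below with hypothetical names
 * (example_ring, EXAMPLE_RING_FREE); it is illustrative only.
 */

#define EXAMPLE_RING_FREE	0xffffffffu	/* marker for an empty slot */

struct example_ring {
	uint32_t	*slots;		/* descriptor array shared with the device */
	u_int		depth;		/* number of slots in the ring */
	u_int		cidx;		/* next slot to consume */
};

static inline u_int
example_ring_drain(struct example_ring *r)
{
	u_int consumed = 0;

	while (r->slots[r->cidx] != EXAMPLE_RING_FREE) {
		/* ...process the descriptor in r->slots[r->cidx] here... */
		r->slots[r->cidx] = EXAMPLE_RING_FREE;	/* give the slot back */
		if (++r->cidx >= r->depth)		/* wrap the ring index */
			r->cidx = 0;
		consumed++;
	}
	/* The caller would now write r->cidx to the hardware index register. */
	return (consumed);
}

/*
 * The driver's interrupt handler follows.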
*/ void mpr_intr_locked(void *data) { MPI2_REPLY_DESCRIPTORS_UNION *desc; struct mpr_softc *sc; struct mpr_command *cm = NULL; uint8_t flags; u_int pq; MPI2_DIAG_RELEASE_REPLY *rel_rep; mpr_fw_diagnostic_buffer_t *pBuffer; sc = (struct mpr_softc *)data; pq = sc->replypostindex; mpr_dprint(sc, MPR_TRACE, "%s sc %p starting with replypostindex %u\n", __func__, sc, sc->replypostindex); for ( ;; ) { cm = NULL; desc = &sc->post_queue[sc->replypostindex]; flags = desc->Default.ReplyFlags & MPI2_RPY_DESCRIPT_FLAGS_TYPE_MASK; if ((flags == MPI2_RPY_DESCRIPT_FLAGS_UNUSED) || (le32toh(desc->Words.High) == 0xffffffff)) break; /* increment the replypostindex now, so that event handlers * and cm completion handlers which decide to do a diag * reset can zero it without it getting incremented again * afterwards, and we break out of this loop on the next * iteration since the reply post queue has been cleared to * 0xFF and all descriptors look unused (which they are). */ if (++sc->replypostindex >= sc->pqdepth) sc->replypostindex = 0; switch (flags) { case MPI2_RPY_DESCRIPT_FLAGS_SCSI_IO_SUCCESS: case MPI25_RPY_DESCRIPT_FLAGS_FAST_PATH_SCSI_IO_SUCCESS: case MPI26_RPY_DESCRIPT_FLAGS_PCIE_ENCAPSULATED_SUCCESS: cm = &sc->commands[le16toh(desc->SCSIIOSuccess.SMID)]; cm->cm_reply = NULL; break; case MPI2_RPY_DESCRIPT_FLAGS_ADDRESS_REPLY: { uint32_t baddr; uint8_t *reply; /* * Re-compose the reply address from the address * sent back from the chip. The ReplyFrameAddress * is the lower 32 bits of the physical address of * particular reply frame. Convert that address to * host format, and then use that to provide the * offset against the virtual address base * (sc->reply_frames). */ baddr = le32toh(desc->AddressReply.ReplyFrameAddress); reply = sc->reply_frames + (baddr - ((uint32_t)sc->reply_busaddr)); /* * Make sure the reply we got back is in a valid * range. If not, go ahead and panic here, since * we'll probably panic as soon as we deference the * reply pointer anyway. */ if ((reply < sc->reply_frames) || (reply > (sc->reply_frames + (sc->fqdepth * sc->facts->ReplyFrameSize * 4)))) { printf("%s: WARNING: reply %p out of range!\n", __func__, reply); printf("%s: reply_frames %p, fqdepth %d, " "frame size %d\n", __func__, sc->reply_frames, sc->fqdepth, sc->facts->ReplyFrameSize * 4); printf("%s: baddr %#x,\n", __func__, baddr); /* LSI-TODO. See Linux Code for Graceful exit */ panic("Reply address out of range"); } if (le16toh(desc->AddressReply.SMID) == 0) { if (((MPI2_DEFAULT_REPLY *)reply)->Function == MPI2_FUNCTION_DIAG_BUFFER_POST) { /* * If SMID is 0 for Diag Buffer Post, * this implies that the reply is due to * a release function with a status that * the buffer has been released. Set * the buffer flags accordingly. */ rel_rep = (MPI2_DIAG_RELEASE_REPLY *)reply; if ((le16toh(rel_rep->IOCStatus) & MPI2_IOCSTATUS_MASK) == MPI2_IOCSTATUS_DIAGNOSTIC_RELEASED) { pBuffer = &sc->fw_diag_buffer_list[ rel_rep->BufferType]; pBuffer->valid_data = TRUE; pBuffer->owned_by_firmware = FALSE; pBuffer->immediate = FALSE; } } else mpr_dispatch_event(sc, baddr, (MPI2_EVENT_NOTIFICATION_REPLY *) reply); } else { cm = &sc->commands[ le16toh(desc->AddressReply.SMID)]; cm->cm_reply = reply; cm->cm_reply_data = le32toh(desc->AddressReply. 
ReplyFrameAddress); } break; } case MPI2_RPY_DESCRIPT_FLAGS_TARGETASSIST_SUCCESS: case MPI2_RPY_DESCRIPT_FLAGS_TARGET_COMMAND_BUFFER: case MPI2_RPY_DESCRIPT_FLAGS_RAID_ACCELERATOR_SUCCESS: default: /* Unhandled */ mpr_dprint(sc, MPR_ERROR, "Unhandled reply 0x%x\n", desc->Default.ReplyFlags); cm = NULL; break; } if (cm != NULL) { // Print Error reply frame if (cm->cm_reply) mpr_display_reply_info(sc,cm->cm_reply); mpr_complete_command(sc, cm); } desc->Words.Low = 0xffffffff; desc->Words.High = 0xffffffff; } if (pq != sc->replypostindex) { mpr_dprint(sc, MPR_TRACE, "%s sc %p writing postindex %d\n", __func__, sc, sc->replypostindex); mpr_regwrite(sc, MPI2_REPLY_POST_HOST_INDEX_OFFSET, sc->replypostindex); } return; } static void mpr_dispatch_event(struct mpr_softc *sc, uintptr_t data, MPI2_EVENT_NOTIFICATION_REPLY *reply) { struct mpr_event_handle *eh; int event, handled = 0; event = le16toh(reply->Event); TAILQ_FOREACH(eh, &sc->event_list, eh_list) { if (isset(eh->mask, event)) { eh->callback(sc, data, reply); handled++; } } if (handled == 0) mpr_dprint(sc, MPR_EVENT, "Unhandled event 0x%x\n", le16toh(event)); /* * This is the only place that the event/reply should be freed. * Anything wanting to hold onto the event data should have * already copied it into their own storage. */ mpr_free_reply(sc, data); } static void mpr_reregister_events_complete(struct mpr_softc *sc, struct mpr_command *cm) { mpr_dprint(sc, MPR_TRACE, "%s\n", __func__); if (cm->cm_reply) MPR_DPRINT_EVENT(sc, generic, (MPI2_EVENT_NOTIFICATION_REPLY *)cm->cm_reply); mpr_free_command(sc, cm); /* next, send a port enable */ mprsas_startup(sc); } /* * For both register_events and update_events, the caller supplies a bitmap * of events that it _wants_. These functions then turn that into a bitmask * suitable for the controller. 
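 *
 * The controller-side EventMasks field works as a disable mask: a bit that is
 * set suppresses the corresponding event (which is why the debug-all path
 * below loads an all-zero mask).  The driver therefore starts from all ones
 * and clears the bit of every event that at least one registered handler
 * wants.  A small sketch of that fold, using hypothetical names
 * (example_handler, example_fold_event_masks), purely for illustration:
 */

struct example_handler {
	uint8_t	wanted[16];	/* bit set = this handler wants the event */
};

static inline void
example_fold_event_masks(uint8_t disable_mask[16],
    const struct example_handler *handlers, int nhandlers)
{
	int h, i;

	memset(disable_mask, 0xff, 16);		/* start with everything masked */
	for (h = 0; h < nhandlers; h++)
		for (i = 0; i < 16; i++)	/* unmask each wanted event */
			disable_mask[i] &= ~handlers[h].wanted[i];
}

/*
 * mpr_register_events() and mpr_update_events() follow.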
*/ int mpr_register_events(struct mpr_softc *sc, uint8_t *mask, mpr_evt_callback_t *cb, void *data, struct mpr_event_handle **handle) { struct mpr_event_handle *eh; int error = 0; eh = malloc(sizeof(struct mpr_event_handle), M_MPR, M_WAITOK|M_ZERO); if (!eh) { mpr_dprint(sc, MPR_EVENT|MPR_ERROR, "Cannot allocate event memory\n"); return (ENOMEM); } eh->callback = cb; eh->data = data; TAILQ_INSERT_TAIL(&sc->event_list, eh, eh_list); if (mask != NULL) error = mpr_update_events(sc, eh, mask); *handle = eh; return (error); } int mpr_update_events(struct mpr_softc *sc, struct mpr_event_handle *handle, uint8_t *mask) { MPI2_EVENT_NOTIFICATION_REQUEST *evtreq; MPI2_EVENT_NOTIFICATION_REPLY *reply = NULL; struct mpr_command *cm = NULL; struct mpr_event_handle *eh; int error, i; mpr_dprint(sc, MPR_TRACE, "%s\n", __func__); if ((mask != NULL) && (handle != NULL)) bcopy(mask, &handle->mask[0], 16); memset(sc->event_mask, 0xff, 16); TAILQ_FOREACH(eh, &sc->event_list, eh_list) { for (i = 0; i < 16; i++) sc->event_mask[i] &= ~eh->mask[i]; } if ((cm = mpr_alloc_command(sc)) == NULL) return (EBUSY); evtreq = (MPI2_EVENT_NOTIFICATION_REQUEST *)cm->cm_req; evtreq->Function = MPI2_FUNCTION_EVENT_NOTIFICATION; evtreq->MsgFlags = 0; evtreq->SASBroadcastPrimitiveMasks = 0; #ifdef MPR_DEBUG_ALL_EVENTS { u_char fullmask[16]; memset(fullmask, 0x00, 16); bcopy(fullmask, (uint8_t *)&evtreq->EventMasks, 16); } #else bcopy(sc->event_mask, (uint8_t *)&evtreq->EventMasks, 16); #endif cm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE; cm->cm_data = NULL; error = mpr_request_polled(sc, &cm); if (cm != NULL) reply = (MPI2_EVENT_NOTIFICATION_REPLY *)cm->cm_reply; if ((reply == NULL) || (reply->IOCStatus & MPI2_IOCSTATUS_MASK) != MPI2_IOCSTATUS_SUCCESS) error = ENXIO; if (reply) MPR_DPRINT_EVENT(sc, generic, reply); mpr_dprint(sc, MPR_TRACE, "%s finished error %d\n", __func__, error); if (cm != NULL) mpr_free_command(sc, cm); return (error); } static int mpr_reregister_events(struct mpr_softc *sc) { MPI2_EVENT_NOTIFICATION_REQUEST *evtreq; struct mpr_command *cm; struct mpr_event_handle *eh; int error, i; mpr_dprint(sc, MPR_TRACE, "%s\n", __func__); /* first, reregister events */ memset(sc->event_mask, 0xff, 16); TAILQ_FOREACH(eh, &sc->event_list, eh_list) { for (i = 0; i < 16; i++) sc->event_mask[i] &= ~eh->mask[i]; } if ((cm = mpr_alloc_command(sc)) == NULL) return (EBUSY); evtreq = (MPI2_EVENT_NOTIFICATION_REQUEST *)cm->cm_req; evtreq->Function = MPI2_FUNCTION_EVENT_NOTIFICATION; evtreq->MsgFlags = 0; evtreq->SASBroadcastPrimitiveMasks = 0; #ifdef MPR_DEBUG_ALL_EVENTS { u_char fullmask[16]; memset(fullmask, 0x00, 16); bcopy(fullmask, (uint8_t *)&evtreq->EventMasks, 16); } #else bcopy(sc->event_mask, (uint8_t *)&evtreq->EventMasks, 16); #endif cm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE; cm->cm_data = NULL; cm->cm_complete = mpr_reregister_events_complete; error = mpr_map_command(sc, cm); mpr_dprint(sc, MPR_TRACE, "%s finished with error %d\n", __func__, error); return (error); } int mpr_deregister_events(struct mpr_softc *sc, struct mpr_event_handle *handle) { TAILQ_REMOVE(&sc->event_list, handle, eh_list); free(handle, M_MPR); return (mpr_update_events(sc, NULL, NULL)); } /** * mpr_build_nvme_prp - This function is called for NVMe end devices to build a * native SGL (NVMe PRP). The native SGL is built starting in the first PRP entry * of the NVMe message (PRP1). If the data buffer is small enough to be described * entirely using PRP1, then PRP2 is not used. 
If needed, PRP2 is used to * describe a larger data buffer. If the data buffer is too large to describe * using the two PRP entriess inside the NVMe message, then PRP1 describes the * first data memory segment, and PRP2 contains a pointer to a PRP list located * elsewhere in memory to describe the remaining data memory segments. The PRP * list will be contiguous. * The native SGL for NVMe devices is a Physical Region Page (PRP). A PRP * consists of a list of PRP entries to describe a number of noncontigous * physical memory segments as a single memory buffer, just as a SGL does. Note * however, that this function is only used by the IOCTL call, so the memory * given will be guaranteed to be contiguous. There is no need to translate * non-contiguous SGL into a PRP in this case. All PRPs will describe contiguous * space that is one page size each. * * Each NVMe message contains two PRP entries. The first (PRP1) either contains * a PRP list pointer or a PRP element, depending upon the command. PRP2 contains * the second PRP element if the memory being described fits within 2 PRP * entries, or a PRP list pointer if the PRP spans more than two entries. * * A PRP list pointer contains the address of a PRP list, structured as a linear * array of PRP entries. Each PRP entry in this list describes a segment of * physical memory. * * Each 64-bit PRP entry comprises an address and an offset field. The address * always points to the beginning of a PAGE_SIZE physical memory page, and the * offset describes where within that page the memory segment begins. Only the * first element in a PRP list may contain a non-zero offest, implying that all * memory segments following the first begin at the start of a PAGE_SIZE page. * * Each PRP element normally describes a chunck of PAGE_SIZE physical memory, * with exceptions for the first and last elements in the list. If the memory * being described by the list begins at a non-zero offset within the first page, * then the first PRP element will contain a non-zero offset indicating where the * region begins within the page. The last memory segment may end before the end * of the PAGE_SIZE segment, depending upon the overall size of the memory being * described by the PRP list. * * Since PRP entries lack any indication of size, the overall data buffer length * is used to determine where the end of the data memory buffer is located, and * how many PRP entries are required to describe it. * * Returns nothing. */ void mpr_build_nvme_prp(struct mpr_softc *sc, struct mpr_command *cm, Mpi26NVMeEncapsulatedRequest_t *nvme_encap_request, void *data, uint32_t data_in_sz, uint32_t data_out_sz) { int prp_size = PRP_ENTRY_SIZE; uint64_t *prp_entry, *prp1_entry, *prp2_entry; uint64_t *prp_entry_phys, *prp_page, *prp_page_phys; uint32_t offset, entry_len, page_mask_result, page_mask; bus_addr_t paddr; size_t length; struct mpr_prp_page *prp_page_info = NULL; /* * Not all commands require a data transfer. If no data, just return * without constructing any PRP. */ if (!data_in_sz && !data_out_sz) return; /* * Set pointers to PRP1 and PRP2, which are in the NVMe command. PRP1 is * located at a 24 byte offset from the start of the NVMe command. Then * set the current PRP entry pointer to PRP1. */ prp1_entry = (uint64_t *)(nvme_encap_request->NVMe_Command + NVME_CMD_PRP1_OFFSET); prp2_entry = (uint64_t *)(nvme_encap_request->NVMe_Command + NVME_CMD_PRP2_OFFSET); prp_entry = prp1_entry; /* * For the PRP entries, use the specially allocated buffer of * contiguous memory. 
PRP Page allocation failures should not happen * because there should be enough PRP page buffers to account for the * possible NVMe QDepth. */ prp_page_info = mpr_alloc_prp_page(sc); KASSERT(prp_page_info != NULL, ("%s: There are no PRP Pages left to be " "used for building a native NVMe SGL.\n", __func__)); prp_page = (uint64_t *)prp_page_info->prp_page; prp_page_phys = (uint64_t *)(uintptr_t)prp_page_info->prp_page_busaddr; /* * Insert the allocated PRP page into the command's PRP page list. This * will be freed when the command is freed. */ TAILQ_INSERT_TAIL(&cm->cm_prp_page_list, prp_page_info, prp_page_link); /* * Check if we are within 1 entry of a page boundary we don't want our * first entry to be a PRP List entry. */ page_mask = PAGE_SIZE - 1; page_mask_result = (uintptr_t)((uint8_t *)prp_page + prp_size) & page_mask; if (!page_mask_result) { /* Bump up to next page boundary. */ prp_page = (uint64_t *)((uint8_t *)prp_page + prp_size); prp_page_phys = (uint64_t *)((uint8_t *)prp_page_phys + prp_size); } /* * Set PRP physical pointer, which initially points to the current PRP * DMA memory page. */ prp_entry_phys = prp_page_phys; /* Get physical address and length of the data buffer. */ paddr = (bus_addr_t)data; if (data_in_sz) length = data_in_sz; else length = data_out_sz; /* Loop while the length is not zero. */ while (length) { /* * Check if we need to put a list pointer here if we are at page * boundary - prp_size (8 bytes). */ page_mask_result = (uintptr_t)((uint8_t *)prp_entry_phys + prp_size) & page_mask; if (!page_mask_result) { /* * This is the last entry in a PRP List, so we need to * put a PRP list pointer here. What this does is: * - bump the current memory pointer to the next * address, which will be the next full page. * - set the PRP Entry to point to that page. This is * now the PRP List pointer. * - bump the PRP Entry pointer the start of the next * page. Since all of this PRP memory is contiguous, * no need to get a new page - it's just the next * address. */ prp_entry_phys++; *prp_entry = htole64((uint64_t)(uintptr_t)prp_entry_phys); prp_entry++; } /* Need to handle if entry will be part of a page. */ offset = (uint32_t)paddr & page_mask; entry_len = PAGE_SIZE - offset; if (prp_entry == prp1_entry) { /* * Must fill in the first PRP pointer (PRP1) before * moving on. */ *prp1_entry = htole64((uint64_t)paddr); /* * Now point to the second PRP entry within the * command (PRP2). */ prp_entry = prp2_entry; } else if (prp_entry == prp2_entry) { /* * Should the PRP2 entry be a PRP List pointer or just a * regular PRP pointer? If there is more than one more * page of data, must use a PRP List pointer. */ if (length > PAGE_SIZE) { /* * PRP2 will contain a PRP List pointer because * more PRP's are needed with this command. The * list will start at the beginning of the * contiguous buffer. */ *prp2_entry = htole64( (uint64_t)(uintptr_t)prp_entry_phys); /* * The next PRP Entry will be the start of the * first PRP List. */ prp_entry = prp_page; } else { /* * After this, the PRP Entries are complete. * This command uses 2 PRP's and no PRP list. */ *prp2_entry = htole64((uint64_t)paddr); } } else { /* * Put entry in list and bump the addresses. * * After PRP1 and PRP2 are filled in, this will fill in * all remaining PRP entries in a PRP List, one per each * time through the loop. */ *prp_entry = htole64((uint64_t)paddr); prp_entry++; prp_entry_phys++; } /* * Bump the phys address of the command's data buffer by the * entry_len. 
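		 *
		 * Worked example (assuming a 4096-byte PAGE_SIZE): if the
		 * buffer starts at physical address 0x12345678, then offset is
		 * 0x678 and the first PRP entry covers entry_len = 0x1000 -
		 * 0x678 = 0x988 bytes; every later entry starts page aligned
		 * and covers a full page, except possibly the last one.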
*/ paddr += entry_len; /* Decrement length accounting for last partial page. */ if (entry_len > length) length = 0; else length -= entry_len; } } /* * mpr_check_pcie_native_sgl - This function is called for PCIe end devices to * determine if the driver needs to build a native SGL. If so, that native SGL * is built in the contiguous buffers allocated especially for PCIe SGL * creation. If the driver will not build a native SGL, return TRUE and a * normal IEEE SGL will be built. Currently this routine supports NVMe devices * only. * * Returns FALSE (0) if native SGL was built, TRUE (1) if no SGL was built. */ static int mpr_check_pcie_native_sgl(struct mpr_softc *sc, struct mpr_command *cm, bus_dma_segment_t *segs, int segs_left) { uint32_t i, sge_dwords, length, offset, entry_len; uint32_t num_entries, buff_len = 0, sges_in_segment; uint32_t page_mask, page_mask_result, *curr_buff; uint32_t *ptr_sgl, *ptr_first_sgl, first_page_offset; uint32_t first_page_data_size, end_residual; uint64_t *msg_phys; bus_addr_t paddr; int build_native_sgl = 0, first_prp_entry; int prp_size = PRP_ENTRY_SIZE; Mpi25IeeeSgeChain64_t *main_chain_element = NULL; struct mpr_prp_page *prp_page_info = NULL; mpr_dprint(sc, MPR_TRACE, "%s\n", __func__); /* * Add up the sizes of each segment length to get the total transfer * size, which will be checked against the Maximum Data Transfer Size. * If the data transfer length exceeds the MDTS for this device, just * return 1 so a normal IEEE SGL will be built. F/W will break the I/O * up into multiple I/O's. [nvme_mdts = 0 means unlimited] */ for (i = 0; i < segs_left; i++) buff_len += htole32(segs[i].ds_len); if ((cm->cm_targ->MDTS > 0) && (buff_len > cm->cm_targ->MDTS)) return 1; /* Create page_mask (to get offset within page) */ page_mask = PAGE_SIZE - 1; /* * Check if the number of elements exceeds the max number that can be * put in the main message frame (H/W can only translate an SGL that * is contained entirely in the main message frame). */ sges_in_segment = (sc->facts->IOCRequestFrameSize - offsetof(Mpi25SCSIIORequest_t, SGL)) / sizeof(MPI25_SGE_IO_UNION); if (segs_left > sges_in_segment) build_native_sgl = 1; else { /* * NVMe uses one PRP for each physical page (or part of physical * page). * if 4 pages or less then IEEE is OK * if > 5 pages then we need to build a native SGL * if > 4 and <= 5 pages, then check the physical address of * the first SG entry, then if this first size in the page * is >= the residual beyond 4 pages then use IEEE, * otherwise use native SGL */ if (buff_len > (PAGE_SIZE * 5)) build_native_sgl = 1; else if ((buff_len > (PAGE_SIZE * 4)) && (buff_len <= (PAGE_SIZE * 5)) ) { msg_phys = (uint64_t *)segs[0].ds_addr; first_page_offset = ((uint32_t)(uint64_t)(uintptr_t)msg_phys & page_mask); first_page_data_size = PAGE_SIZE - first_page_offset; end_residual = buff_len % PAGE_SIZE; /* * If offset into first page pushes the end of the data * beyond end of the 5th page, we need the extra PRP * list. */ if (first_page_data_size < end_residual) build_native_sgl = 1; /* * Check if first SG entry size is < residual beyond 4 * pages. */ if (htole32(segs[0].ds_len) < (buff_len - (PAGE_SIZE * 4))) build_native_sgl = 1; } } /* check if native SGL is needed */ if (!build_native_sgl) return 1; /* * Native SGL is needed. * Put a chain element in main message frame that points to the first * chain buffer. * * NOTE: The ChainOffset field must be 0 when using a chain pointer to * a native SGL. 
*/ /* Set main message chain element pointer */ main_chain_element = (pMpi25IeeeSgeChain64_t)cm->cm_sge; /* * For NVMe the chain element needs to be the 2nd SGL entry in the main * message. */ main_chain_element = (Mpi25IeeeSgeChain64_t *) ((uint8_t *)main_chain_element + sizeof(MPI25_IEEE_SGE_CHAIN64)); /* * For the PRP entries, use the specially allocated buffer of * contiguous memory. PRP Page allocation failures should not happen * because there should be enough PRP page buffers to account for the * possible NVMe QDepth. */ prp_page_info = mpr_alloc_prp_page(sc); KASSERT(prp_page_info != NULL, ("%s: There are no PRP Pages left to be " "used for building a native NVMe SGL.\n", __func__)); curr_buff = (uint32_t *)prp_page_info->prp_page; msg_phys = (uint64_t *)(uintptr_t)prp_page_info->prp_page_busaddr; /* * Insert the allocated PRP page into the command's PRP page list. This * will be freed when the command is freed. */ TAILQ_INSERT_TAIL(&cm->cm_prp_page_list, prp_page_info, prp_page_link); /* * Check if we are within 1 entry of a page boundary we don't want our * first entry to be a PRP List entry. */ page_mask_result = (uintptr_t)((uint8_t *)curr_buff + prp_size) & page_mask; if (!page_mask_result) { /* Bump up to next page boundary. */ curr_buff = (uint32_t *)((uint8_t *)curr_buff + prp_size); msg_phys = (uint64_t *)((uint8_t *)msg_phys + prp_size); } /* Fill in the chain element and make it an NVMe segment type. */ main_chain_element->Address.High = htole32((uint32_t)((uint64_t)(uintptr_t)msg_phys >> 32)); main_chain_element->Address.Low = htole32((uint32_t)(uintptr_t)msg_phys); main_chain_element->NextChainOffset = 0; main_chain_element->Flags = MPI2_IEEE_SGE_FLAGS_CHAIN_ELEMENT | MPI2_IEEE_SGE_FLAGS_SYSTEM_ADDR | MPI26_IEEE_SGE_FLAGS_NSF_NVME_PRP; /* Set SGL pointer to start of contiguous PCIe buffer. */ ptr_sgl = curr_buff; sge_dwords = 2; num_entries = 0; /* * NVMe has a very convoluted PRP format. One PRP is required for each * page or partial page. We need to split up OS SG entries if they are * longer than one page or cross a page boundary. We also have to insert * a PRP list pointer entry as the last entry in each physical page of * the PRP list. * * NOTE: The first PRP "entry" is actually placed in the first SGL entry * in the main message in IEEE 64 format. The 2nd entry in the main * message is the chain element, and the rest of the PRP entries are * built in the contiguous PCIe buffer. */ first_prp_entry = 1; ptr_first_sgl = (uint32_t *)cm->cm_sge; for (i = 0; i < segs_left; i++) { /* Get physical address and length of this SG entry. */ paddr = segs[i].ds_addr; length = segs[i].ds_len; /* * Check whether a given SGE buffer lies on a non-PAGED * boundary if this is not the first page. If so, this is not * expected so have FW build the SGL. */ if ((i != 0) && (((uint32_t)paddr & page_mask) != 0)) { mpr_dprint(sc, MPR_ERROR, "Unaligned SGE while " "building NVMe PRPs, low address is 0x%x\n", (uint32_t)paddr); return 1; } /* Apart from last SGE, if any other SGE boundary is not page * aligned then it means that hole exists. Existence of hole * leads to data corruption. So fallback to IEEE SGEs. */ if (i != (segs_left - 1)) { if (((uint32_t)paddr + length) & page_mask) { mpr_dprint(sc, MPR_ERROR, "Unaligned SGE " "boundary while building NVMe PRPs, low " "address: 0x%x and length: %u\n", (uint32_t)paddr, length); return 1; } } /* Loop while the length is not zero. */ while (length) { /* * Check if we need to put a list pointer here if we are * at page boundary - prp_size. 
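			 *
			 * Worked example (assuming a 4096-byte PAGE_SIZE and an
			 * 8-byte prp_size): if ptr_sgl currently sits at offset
			 * 0xff8 within its page, then (0xff8 + 8) & 0xfff is 0,
			 * so this is the last slot in the page and must hold a
			 * pointer to the next PRP page rather than a data
			 * address.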
*/ page_mask_result = (uintptr_t)((uint8_t *)ptr_sgl + prp_size) & page_mask; if (!page_mask_result) { /* * Need to put a PRP list pointer here. */ msg_phys = (uint64_t *)((uint8_t *)msg_phys + prp_size); *ptr_sgl = htole32((uintptr_t)msg_phys); *(ptr_sgl+1) = htole32((uint64_t)(uintptr_t) msg_phys >> 32); ptr_sgl += sge_dwords; num_entries++; } /* Need to handle if entry will be part of a page. */ offset = (uint32_t)paddr & page_mask; entry_len = PAGE_SIZE - offset; if (first_prp_entry) { /* * Put IEEE entry in first SGE in main message. * (Simple element, System addr, not end of * list.) */ *ptr_first_sgl = htole32((uint32_t)paddr); *(ptr_first_sgl + 1) = htole32((uint32_t)((uint64_t)paddr >> 32)); *(ptr_first_sgl + 2) = htole32(entry_len); *(ptr_first_sgl + 3) = 0; /* No longer the first PRP entry. */ first_prp_entry = 0; } else { /* Put entry in list. */ *ptr_sgl = htole32((uint32_t)paddr); *(ptr_sgl + 1) = htole32((uint32_t)((uint64_t)paddr >> 32)); /* Bump ptr_sgl, msg_phys, and num_entries. */ ptr_sgl += sge_dwords; msg_phys = (uint64_t *)((uint8_t *)msg_phys + prp_size); num_entries++; } /* Bump the phys address by the entry_len. */ paddr += entry_len; /* Decrement length accounting for last partial page. */ if (entry_len > length) length = 0; else length -= entry_len; } } /* Set chain element Length. */ main_chain_element->Length = htole32(num_entries * prp_size); /* Return 0, indicating we built a native SGL. */ return 0; } /* * Add a chain element as the next SGE for the specified command. * Reset cm_sge and cm_sgesize to indicate all the available space. Chains are * only required for IEEE commands. Therefore there is no code for commands * that have the MPR_CM_FLAGS_SGE_SIMPLE flag set (and those commands * shouldn't be requesting chains). */ static int mpr_add_chain(struct mpr_command *cm, int segsleft) { struct mpr_softc *sc = cm->cm_sc; MPI2_REQUEST_HEADER *req; MPI25_IEEE_SGE_CHAIN64 *ieee_sgc; struct mpr_chain *chain; int sgc_size, current_segs, rem_segs, segs_per_frame; uint8_t next_chain_offset = 0; /* * Fail if a command is requesting a chain for SIMPLE SGE's. For SAS3 * only IEEE commands should be requesting chains. Return some error * code other than 0. */ if (cm->cm_flags & MPR_CM_FLAGS_SGE_SIMPLE) { mpr_dprint(sc, MPR_ERROR, "A chain element cannot be added to " "an MPI SGL.\n"); return(ENOBUFS); } sgc_size = sizeof(MPI25_IEEE_SGE_CHAIN64); if (cm->cm_sglsize < sgc_size) panic("MPR: Need SGE Error Code\n"); chain = mpr_alloc_chain(cm->cm_sc); if (chain == NULL) return (ENOBUFS); /* * Note: a double-linked list is used to make it easier to walk for * debugging. */ TAILQ_INSERT_TAIL(&cm->cm_chain_list, chain, chain_link); /* * Need to know if the number of frames left is more than 1 or not. If * more than 1 frame is required, NextChainOffset will need to be set, * which will just be the last segment of the frame. */ rem_segs = 0; if (cm->cm_sglsize < (sgc_size * segsleft)) { /* * rem_segs is the number of segements remaining after the * segments that will go into the current frame. Since it is * known that at least one more frame is required, account for * the chain element. To know if more than one more frame is * required, just check if there will be a remainder after using * the current frame (with this chain) and the next frame. If * so the NextChainOffset must be the last element of the next * frame. 
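		 *
		 * Worked example (purely illustrative sizes: a 128-byte chain
		 * frame and 16-byte IEEE chain SGEs, i.e. 8 segments per
		 * frame): with 48 bytes of SGL space left, current_segs is
		 * (48 / 16) - 1 = 2; if 20 segments remain, rem_segs is 18,
		 * and since 18 > 8 the NextChainOffset is set to the last
		 * slot of the next frame (8 - 1 = 7).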
*/ current_segs = (cm->cm_sglsize / sgc_size) - 1; rem_segs = segsleft - current_segs; segs_per_frame = sc->chain_frame_size / sgc_size; if (rem_segs > segs_per_frame) { next_chain_offset = segs_per_frame - 1; } } ieee_sgc = &((MPI25_SGE_IO_UNION *)cm->cm_sge)->IeeeChain; ieee_sgc->Length = next_chain_offset ? htole32((uint32_t)sc->chain_frame_size) : htole32((uint32_t)rem_segs * (uint32_t)sgc_size); ieee_sgc->NextChainOffset = next_chain_offset; ieee_sgc->Flags = (MPI2_IEEE_SGE_FLAGS_CHAIN_ELEMENT | MPI2_IEEE_SGE_FLAGS_SYSTEM_ADDR); ieee_sgc->Address.Low = htole32(chain->chain_busaddr); ieee_sgc->Address.High = htole32(chain->chain_busaddr >> 32); cm->cm_sge = &((MPI25_SGE_IO_UNION *)chain->chain)->IeeeSimple; req = (MPI2_REQUEST_HEADER *)cm->cm_req; req->ChainOffset = (sc->chain_frame_size - sgc_size) >> 4; cm->cm_sglsize = sc->chain_frame_size; return (0); } /* * Add one scatter-gather element to the scatter-gather list for a command. * Maintain cm_sglsize and cm_sge as the remaining size and pointer to the * next SGE to fill in, respectively. In Gen3, the MPI SGL does not have a * chain, so don't consider any chain additions. */ int mpr_push_sge(struct mpr_command *cm, MPI2_SGE_SIMPLE64 *sge, size_t len, int segsleft) { uint32_t saved_buf_len, saved_address_low, saved_address_high; u32 sge_flags; /* * case 1: >=1 more segment, no room for anything (error) * case 2: 1 more segment and enough room for it */ if (cm->cm_sglsize < (segsleft * sizeof(MPI2_SGE_SIMPLE64))) { mpr_dprint(cm->cm_sc, MPR_ERROR, "%s: warning: Not enough room for MPI SGL in frame.\n", __func__); return(ENOBUFS); } KASSERT(segsleft == 1, ("segsleft cannot be more than 1 for an MPI SGL; segsleft = %d\n", segsleft)); /* * There is one more segment left to add for the MPI SGL and there is * enough room in the frame to add it. This is the normal case because * MPI SGL's don't have chains, otherwise something is wrong. * * If this is a bi-directional request, need to account for that * here. Save the pre-filled sge values. These will be used * either for the 2nd SGL or for a single direction SGL. If * cm_out_len is non-zero, this is a bi-directional request, so * fill in the OUT SGL first, then the IN SGL, otherwise just * fill in the IN SGL. Note that at this time, when filling in * 2 SGL's for a bi-directional request, they both use the same * DMA buffer (same cm command). 
*/ saved_buf_len = sge->FlagsLength & 0x00FFFFFF; saved_address_low = sge->Address.Low; saved_address_high = sge->Address.High; if (cm->cm_out_len) { sge->FlagsLength = cm->cm_out_len | ((uint32_t)(MPI2_SGE_FLAGS_SIMPLE_ELEMENT | MPI2_SGE_FLAGS_END_OF_BUFFER | MPI2_SGE_FLAGS_HOST_TO_IOC | MPI2_SGE_FLAGS_64_BIT_ADDRESSING) << MPI2_SGE_FLAGS_SHIFT); cm->cm_sglsize -= len; /* Endian Safe code */ sge_flags = sge->FlagsLength; sge->FlagsLength = htole32(sge_flags); sge->Address.High = htole32(sge->Address.High); sge->Address.Low = htole32(sge->Address.Low); bcopy(sge, cm->cm_sge, len); cm->cm_sge = (MPI2_SGE_IO_UNION *)((uintptr_t)cm->cm_sge + len); } sge->FlagsLength = saved_buf_len | ((uint32_t)(MPI2_SGE_FLAGS_SIMPLE_ELEMENT | MPI2_SGE_FLAGS_END_OF_BUFFER | MPI2_SGE_FLAGS_LAST_ELEMENT | MPI2_SGE_FLAGS_END_OF_LIST | MPI2_SGE_FLAGS_64_BIT_ADDRESSING) << MPI2_SGE_FLAGS_SHIFT); if (cm->cm_flags & MPR_CM_FLAGS_DATAIN) { sge->FlagsLength |= ((uint32_t)(MPI2_SGE_FLAGS_IOC_TO_HOST) << MPI2_SGE_FLAGS_SHIFT); } else { sge->FlagsLength |= ((uint32_t)(MPI2_SGE_FLAGS_HOST_TO_IOC) << MPI2_SGE_FLAGS_SHIFT); } sge->Address.Low = saved_address_low; sge->Address.High = saved_address_high; cm->cm_sglsize -= len; /* Endian Safe code */ sge_flags = sge->FlagsLength; sge->FlagsLength = htole32(sge_flags); sge->Address.High = htole32(sge->Address.High); sge->Address.Low = htole32(sge->Address.Low); bcopy(sge, cm->cm_sge, len); cm->cm_sge = (MPI2_SGE_IO_UNION *)((uintptr_t)cm->cm_sge + len); return (0); } /* * Add one IEEE scatter-gather element (chain or simple) to the IEEE scatter- * gather list for a command. Maintain cm_sglsize and cm_sge as the * remaining size and pointer to the next SGE to fill in, respectively. */ int mpr_push_ieee_sge(struct mpr_command *cm, void *sgep, int segsleft) { MPI2_IEEE_SGE_SIMPLE64 *sge = sgep; int error, ieee_sge_size = sizeof(MPI25_SGE_IO_UNION); uint32_t saved_buf_len, saved_address_low, saved_address_high; uint32_t sge_length; /* * case 1: No room for chain or segment (error). * case 2: Two or more segments left but only room for chain. * case 3: Last segment and room for it, so set flags. */ /* * There should be room for at least one element, or there is a big * problem. */ if (cm->cm_sglsize < ieee_sge_size) panic("MPR: Need SGE Error Code\n"); if ((segsleft >= 2) && (cm->cm_sglsize < (ieee_sge_size * 2))) { if ((error = mpr_add_chain(cm, segsleft)) != 0) return (error); } if (segsleft == 1) { /* * If this is a bi-directional request, need to account for that * here. Save the pre-filled sge values. These will be used * either for the 2nd SGL or for a single direction SGL. If * cm_out_len is non-zero, this is a bi-directional request, so * fill in the OUT SGL first, then the IN SGL, otherwise just * fill in the IN SGL. Note that at this time, when filling in * 2 SGL's for a bi-directional request, they both use the same * DMA buffer (same cm command). 
*/ saved_buf_len = sge->Length; saved_address_low = sge->Address.Low; saved_address_high = sge->Address.High; if (cm->cm_out_len) { sge->Length = cm->cm_out_len; sge->Flags = (MPI2_IEEE_SGE_FLAGS_SIMPLE_ELEMENT | MPI2_IEEE_SGE_FLAGS_SYSTEM_ADDR); cm->cm_sglsize -= ieee_sge_size; /* Endian Safe code */ sge_length = sge->Length; sge->Length = htole32(sge_length); sge->Address.High = htole32(sge->Address.High); sge->Address.Low = htole32(sge->Address.Low); bcopy(sgep, cm->cm_sge, ieee_sge_size); cm->cm_sge = (MPI25_SGE_IO_UNION *)((uintptr_t)cm->cm_sge + ieee_sge_size); } sge->Length = saved_buf_len; sge->Flags = (MPI2_IEEE_SGE_FLAGS_SIMPLE_ELEMENT | MPI2_IEEE_SGE_FLAGS_SYSTEM_ADDR | MPI25_IEEE_SGE_FLAGS_END_OF_LIST); sge->Address.Low = saved_address_low; sge->Address.High = saved_address_high; } cm->cm_sglsize -= ieee_sge_size; /* Endian Safe code */ sge_length = sge->Length; sge->Length = htole32(sge_length); sge->Address.High = htole32(sge->Address.High); sge->Address.Low = htole32(sge->Address.Low); bcopy(sgep, cm->cm_sge, ieee_sge_size); cm->cm_sge = (MPI25_SGE_IO_UNION *)((uintptr_t)cm->cm_sge + ieee_sge_size); return (0); } /* * Add one dma segment to the scatter-gather list for a command. */ int mpr_add_dmaseg(struct mpr_command *cm, vm_paddr_t pa, size_t len, u_int flags, int segsleft) { MPI2_SGE_SIMPLE64 sge; MPI2_IEEE_SGE_SIMPLE64 ieee_sge; if (!(cm->cm_flags & MPR_CM_FLAGS_SGE_SIMPLE)) { ieee_sge.Flags = (MPI2_IEEE_SGE_FLAGS_SIMPLE_ELEMENT | MPI2_IEEE_SGE_FLAGS_SYSTEM_ADDR); ieee_sge.Length = len; mpr_from_u64(pa, &ieee_sge.Address); return (mpr_push_ieee_sge(cm, &ieee_sge, segsleft)); } else { /* * This driver always uses 64-bit address elements for * simplicity. */ flags |= MPI2_SGE_FLAGS_SIMPLE_ELEMENT | MPI2_SGE_FLAGS_64_BIT_ADDRESSING; /* Set Endian safe macro in mpr_push_sge */ sge.FlagsLength = len | (flags << MPI2_SGE_FLAGS_SHIFT); mpr_from_u64(pa, &sge.Address); return (mpr_push_sge(cm, &sge, sizeof sge, segsleft)); } } static void mpr_data_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error) { struct mpr_softc *sc; struct mpr_command *cm; u_int i, dir, sflags; cm = (struct mpr_command *)arg; sc = cm->cm_sc; /* * In this case, just print out a warning and let the chip tell the * user they did the wrong thing. */ if ((cm->cm_max_segs != 0) && (nsegs > cm->cm_max_segs)) { mpr_dprint(sc, MPR_ERROR, "%s: warning: busdma returned %d " "segments, more than the %d allowed\n", __func__, nsegs, cm->cm_max_segs); } /* * Set up DMA direction flags. Bi-directional requests are also handled * here. In that case, both direction flags will be set. */ sflags = 0; if (cm->cm_flags & MPR_CM_FLAGS_SMP_PASS) { /* * We have to add a special case for SMP passthrough, there * is no easy way to generically handle it. The first * S/G element is used for the command (therefore the * direction bit needs to be set). The second one is used * for the reply. We'll leave it to the caller to make * sure we only have two buffers. */ /* * Even though the busdma man page says it doesn't make * sense to have both direction flags, it does in this case. * We have one s/g element being accessed in each direction. */ dir = BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD; /* * Set the direction flag on the first buffer in the SMP * passthrough request. We'll clear it for the second one. 
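		 *
		 * In other words, for the two-element SMP passthrough list,
		 * element 0 (the request frame going to the IOC) keeps the
		 * direction bit set, and element 1 (the response coming back
		 * from the IOC) has it cleared in the loop below.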
*/ sflags |= MPI2_SGE_FLAGS_DIRECTION | MPI2_SGE_FLAGS_END_OF_BUFFER; } else if (cm->cm_flags & MPR_CM_FLAGS_DATAOUT) { sflags |= MPI2_SGE_FLAGS_HOST_TO_IOC; dir = BUS_DMASYNC_PREWRITE; } else dir = BUS_DMASYNC_PREREAD; /* Check if a native SG list is needed for an NVMe PCIe device. */ if (cm->cm_targ && cm->cm_targ->is_nvme && mpr_check_pcie_native_sgl(sc, cm, segs, nsegs) == 0) { /* A native SG list was built, skip to end. */ goto out; } for (i = 0; i < nsegs; i++) { if ((cm->cm_flags & MPR_CM_FLAGS_SMP_PASS) && (i != 0)) { sflags &= ~MPI2_SGE_FLAGS_DIRECTION; } error = mpr_add_dmaseg(cm, segs[i].ds_addr, segs[i].ds_len, sflags, nsegs - i); if (error != 0) { /* Resource shortage, roll back! */ if (ratecheck(&sc->lastfail, &mpr_chainfail_interval)) mpr_dprint(sc, MPR_INFO, "Out of chain frames, " "consider increasing hw.mpr.max_chains.\n"); cm->cm_flags |= MPR_CM_FLAGS_CHAIN_FAILED; mpr_complete_command(sc, cm); return; } } out: bus_dmamap_sync(sc->buffer_dmat, cm->cm_dmamap, dir); mpr_enqueue_request(sc, cm); return; } static void mpr_data_cb2(void *arg, bus_dma_segment_t *segs, int nsegs, bus_size_t mapsize, int error) { mpr_data_cb(arg, segs, nsegs, error); } /* * This is the routine to enqueue commands ansynchronously. * Note that the only error path here is from bus_dmamap_load(), which can * return EINPROGRESS if it is waiting for resources. Other than this, it's * assumed that if you have a command in-hand, then you have enough credits * to use it. */ int mpr_map_command(struct mpr_softc *sc, struct mpr_command *cm) { int error = 0; if (cm->cm_flags & MPR_CM_FLAGS_USE_UIO) { error = bus_dmamap_load_uio(sc->buffer_dmat, cm->cm_dmamap, &cm->cm_uio, mpr_data_cb2, cm, 0); } else if (cm->cm_flags & MPR_CM_FLAGS_USE_CCB) { error = bus_dmamap_load_ccb(sc->buffer_dmat, cm->cm_dmamap, cm->cm_data, mpr_data_cb, cm, 0); } else if ((cm->cm_data != NULL) && (cm->cm_length != 0)) { error = bus_dmamap_load(sc->buffer_dmat, cm->cm_dmamap, cm->cm_data, cm->cm_length, mpr_data_cb, cm, 0); } else { /* Add a zero-length element as needed */ if (cm->cm_sge != NULL) mpr_add_dmaseg(cm, 0, 0, 0, 1); mpr_enqueue_request(sc, cm); } return (error); } /* * This is the routine to enqueue commands synchronously. An error of * EINPROGRESS from mpr_map_command() is ignored since the command will * be executed and enqueued automatically. Other errors come from msleep(). */ int mpr_wait_command(struct mpr_softc *sc, struct mpr_command **cmp, int timeout, int sleep_flag) { int error, rc; struct timeval cur_time, start_time; struct mpr_command *cm = *cmp; if (sc->mpr_flags & MPR_FLAGS_DIAGRESET) return EBUSY; cm->cm_complete = NULL; cm->cm_flags |= (MPR_CM_FLAGS_WAKEUP + MPR_CM_FLAGS_POLLED); error = mpr_map_command(sc, cm); if ((error != 0) && (error != EINPROGRESS)) return (error); // Check for context and wait for 50 mSec at a time until time has // expired or the command has finished. If msleep can't be used, need // to poll. #if __FreeBSD_version >= 1000029 if (curthread->td_no_sleeping) #else //__FreeBSD_version < 1000029 if (curthread->td_pflags & TDP_NOSLEEPING) #endif //__FreeBSD_version >= 1000029 sleep_flag = NO_SLEEP; getmicrouptime(&start_time); if (mtx_owned(&sc->mpr_mtx) && sleep_flag == CAN_SLEEP) { error = msleep(cm, &sc->mpr_mtx, 0, "mprwait", timeout*hz); if (error == EWOULDBLOCK) { /* * Record the actual elapsed time in the case of a * timeout for the message below. 
*/ getmicrouptime(&cur_time); timevalsub(&cur_time, &start_time); } } else { while ((cm->cm_flags & MPR_CM_FLAGS_COMPLETE) == 0) { mpr_intr_locked(sc); if (sleep_flag == CAN_SLEEP) pause("mprwait", hz/20); else DELAY(50000); getmicrouptime(&cur_time); timevalsub(&cur_time, &start_time); if (cur_time.tv_sec > timeout) { error = EWOULDBLOCK; break; } } } if (error == EWOULDBLOCK) { mpr_dprint(sc, MPR_FAULT, "Calling Reinit from %s, timeout=%d," " elapsed=%jd\n", __func__, timeout, (intmax_t)cur_time.tv_sec); rc = mpr_reinit(sc); mpr_dprint(sc, MPR_FAULT, "Reinit %s\n", (rc == 0) ? "success" : "failed"); if (sc->mpr_flags & MPR_FLAGS_REALLOCATED) { /* * Tell the caller that we freed the command in a * reinit. */ *cmp = NULL; } error = ETIMEDOUT; } return (error); } /* * This is the routine to enqueue a command synchonously and poll for * completion. Its use should be rare. */ int mpr_request_polled(struct mpr_softc *sc, struct mpr_command **cmp) { int error, rc; struct timeval cur_time, start_time; struct mpr_command *cm = *cmp; error = 0; cm->cm_flags |= MPR_CM_FLAGS_POLLED; cm->cm_complete = NULL; mpr_map_command(sc, cm); getmicrouptime(&start_time); while ((cm->cm_flags & MPR_CM_FLAGS_COMPLETE) == 0) { mpr_intr_locked(sc); if (mtx_owned(&sc->mpr_mtx)) msleep(&sc->msleep_fake_chan, &sc->mpr_mtx, 0, "mprpoll", hz/20); else pause("mprpoll", hz/20); /* * Check for real-time timeout and fail if more than 60 seconds. */ getmicrouptime(&cur_time); timevalsub(&cur_time, &start_time); if (cur_time.tv_sec > 60) { mpr_dprint(sc, MPR_FAULT, "polling failed\n"); error = ETIMEDOUT; break; } } if (error) { mpr_dprint(sc, MPR_FAULT, "Calling Reinit from %s\n", __func__); rc = mpr_reinit(sc); mpr_dprint(sc, MPR_FAULT, "Reinit %s\n", (rc == 0) ? "success" : "failed"); if (sc->mpr_flags & MPR_FLAGS_REALLOCATED) { /* * Tell the caller that we freed the command in a * reinit. */ *cmp = NULL; } } return (error); } /* * The MPT driver had a verbose interface for config pages. In this driver, * reduce it to much simpler terms, similar to the Linux driver. 
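 *
 * The result is a single entry point that takes an mpr_config_params
 * descriptor: the caller fills in the action, page header and address, an
 * optional data buffer, and either a completion callback (asynchronous) or a
 * NULL callback (the call then sleeps until the page has been read).  A
 * minimal synchronous usage sketch follows; the page type and number are
 * placeholders and real callers would normally fetch the page header first,
 * so treat it as illustrative only.
 */

static inline int
example_read_config_page(struct mpr_softc *sc, void *buf, u_int buflen)
{
	struct mpr_config_params params;
	int error;

	bzero(&params, sizeof(params));
	params.action = MPI2_CONFIG_ACTION_PAGE_READ_CURRENT;
	params.hdr.Struct.PageType = MPI2_CONFIG_PAGETYPE_IOC;	/* placeholder */
	params.hdr.Struct.PageNumber = 1;			/* placeholder */
	params.page_address = 0;
	params.buffer = buf;
	params.length = buflen;
	params.callback = NULL;			/* NULL => synchronous read */

	error = mpr_read_config_page(sc, &params);
	if (error == 0 &&
	    (params.status & MPI2_IOCSTATUS_MASK) != MPI2_IOCSTATUS_SUCCESS)
		error = EIO;			/* IOC rejected the request */
	return (error);
}

/*
 * The driver's implementation follows.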
*/ int mpr_read_config_page(struct mpr_softc *sc, struct mpr_config_params *params) { MPI2_CONFIG_REQUEST *req; struct mpr_command *cm; int error; if (sc->mpr_flags & MPR_FLAGS_BUSY) { return (EBUSY); } cm = mpr_alloc_command(sc); if (cm == NULL) { return (EBUSY); } req = (MPI2_CONFIG_REQUEST *)cm->cm_req; req->Function = MPI2_FUNCTION_CONFIG; req->Action = params->action; req->SGLFlags = 0; req->ChainOffset = 0; req->PageAddress = params->page_address; if (params->hdr.Struct.PageType == MPI2_CONFIG_PAGETYPE_EXTENDED) { MPI2_CONFIG_EXTENDED_PAGE_HEADER *hdr; hdr = ¶ms->hdr.Ext; req->ExtPageType = hdr->ExtPageType; req->ExtPageLength = hdr->ExtPageLength; req->Header.PageType = MPI2_CONFIG_PAGETYPE_EXTENDED; req->Header.PageLength = 0; /* Must be set to zero */ req->Header.PageNumber = hdr->PageNumber; req->Header.PageVersion = hdr->PageVersion; } else { MPI2_CONFIG_PAGE_HEADER *hdr; hdr = ¶ms->hdr.Struct; req->Header.PageType = hdr->PageType; req->Header.PageNumber = hdr->PageNumber; req->Header.PageLength = hdr->PageLength; req->Header.PageVersion = hdr->PageVersion; } cm->cm_data = params->buffer; cm->cm_length = params->length; if (cm->cm_data != NULL) { cm->cm_sge = &req->PageBufferSGE; cm->cm_sglsize = sizeof(MPI2_SGE_IO_UNION); cm->cm_flags = MPR_CM_FLAGS_SGE_SIMPLE | MPR_CM_FLAGS_DATAIN; } else cm->cm_sge = NULL; cm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE; cm->cm_complete_data = params; if (params->callback != NULL) { cm->cm_complete = mpr_config_complete; return (mpr_map_command(sc, cm)); } else { error = mpr_wait_command(sc, &cm, 0, CAN_SLEEP); if (error) { mpr_dprint(sc, MPR_FAULT, "Error %d reading config page\n", error); if (cm != NULL) mpr_free_command(sc, cm); return (error); } mpr_config_complete(sc, cm); } return (0); } int mpr_write_config_page(struct mpr_softc *sc, struct mpr_config_params *params) { return (EINVAL); } static void mpr_config_complete(struct mpr_softc *sc, struct mpr_command *cm) { MPI2_CONFIG_REPLY *reply; struct mpr_config_params *params; MPR_FUNCTRACE(sc); params = cm->cm_complete_data; if (cm->cm_data != NULL) { bus_dmamap_sync(sc->buffer_dmat, cm->cm_dmamap, BUS_DMASYNC_POSTREAD); bus_dmamap_unload(sc->buffer_dmat, cm->cm_dmamap); } /* * XXX KDM need to do more error recovery? This results in the * device in question not getting probed. */ if ((cm->cm_flags & MPR_CM_FLAGS_ERROR_MASK) != 0) { params->status = MPI2_IOCSTATUS_BUSY; goto done; } reply = (MPI2_CONFIG_REPLY *)cm->cm_reply; if (reply == NULL) { params->status = MPI2_IOCSTATUS_BUSY; goto done; } params->status = reply->IOCStatus; if (params->hdr.Struct.PageType == MPI2_CONFIG_PAGETYPE_EXTENDED) { params->hdr.Ext.ExtPageType = reply->ExtPageType; params->hdr.Ext.ExtPageLength = reply->ExtPageLength; params->hdr.Ext.PageType = reply->Header.PageType; params->hdr.Ext.PageNumber = reply->Header.PageNumber; params->hdr.Ext.PageVersion = reply->Header.PageVersion; } else { params->hdr.Struct.PageType = reply->Header.PageType; params->hdr.Struct.PageNumber = reply->Header.PageNumber; params->hdr.Struct.PageLength = reply->Header.PageLength; params->hdr.Struct.PageVersion = reply->Header.PageVersion; } done: mpr_free_command(sc, cm); if (params->callback != NULL) params->callback(sc, params); return; } Index: head/sys/dev/mpr/mpr_mapping.c =================================================================== --- head/sys/dev/mpr/mpr_mapping.c (revision 327948) +++ head/sys/dev/mpr/mpr_mapping.c (revision 327949) @@ -1,3131 +1,3131 @@ /*- * Copyright (c) 2011-2015 LSI Corp. 
* Copyright (c) 2013-2016 Avago Technologies * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * Avago Technologies (LSI) MPT-Fusion Host Adapter FreeBSD */ #include __FBSDID("$FreeBSD$"); /* TODO Move headers to mprvar */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include /** * _mapping_clear_map_entry - Clear a particular mapping entry. * @map_entry: map table entry * * Returns nothing. */ static inline void _mapping_clear_map_entry(struct dev_mapping_table *map_entry) { map_entry->physical_id = 0; map_entry->device_info = 0; map_entry->phy_bits = 0; map_entry->dpm_entry_num = MPR_DPM_BAD_IDX; map_entry->dev_handle = 0; map_entry->id = -1; map_entry->missing_count = 0; map_entry->init_complete = 0; map_entry->TLR_bits = (u8)MPI2_SCSIIO_CONTROL_NO_TLR; } /** * _mapping_clear_enc_entry - Clear a particular enclosure table entry. * @enc_entry: enclosure table entry * * Returns nothing. */ static inline void _mapping_clear_enc_entry(struct enc_mapping_table *enc_entry) { enc_entry->enclosure_id = 0; enc_entry->start_index = MPR_MAPTABLE_BAD_IDX; enc_entry->phy_bits = 0; enc_entry->dpm_entry_num = MPR_DPM_BAD_IDX; enc_entry->enc_handle = 0; enc_entry->num_slots = 0; enc_entry->start_slot = 0; enc_entry->missing_count = 0; enc_entry->removal_flag = 0; enc_entry->skip_search = 0; enc_entry->init_complete = 0; } /** * _mapping_commit_enc_entry - write a particular enc entry in DPM page0. * @sc: per adapter object * @enc_entry: enclosure table entry * * Returns 0 for success, non-zero for failure. 
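 *
 * The detail worth calling out below is how the DPM entry's
 * MappingInformation word is packed: the enclosure's slot count is shifted
 * into the upper bits and the missing count occupies the low bits.  A
 * stripped-down sketch of that packing (the 4-bit shift and low-nibble mask
 * are assumptions for illustration; the driver relies on the
 * MPI2_DRVMAP0_MAPINFO_* definitions):
 */

static inline uint16_t
example_pack_mapping_information(uint16_t num_slots, uint16_t missing_count)
{
	/* slot count in bits 4..15, missing count in bits 0..3 (assumed layout) */
	return ((uint16_t)((num_slots << 4) | (missing_count & 0x0f)));
}

/*
 * The enclosure-entry commit routine follows.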
*/ static int _mapping_commit_enc_entry(struct mpr_softc *sc, struct enc_mapping_table *et_entry) { Mpi2DriverMap0Entry_t *dpm_entry; struct dev_mapping_table *mt_entry; Mpi2ConfigReply_t mpi_reply; Mpi2DriverMappingPage0_t config_page; if (!sc->is_dpm_enable) return 0; memset(&config_page, 0, sizeof(Mpi2DriverMappingPage0_t)); memcpy(&config_page.Header, (u8 *) sc->dpm_pg0, sizeof(MPI2_CONFIG_EXTENDED_PAGE_HEADER)); dpm_entry = (Mpi2DriverMap0Entry_t *)((u8 *)sc->dpm_pg0 + sizeof(MPI2_CONFIG_EXTENDED_PAGE_HEADER)); dpm_entry += et_entry->dpm_entry_num; dpm_entry->PhysicalIdentifier.Low = ( 0xFFFFFFFF & et_entry->enclosure_id); dpm_entry->PhysicalIdentifier.High = ( et_entry->enclosure_id >> 32); mt_entry = &sc->mapping_table[et_entry->start_index]; dpm_entry->DeviceIndex = htole16(mt_entry->id); dpm_entry->MappingInformation = et_entry->num_slots; dpm_entry->MappingInformation <<= MPI2_DRVMAP0_MAPINFO_SLOT_SHIFT; dpm_entry->MappingInformation |= et_entry->missing_count; dpm_entry->MappingInformation = htole16(dpm_entry->MappingInformation); dpm_entry->PhysicalBitsMapping = htole32(et_entry->phy_bits); dpm_entry->Reserved1 = 0; mpr_dprint(sc, MPR_MAPPING, "%s: Writing DPM entry %d for enclosure.\n", __func__, et_entry->dpm_entry_num); memcpy(&config_page.Entry, (u8 *)dpm_entry, sizeof(Mpi2DriverMap0Entry_t)); if (mpr_config_set_dpm_pg0(sc, &mpi_reply, &config_page, et_entry->dpm_entry_num)) { mpr_dprint(sc, MPR_ERROR | MPR_MAPPING, "%s: Write of DPM " "entry %d for enclosure failed.\n", __func__, et_entry->dpm_entry_num); dpm_entry->MappingInformation = le16toh(dpm_entry-> MappingInformation); dpm_entry->DeviceIndex = le16toh(dpm_entry->DeviceIndex); dpm_entry->PhysicalBitsMapping = le32toh(dpm_entry->PhysicalBitsMapping); return -1; } dpm_entry->MappingInformation = le16toh(dpm_entry-> MappingInformation); dpm_entry->DeviceIndex = le16toh(dpm_entry->DeviceIndex); dpm_entry->PhysicalBitsMapping = le32toh(dpm_entry->PhysicalBitsMapping); return 0; } /** * _mapping_commit_map_entry - write a particular map table entry in DPM page0. * @sc: per adapter object * @mt_entry: mapping table entry * * Returns 0 for success, non-zero for failure. */ static int _mapping_commit_map_entry(struct mpr_softc *sc, struct dev_mapping_table *mt_entry) { Mpi2DriverMap0Entry_t *dpm_entry; Mpi2ConfigReply_t mpi_reply; Mpi2DriverMappingPage0_t config_page; if (!sc->is_dpm_enable) return 0; /* * It's possible that this Map Entry points to a BAD DPM index. This * can happen if the Map Entry is a for a missing device and the DPM * entry that was being used by this device is now being used by some * new device. So, check for a BAD DPM index and just return if so. */ if (mt_entry->dpm_entry_num == MPR_DPM_BAD_IDX) { mpr_dprint(sc, MPR_MAPPING, "%s: DPM entry location for target " "%d is invalid. 
DPM will not be written.\n", __func__, mt_entry->id); return 0; } memset(&config_page, 0, sizeof(Mpi2DriverMappingPage0_t)); memcpy(&config_page.Header, (u8 *)sc->dpm_pg0, sizeof(MPI2_CONFIG_EXTENDED_PAGE_HEADER)); dpm_entry = (Mpi2DriverMap0Entry_t *)((u8 *) sc->dpm_pg0 + sizeof(MPI2_CONFIG_EXTENDED_PAGE_HEADER)); dpm_entry = dpm_entry + mt_entry->dpm_entry_num; dpm_entry->PhysicalIdentifier.Low = (0xFFFFFFFF & mt_entry->physical_id); dpm_entry->PhysicalIdentifier.High = (mt_entry->physical_id >> 32); dpm_entry->DeviceIndex = htole16(mt_entry->id); dpm_entry->MappingInformation = htole16(mt_entry->missing_count); dpm_entry->PhysicalBitsMapping = 0; dpm_entry->Reserved1 = 0; memcpy(&config_page.Entry, (u8 *)dpm_entry, sizeof(Mpi2DriverMap0Entry_t)); mpr_dprint(sc, MPR_MAPPING, "%s: Writing DPM entry %d for target %d.\n", __func__, mt_entry->dpm_entry_num, mt_entry->id); if (mpr_config_set_dpm_pg0(sc, &mpi_reply, &config_page, mt_entry->dpm_entry_num)) { mpr_dprint(sc, MPR_ERROR | MPR_MAPPING, "%s: Write of DPM " "entry %d for target %d failed.\n", __func__, mt_entry->dpm_entry_num, mt_entry->id); dpm_entry->MappingInformation = le16toh(dpm_entry-> MappingInformation); dpm_entry->DeviceIndex = le16toh(dpm_entry->DeviceIndex); return -1; } dpm_entry->MappingInformation = le16toh(dpm_entry->MappingInformation); dpm_entry->DeviceIndex = le16toh(dpm_entry->DeviceIndex); return 0; } /** * _mapping_get_ir_maprange - get start and end index for IR map range. * @sc: per adapter object * @start_idx: place holder for start index * @end_idx: place holder for end index * * The IR volumes can be mapped either at start or end of the mapping table * this function gets the detail of where IR volume mapping starts and ends * in the device mapping table * * Returns nothing. */ static void _mapping_get_ir_maprange(struct mpr_softc *sc, u32 *start_idx, u32 *end_idx) { u16 volume_mapping_flags; u16 ioc_pg8_flags = le16toh(sc->ioc_pg8.Flags); volume_mapping_flags = le16toh(sc->ioc_pg8.IRVolumeMappingFlags) & MPI2_IOCPAGE8_IRFLAGS_MASK_VOLUME_MAPPING_MODE; if (volume_mapping_flags == MPI2_IOCPAGE8_IRFLAGS_LOW_VOLUME_MAPPING) { *start_idx = 0; if (ioc_pg8_flags & MPI2_IOCPAGE8_FLAGS_RESERVED_TARGETID_0) *start_idx = 1; } else *start_idx = sc->max_devices - sc->max_volumes; *end_idx = *start_idx + sc->max_volumes - 1; } /** * _mapping_get_enc_idx_from_id - get enclosure index from enclosure ID * @sc: per adapter object * @enc_id: enclosure logical identifier * * Returns the index of enclosure entry on success or bad index. */ static u8 _mapping_get_enc_idx_from_id(struct mpr_softc *sc, u64 enc_id, u64 phy_bits) { struct enc_mapping_table *et_entry; u8 enc_idx = 0; for (enc_idx = 0; enc_idx < sc->num_enc_table_entries; enc_idx++) { et_entry = &sc->enclosure_table[enc_idx]; if ((et_entry->enclosure_id == le64toh(enc_id)) && (!et_entry->phy_bits || (et_entry->phy_bits & le32toh(phy_bits)))) return enc_idx; } return MPR_ENCTABLE_BAD_IDX; } /** * _mapping_get_enc_idx_from_handle - get enclosure index from handle * @sc: per adapter object * @enc_id: enclosure handle * * Returns the index of enclosure entry on success or bad index. 
*/ static u8 _mapping_get_enc_idx_from_handle(struct mpr_softc *sc, u16 handle) { struct enc_mapping_table *et_entry; u8 enc_idx = 0; for (enc_idx = 0; enc_idx < sc->num_enc_table_entries; enc_idx++) { et_entry = &sc->enclosure_table[enc_idx]; if (et_entry->missing_count) continue; if (et_entry->enc_handle == handle) return enc_idx; } return MPR_ENCTABLE_BAD_IDX; } /** * _mapping_get_high_missing_et_idx - get missing enclosure index * @sc: per adapter object * * Search through the enclosure table and identifies the enclosure entry * with high missing count and returns it's index * * Returns the index of enclosure entry on success or bad index. */ static u8 _mapping_get_high_missing_et_idx(struct mpr_softc *sc) { struct enc_mapping_table *et_entry; u8 high_missing_count = 0; u8 enc_idx, high_idx = MPR_ENCTABLE_BAD_IDX; for (enc_idx = 0; enc_idx < sc->num_enc_table_entries; enc_idx++) { et_entry = &sc->enclosure_table[enc_idx]; if ((et_entry->missing_count > high_missing_count) && !et_entry->skip_search) { high_missing_count = et_entry->missing_count; high_idx = enc_idx; } } return high_idx; } /** * _mapping_get_high_missing_mt_idx - get missing map table index * @sc: per adapter object * * Search through the map table and identifies the device entry * with high missing count and returns it's index * * Returns the index of map table entry on success or bad index. */ static u32 _mapping_get_high_missing_mt_idx(struct mpr_softc *sc) { u32 map_idx, high_idx = MPR_MAPTABLE_BAD_IDX; u8 high_missing_count = 0; u32 start_idx, end_idx, start_idx_ir, end_idx_ir; struct dev_mapping_table *mt_entry; u16 ioc_pg8_flags = le16toh(sc->ioc_pg8.Flags); start_idx = 0; start_idx_ir = 0; end_idx_ir = 0; end_idx = sc->max_devices; if (ioc_pg8_flags & MPI2_IOCPAGE8_FLAGS_RESERVED_TARGETID_0) start_idx = 1; if (sc->ir_firmware) { _mapping_get_ir_maprange(sc, &start_idx_ir, &end_idx_ir); if (start_idx == start_idx_ir) start_idx = end_idx_ir + 1; else end_idx = start_idx_ir; } mt_entry = &sc->mapping_table[start_idx]; for (map_idx = start_idx; map_idx < end_idx; map_idx++, mt_entry++) { if (mt_entry->missing_count > high_missing_count) { high_missing_count = mt_entry->missing_count; high_idx = map_idx; } } return high_idx; } /** * _mapping_get_ir_mt_idx_from_wwid - get map table index from volume WWID * @sc: per adapter object * @wwid: world wide unique ID of the volume * * Returns the index of map table entry on success or bad index. */ static u32 _mapping_get_ir_mt_idx_from_wwid(struct mpr_softc *sc, u64 wwid) { u32 start_idx, end_idx, map_idx; struct dev_mapping_table *mt_entry; _mapping_get_ir_maprange(sc, &start_idx, &end_idx); mt_entry = &sc->mapping_table[start_idx]; for (map_idx = start_idx; map_idx <= end_idx; map_idx++, mt_entry++) if (mt_entry->physical_id == wwid) return map_idx; return MPR_MAPTABLE_BAD_IDX; } /** * _mapping_get_mt_idx_from_id - get map table index from a device ID * @sc: per adapter object * @dev_id: device identifer (SAS Address) * * Returns the index of map table entry on success or bad index. 
*/ static u32 _mapping_get_mt_idx_from_id(struct mpr_softc *sc, u64 dev_id) { u32 map_idx; struct dev_mapping_table *mt_entry; for (map_idx = 0; map_idx < sc->max_devices; map_idx++) { mt_entry = &sc->mapping_table[map_idx]; if (mt_entry->physical_id == dev_id) return map_idx; } return MPR_MAPTABLE_BAD_IDX; } /** * _mapping_get_ir_mt_idx_from_handle - get map table index from volume handle * @sc: per adapter object * @volHandle: volume device handle * * Returns the index of map table entry on success or bad index. */ static u32 _mapping_get_ir_mt_idx_from_handle(struct mpr_softc *sc, u16 volHandle) { u32 start_idx, end_idx, map_idx; struct dev_mapping_table *mt_entry; _mapping_get_ir_maprange(sc, &start_idx, &end_idx); mt_entry = &sc->mapping_table[start_idx]; for (map_idx = start_idx; map_idx <= end_idx; map_idx++, mt_entry++) if (mt_entry->dev_handle == volHandle) return map_idx; return MPR_MAPTABLE_BAD_IDX; } /** * _mapping_get_mt_idx_from_handle - get map table index from handle * @sc: per adapter object * @handle: device handle * * Returns the index of map table entry on success or bad index. */ static u32 _mapping_get_mt_idx_from_handle(struct mpr_softc *sc, u16 handle) { u32 map_idx; struct dev_mapping_table *mt_entry; for (map_idx = 0; map_idx < sc->max_devices; map_idx++) { mt_entry = &sc->mapping_table[map_idx]; if (mt_entry->dev_handle == handle) return map_idx; } return MPR_MAPTABLE_BAD_IDX; } /** * _mapping_get_free_ir_mt_idx - get first free index for a volume * @sc: per adapter object * * Search through the mapping table for a free volume index. If no free * index is found, return the index of the volume entry with the highest * missing count. * * Returns the index of map table entry on success or bad index. */ static u32 _mapping_get_free_ir_mt_idx(struct mpr_softc *sc) { u8 high_missing_count = 0; u32 start_idx, end_idx, map_idx; u32 high_idx = MPR_MAPTABLE_BAD_IDX; struct dev_mapping_table *mt_entry; /* * The IN_USE flag should be clear if the entry is available to use. * This flag is cleared on initialization and when a volume is * deleted. All other times this flag should be set. If, for some * reason, a free entry cannot be found, look for the entry with the * highest missing count just in case there is one. */ _mapping_get_ir_maprange(sc, &start_idx, &end_idx); mt_entry = &sc->mapping_table[start_idx]; for (map_idx = start_idx; map_idx <= end_idx; map_idx++, mt_entry++) { if (!(mt_entry->device_info & MPR_MAP_IN_USE)) return map_idx; if (mt_entry->missing_count > high_missing_count) { high_missing_count = mt_entry->missing_count; high_idx = map_idx; } } if (high_idx == MPR_MAPTABLE_BAD_IDX) { mpr_dprint(sc, MPR_ERROR | MPR_MAPPING, "%s: Could not find a " "free entry in the mapping table for a Volume. The mapping " "table is probably corrupt.\n", __func__); } return high_idx; } /** * _mapping_get_free_mt_idx - get first free index for a device * @sc: per adapter object * @start_idx: offset in the table to start search * * Returns the index of map table entry on success or bad index.
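 *
 * Illustrative sketch only (not part of the driver): the add-device path
 * starts this search just past the reserved entries, and also past the
 * direct-attached Physical IDs when the device sits behind an expander
 * ("behind_expander" is an assumed flag standing in for a non-zero expander
 * handle):
 *
 *	u32 search_idx, map_idx;
 *
 *	search_idx = sc->num_rsvd_entries;
 *	if (behind_expander)
 *		search_idx += le16toh(sc->ioc_pg8.MaxNumPhysicalMappedIDs);
 *	map_idx = _mapping_get_free_mt_idx(sc, search_idx);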
*/ static u32 _mapping_get_free_mt_idx(struct mpr_softc *sc, u32 start_idx) { u32 map_idx, max_idx = sc->max_devices; struct dev_mapping_table *mt_entry = &sc->mapping_table[start_idx]; u16 volume_mapping_flags; volume_mapping_flags = le16toh(sc->ioc_pg8.IRVolumeMappingFlags) & MPI2_IOCPAGE8_IRFLAGS_MASK_VOLUME_MAPPING_MODE; if (sc->ir_firmware && (volume_mapping_flags == MPI2_IOCPAGE8_IRFLAGS_HIGH_VOLUME_MAPPING)) max_idx -= sc->max_volumes; for (map_idx = start_idx; map_idx < max_idx; map_idx++, mt_entry++) if (!(mt_entry->device_info & (MPR_MAP_IN_USE | MPR_DEV_RESERVED))) return map_idx; return MPR_MAPTABLE_BAD_IDX; } /** * _mapping_get_dpm_idx_from_id - get DPM index from ID * @sc: per adapter object * @id: volume WWID or enclosure ID or device ID * * Returns the index of DPM entry on success or bad index. */ static u16 _mapping_get_dpm_idx_from_id(struct mpr_softc *sc, u64 id, u32 phy_bits) { u16 entry_num; uint64_t PhysicalIdentifier; Mpi2DriverMap0Entry_t *dpm_entry; dpm_entry = (Mpi2DriverMap0Entry_t *)((u8 *)sc->dpm_pg0 + sizeof(MPI2_CONFIG_EXTENDED_PAGE_HEADER)); PhysicalIdentifier = dpm_entry->PhysicalIdentifier.High; PhysicalIdentifier = (PhysicalIdentifier << 32) | dpm_entry->PhysicalIdentifier.Low; for (entry_num = 0; entry_num < sc->max_dpm_entries; entry_num++, dpm_entry++) if ((id == PhysicalIdentifier) && (!phy_bits || !dpm_entry->PhysicalBitsMapping || (phy_bits & dpm_entry->PhysicalBitsMapping))) return entry_num; return MPR_DPM_BAD_IDX; } /** * _mapping_get_free_dpm_idx - get first available DPM index * @sc: per adapter object * * Returns the index of DPM entry on success or bad index. */ static u32 _mapping_get_free_dpm_idx(struct mpr_softc *sc) { u16 entry_num; Mpi2DriverMap0Entry_t *dpm_entry; u16 current_entry = MPR_DPM_BAD_IDX, missing_cnt, high_missing_cnt = 0; u64 physical_id; struct dev_mapping_table *mt_entry; u32 map_idx; for (entry_num = 0; entry_num < sc->max_dpm_entries; entry_num++) { dpm_entry = (Mpi2DriverMap0Entry_t *) ((u8 *)sc->dpm_pg0 + sizeof(MPI2_CONFIG_EXTENDED_PAGE_HEADER)); dpm_entry += entry_num; missing_cnt = dpm_entry->MappingInformation & MPI2_DRVMAP0_MAPINFO_MISSING_MASK; /* * If entry is used and not missing, then this entry can't be * used. Look at next one. */ if (sc->dpm_entry_used[entry_num] && !missing_cnt) continue; /* * If this entry is not used at all, then the missing count * doesn't matter. Just use this one. Otherwise, keep looking * and make sure the entry with the highest missing count is * used. */ if (!sc->dpm_entry_used[entry_num]) { current_entry = entry_num; break; } if ((current_entry == MPR_DPM_BAD_IDX) || (missing_cnt > high_missing_cnt)) { current_entry = entry_num; high_missing_cnt = missing_cnt; } } /* * If an entry has been found to use and it's already marked as used * it means that some device was already using this entry but it's * missing, and that means that the connection between the missing * device's DPM entry and the mapping table needs to be cleared. To do * this, use the Physical ID of the old device still in the DPM entry * to find its mapping table entry, then mark its DPM entry as BAD. 
*/ if ((current_entry != MPR_DPM_BAD_IDX) && sc->dpm_entry_used[current_entry]) { dpm_entry = (Mpi2DriverMap0Entry_t *) ((u8 *)sc->dpm_pg0 + sizeof(MPI2_CONFIG_EXTENDED_PAGE_HEADER)); dpm_entry += current_entry; physical_id = dpm_entry->PhysicalIdentifier.High; physical_id = (physical_id << 32) | dpm_entry->PhysicalIdentifier.Low; map_idx = _mapping_get_mt_idx_from_id(sc, physical_id); if (map_idx != MPR_MAPTABLE_BAD_IDX) { mt_entry = &sc->mapping_table[map_idx]; mt_entry->dpm_entry_num = MPR_DPM_BAD_IDX; } } return current_entry; } /** * _mapping_update_ir_missing_cnt - Updates missing count for a volume * @sc: per adapter object * @map_idx: map table index of the volume * @element: IR configuration change element * @wwid: IR volume ID. * * Updates the missing count in the map table and in the DPM entry for a volume * * Returns nothing. */ static void _mapping_update_ir_missing_cnt(struct mpr_softc *sc, u32 map_idx, Mpi2EventIrConfigElement_t *element, u64 wwid) { struct dev_mapping_table *mt_entry; u8 missing_cnt, reason = element->ReasonCode, update_dpm = 1; u16 dpm_idx; Mpi2DriverMap0Entry_t *dpm_entry; /* * Depending on the reason code, update the missing count. Always set * the init_complete flag when here, so just do it first. That flag is * used for volumes to make sure that the DPM entry has been updated. * When a volume is deleted, clear the map entry's IN_USE flag so that * the entry can be used again if another volume is created. Also clear * its dev_handle entry so that other functions can't find this volume * by the handle, since it's not defined any longer. */ mt_entry = &sc->mapping_table[map_idx]; mt_entry->init_complete = 1; if ((reason == MPI2_EVENT_IR_CHANGE_RC_ADDED) || (reason == MPI2_EVENT_IR_CHANGE_RC_VOLUME_CREATED)) { mt_entry->missing_count = 0; } else if (reason == MPI2_EVENT_IR_CHANGE_RC_VOLUME_DELETED) { if (mt_entry->missing_count < MPR_MAX_MISSING_COUNT) mt_entry->missing_count++; mt_entry->device_info &= ~MPR_MAP_IN_USE; mt_entry->dev_handle = 0; } /* * If persistent mapping is enabled, update the DPM with the new missing * count for the volume. If the DPM index is bad, get a free one. If * it's bad for a volume that's being deleted do nothing because that * volume doesn't have a DPM entry. */ if (!sc->is_dpm_enable) return; dpm_idx = mt_entry->dpm_entry_num; if (dpm_idx == MPR_DPM_BAD_IDX) { if (reason == MPI2_EVENT_IR_CHANGE_RC_VOLUME_DELETED) { mpr_dprint(sc, MPR_MAPPING, "%s: Volume being deleted " "is not in DPM so DPM missing count will not be " "updated.\n", __func__); return; } } if (dpm_idx == MPR_DPM_BAD_IDX) dpm_idx = _mapping_get_free_dpm_idx(sc); /* * Got the DPM entry for the volume or found a free DPM entry if this is * a new volume. Check if the current information is outdated. */ if (dpm_idx != MPR_DPM_BAD_IDX) { dpm_entry = (Mpi2DriverMap0Entry_t *)((u8 *)sc->dpm_pg0 + sizeof(MPI2_CONFIG_EXTENDED_PAGE_HEADER)); dpm_entry += dpm_idx; missing_cnt = dpm_entry->MappingInformation & MPI2_DRVMAP0_MAPINFO_MISSING_MASK; if ((mt_entry->physical_id == le64toh(((u64)dpm_entry->PhysicalIdentifier.High << 32) | (u64)dpm_entry->PhysicalIdentifier.Low)) && (missing_cnt == mt_entry->missing_count)) { mpr_dprint(sc, MPR_MAPPING, "%s: DPM entry for volume " "with target ID %d does not require an update.\n", __func__, mt_entry->id); update_dpm = 0; } } /* * Update the volume's persistent info if it's new or the ID or missing * count has changed. If a good DPM index has not been found by now, * there is no space left in the DPM table. 
*/ if ((dpm_idx != MPR_DPM_BAD_IDX) && update_dpm) { mpr_dprint(sc, MPR_MAPPING, "%s: Update DPM entry for volume " "with target ID %d.\n", __func__, mt_entry->id); mt_entry->dpm_entry_num = dpm_idx; dpm_entry = (Mpi2DriverMap0Entry_t *)((u8 *)sc->dpm_pg0 + sizeof(MPI2_CONFIG_EXTENDED_PAGE_HEADER)); dpm_entry += dpm_idx; dpm_entry->PhysicalIdentifier.Low = (0xFFFFFFFF & mt_entry->physical_id); dpm_entry->PhysicalIdentifier.High = (mt_entry->physical_id >> 32); dpm_entry->DeviceIndex = map_idx; dpm_entry->MappingInformation = mt_entry->missing_count; dpm_entry->PhysicalBitsMapping = 0; dpm_entry->Reserved1 = 0; sc->dpm_flush_entry[dpm_idx] = 1; sc->dpm_entry_used[dpm_idx] = 1; } else if (dpm_idx == MPR_DPM_BAD_IDX) { mpr_dprint(sc, MPR_INFO | MPR_MAPPING, "%s: No space to add an " "entry in the DPM table for volume with target ID %d.\n", __func__, mt_entry->id); } } /** * _mapping_add_to_removal_table - add DPM index to the removal table * @sc: per adapter object * @dpm_idx: Index of DPM entry to remove * * Adds a DPM entry number to the removal table. * * Returns nothing. */ static void _mapping_add_to_removal_table(struct mpr_softc *sc, u16 dpm_idx) { struct map_removal_table *remove_entry; u32 i; /* * This is only used to remove entries from the DPM in the controller. * If DPM is not enabled, just return. */ if (!sc->is_dpm_enable) return; /* * Find the first available removal_table entry and add the new entry * there. */ remove_entry = sc->removal_table; for (i = 0; i < sc->max_devices; i++, remove_entry++) { if (remove_entry->dpm_entry_num != MPR_DPM_BAD_IDX) continue; mpr_dprint(sc, MPR_MAPPING, "%s: Adding DPM entry %d to table " "for removal.\n", __func__, dpm_idx); remove_entry->dpm_entry_num = dpm_idx; break; } } /** * _mapping_inc_missing_count * @sc: per adapter object * @map_idx: index into the mapping table for the device that is missing * * Increment the missing count in the mapping table for a SAS, SATA, or PCIe * device that is not responding. If Persitent Mapping is used, increment the * DPM entry as well. Currently, this function is only called if the target * goes missing, so after initialization has completed. This means that the * missing count can only go from 0 to 1 here. The missing count is incremented * during initialization as well, so that's where a target's missing count can * go past 1. * * Returns nothing. */ static void _mapping_inc_missing_count(struct mpr_softc *sc, u32 map_idx) { u16 ioc_pg8_flags = le16toh(sc->ioc_pg8.Flags); struct dev_mapping_table *mt_entry; Mpi2DriverMap0Entry_t *dpm_entry; if (map_idx == MPR_MAPTABLE_BAD_IDX) { mpr_dprint(sc, MPR_INFO | MPR_MAPPING, "%s: device is already " "removed from mapping table\n", __func__); return; } mt_entry = &sc->mapping_table[map_idx]; if (mt_entry->missing_count < MPR_MAX_MISSING_COUNT) mt_entry->missing_count++; /* * When using Enc/Slot mapping, when a device is removed, it's mapping * table information should be cleared. Otherwise, the target ID will * be incorrect if this same device is re-added to a different slot. */ if ((ioc_pg8_flags & MPI2_IOCPAGE8_FLAGS_MASK_MAPPING_MODE) == MPI2_IOCPAGE8_FLAGS_ENCLOSURE_SLOT_MAPPING) { _mapping_clear_map_entry(mt_entry); } /* * When using device mapping, update the missing count in the DPM entry, * but only if the missing count has changed. 
*/ if (((ioc_pg8_flags & MPI2_IOCPAGE8_FLAGS_MASK_MAPPING_MODE) == MPI2_IOCPAGE8_FLAGS_DEVICE_PERSISTENCE_MAPPING) && sc->is_dpm_enable && mt_entry->dpm_entry_num != MPR_DPM_BAD_IDX) { dpm_entry = (Mpi2DriverMap0Entry_t *) ((u8 *)sc->dpm_pg0 + sizeof(MPI2_CONFIG_EXTENDED_PAGE_HEADER)); dpm_entry += mt_entry->dpm_entry_num; if (dpm_entry->MappingInformation != mt_entry->missing_count) { dpm_entry->MappingInformation = mt_entry->missing_count; sc->dpm_flush_entry[mt_entry->dpm_entry_num] = 1; } } } /** * _mapping_update_missing_count - Update missing count for a device * @sc: per adapter object * @topo_change: Topology change event entry * * Search through the topology change list and if any device is found not * responding it's associated map table entry and DPM entry is updated * * Returns nothing. */ static void _mapping_update_missing_count(struct mpr_softc *sc, struct _map_topology_change *topo_change) { u8 entry; struct _map_phy_change *phy_change; u32 map_idx; for (entry = 0; entry < topo_change->num_entries; entry++) { phy_change = &topo_change->phy_details[entry]; if (!phy_change->dev_handle || (phy_change->reason != MPI2_EVENT_SAS_TOPO_RC_TARG_NOT_RESPONDING)) continue; map_idx = _mapping_get_mt_idx_from_handle(sc, phy_change-> dev_handle); phy_change->is_processed = 1; _mapping_inc_missing_count(sc, map_idx); } } /** * _mapping_update_pcie_missing_count - Update missing count for a PCIe device * @sc: per adapter object * @topo_change: Topology change event entry * * Search through the PCIe topology change list and if any device is found not * responding it's associated map table entry and DPM entry is updated * * Returns nothing. */ static void _mapping_update_pcie_missing_count(struct mpr_softc *sc, struct _map_pcie_topology_change *topo_change) { u8 entry; struct _map_port_change *port_change; u32 map_idx; for (entry = 0; entry < topo_change->num_entries; entry++) { port_change = &topo_change->port_details[entry]; if (!port_change->dev_handle || (port_change->reason != MPI26_EVENT_PCIE_TOPO_PS_NOT_RESPONDING)) continue; map_idx = _mapping_get_mt_idx_from_handle(sc, port_change-> dev_handle); port_change->is_processed = 1; _mapping_inc_missing_count(sc, map_idx); } } /** * _mapping_find_enc_map_space -find map table entries for enclosure * @sc: per adapter object * @et_entry: enclosure entry * * Search through the mapping table defragment it and provide contiguous * space in map table for a particular enclosure entry * * Returns start index in map table or bad index. */ static u32 _mapping_find_enc_map_space(struct mpr_softc *sc, struct enc_mapping_table *et_entry) { u16 vol_mapping_flags; u32 skip_count, end_of_table, map_idx, enc_idx; u16 num_found; u32 start_idx = MPR_MAPTABLE_BAD_IDX; struct dev_mapping_table *mt_entry; struct enc_mapping_table *enc_entry; unsigned char done_flag = 0, found_space; u16 max_num_phy_ids = le16toh(sc->ioc_pg8.MaxNumPhysicalMappedIDs); skip_count = sc->num_rsvd_entries; num_found = 0; vol_mapping_flags = le16toh(sc->ioc_pg8.IRVolumeMappingFlags) & MPI2_IOCPAGE8_IRFLAGS_MASK_VOLUME_MAPPING_MODE; /* * The end of the mapping table depends on where volumes are kept, if * IR is enabled. */ if (!sc->ir_firmware) end_of_table = sc->max_devices; else if (vol_mapping_flags == MPI2_IOCPAGE8_IRFLAGS_LOW_VOLUME_MAPPING) end_of_table = sc->max_devices; else end_of_table = sc->max_devices - sc->max_volumes; /* * The skip_count is the number of entries that are reserved at the * beginning of the mapping table. 
But, it does not include the number * of Physical IDs that are reserved for direct attached devices. Look * through the mapping table after these reserved entries to see if * the devices for this enclosure are already mapped. The PHY bit check * is used to make sure that at least one PHY bit is common between the * enclosure and the device that is already mapped. */ mpr_dprint(sc, MPR_MAPPING, "%s: Looking for space in the mapping " "table for added enclosure.\n", __func__); for (map_idx = (max_num_phy_ids + skip_count); map_idx < end_of_table; map_idx++) { mt_entry = &sc->mapping_table[map_idx]; if ((et_entry->enclosure_id == mt_entry->physical_id) && (!mt_entry->phy_bits || (mt_entry->phy_bits & et_entry->phy_bits))) { num_found += 1; if (num_found == et_entry->num_slots) { start_idx = (map_idx - num_found) + 1; mpr_dprint(sc, MPR_MAPPING, "%s: Found space " "in the mapping for enclosure at map index " "%d.\n", __func__, start_idx); return start_idx; } } else num_found = 0; } /* * If the enclosure's devices are not mapped already, look for * contiguous entries in the mapping table that are not reserved. If * enough entries are found, return the starting index for that space. */ num_found = 0; for (map_idx = (max_num_phy_ids + skip_count); map_idx < end_of_table; map_idx++) { mt_entry = &sc->mapping_table[map_idx]; if (!(mt_entry->device_info & MPR_DEV_RESERVED)) { num_found += 1; if (num_found == et_entry->num_slots) { start_idx = (map_idx - num_found) + 1; mpr_dprint(sc, MPR_MAPPING, "%s: Found space " "in the mapping for enclosure at map index " "%d.\n", __func__, start_idx); return start_idx; } } else num_found = 0; } /* * If here, it means that not enough space in the mapping table was * found to support this enclosure, so go through the enclosure table to * see if any enclosure entries have a missing count. If so, get the * enclosure with the highest missing count and check it to see if there * is enough space for the new enclosure. */ while (!done_flag) { enc_idx = _mapping_get_high_missing_et_idx(sc); if (enc_idx == MPR_ENCTABLE_BAD_IDX) { mpr_dprint(sc, MPR_MAPPING, "%s: Not enough space was " "found in the mapping for the added enclosure.\n", __func__); return MPR_MAPTABLE_BAD_IDX; } /* * Found a missing enclosure. Set the skip_search flag so this * enclosure is not checked again for a high missing count if * the loop continues. This way, all missing enclosures can * have their space added together to find enough space in the * mapping table for the added enclosure. The space must be * contiguous. */ mpr_dprint(sc, MPR_MAPPING, "%s: Space from a missing " "enclosure was found.\n", __func__); enc_entry = &sc->enclosure_table[enc_idx]; enc_entry->skip_search = 1; /* * Unmark all of the missing enclosure's device's reserved * space. These will be remarked as reserved if this missing * enclosure's space is not used. */ mpr_dprint(sc, MPR_MAPPING, "%s: Clear the reserved flag for " "all of the map entries for the enclosure.\n", __func__); mt_entry = &sc->mapping_table[enc_entry->start_index]; for (map_idx = enc_entry->start_index; map_idx < (enc_entry->start_index + enc_entry->num_slots); map_idx++, mt_entry++) mt_entry->device_info &= ~MPR_DEV_RESERVED; /* * Now that space has been unreserved, check again to see if * enough space is available for the new enclosure. 
*/ mpr_dprint(sc, MPR_MAPPING, "%s: Check if new mapping space is " "enough for the new enclosure.\n", __func__); found_space = 0; num_found = 0; for (map_idx = (max_num_phy_ids + skip_count); map_idx < end_of_table; map_idx++) { mt_entry = &sc->mapping_table[map_idx]; if (!(mt_entry->device_info & MPR_DEV_RESERVED)) { num_found += 1; if (num_found == et_entry->num_slots) { start_idx = (map_idx - num_found) + 1; found_space = 1; break; } } else num_found = 0; } if (!found_space) continue; /* * If enough space was found, all of the missing enclosures that * will be used for the new enclosure must be added to the * removal table. Then all mappings for the enclosure's devices * and for the enclosure itself need to be cleared. There may be * more than one enclosure to add to the removal table and * clear. */ mpr_dprint(sc, MPR_MAPPING, "%s: Found space in the mapping " "for enclosure at map index %d.\n", __func__, start_idx); for (map_idx = start_idx; map_idx < (start_idx + num_found); map_idx++) { enc_entry = sc->enclosure_table; for (enc_idx = 0; enc_idx < sc->num_enc_table_entries; enc_idx++, enc_entry++) { if (map_idx < enc_entry->start_index || map_idx > (enc_entry->start_index + enc_entry->num_slots)) continue; if (!enc_entry->removal_flag) { mpr_dprint(sc, MPR_MAPPING, "%s: " "Enclosure %d will be removed from " "the mapping table.\n", __func__, enc_idx); enc_entry->removal_flag = 1; _mapping_add_to_removal_table(sc, enc_entry->dpm_entry_num); } mt_entry = &sc->mapping_table[map_idx]; _mapping_clear_map_entry(mt_entry); if (map_idx == (enc_entry->start_index + enc_entry->num_slots - 1)) _mapping_clear_enc_entry(et_entry); } } /* * During the search for space for this enclosure, some entries * in the mapping table may have been unreserved. Go back and * change all of these to reserved again. Only the enclosures * with the removal_flag set should be left as unreserved. The * skip_search flag needs to be cleared as well so that the * enclosure's space will be looked at the next time space is * needed. 
*/ enc_entry = sc->enclosure_table; for (enc_idx = 0; enc_idx < sc->num_enc_table_entries; enc_idx++, enc_entry++) { if (!enc_entry->removal_flag) { mpr_dprint(sc, MPR_MAPPING, "%s: Reset the " "reserved flag for all of the map entries " "for enclosure %d.\n", __func__, enc_idx); mt_entry = &sc->mapping_table[enc_entry-> start_index]; for (map_idx = enc_entry->start_index; map_idx < (enc_entry->start_index + enc_entry->num_slots); map_idx++, mt_entry++) mt_entry->device_info |= MPR_DEV_RESERVED; et_entry->skip_search = 0; } } done_flag = 1; } return start_idx; } /** * _mapping_get_dev_info -get information about newly added devices * @sc: per adapter object * @topo_change: Topology change event entry * * Search through the topology change event list and issues sas device pg0 * requests for the newly added device and reserved entries in tables * * Returns nothing */ static void _mapping_get_dev_info(struct mpr_softc *sc, struct _map_topology_change *topo_change) { u16 ioc_pg8_flags = le16toh(sc->ioc_pg8.Flags); Mpi2ConfigReply_t mpi_reply; Mpi2SasDevicePage0_t sas_device_pg0; u8 entry, enc_idx, phy_idx; u32 map_idx, index, device_info; struct _map_phy_change *phy_change, *tmp_phy_change; uint64_t sas_address; struct enc_mapping_table *et_entry; struct dev_mapping_table *mt_entry; u8 add_code = MPI2_EVENT_SAS_TOPO_RC_TARG_ADDED; int rc = 1; for (entry = 0; entry < topo_change->num_entries; entry++) { phy_change = &topo_change->phy_details[entry]; if (phy_change->is_processed || !phy_change->dev_handle || phy_change->reason != MPI2_EVENT_SAS_TOPO_RC_TARG_ADDED) continue; if (mpr_config_get_sas_device_pg0(sc, &mpi_reply, &sas_device_pg0, MPI2_SAS_DEVICE_PGAD_FORM_HANDLE, phy_change->dev_handle)) { phy_change->is_processed = 1; continue; } /* * Always get SATA Identify information because this is used * to determine if Start/Stop Unit should be sent to the drive * when the system is shutdown. */ device_info = le32toh(sas_device_pg0.DeviceInfo); sas_address = le32toh(sas_device_pg0.SASAddress.High); sas_address = (sas_address << 32) | le32toh(sas_device_pg0.SASAddress.Low); if ((device_info & MPI2_SAS_DEVICE_INFO_END_DEVICE) && (device_info & MPI2_SAS_DEVICE_INFO_SATA_DEVICE)) { rc = mprsas_get_sas_address_for_sata_disk(sc, &sas_address, phy_change->dev_handle, device_info, &phy_change->is_SATA_SSD); if (rc) { mpr_dprint(sc, MPR_ERROR, "%s: failed to get " "disk type (SSD or HDD) and SAS Address " "for SATA device with handle 0x%04x\n", __func__, phy_change->dev_handle); } } phy_change->physical_id = sas_address; phy_change->slot = le16toh(sas_device_pg0.Slot); phy_change->device_info = device_info; /* * When using Enc/Slot mapping, if this device is an enclosure * make sure that all of its slots can fit into the mapping * table. */ if ((ioc_pg8_flags & MPI2_IOCPAGE8_FLAGS_MASK_MAPPING_MODE) == MPI2_IOCPAGE8_FLAGS_ENCLOSURE_SLOT_MAPPING) { /* * The enclosure should already be in the enclosure * table due to the Enclosure Add event. If not, just * continue, nothing can be done. 
*/ enc_idx = _mapping_get_enc_idx_from_handle(sc, topo_change->enc_handle); if (enc_idx == MPR_ENCTABLE_BAD_IDX) { phy_change->is_processed = 1; mpr_dprint(sc, MPR_ERROR | MPR_MAPPING, "%s: " "failed to add the device with handle " "0x%04x because the enclosure is not in " "the mapping table\n", __func__, phy_change->dev_handle); continue; } if (!((phy_change->device_info & MPI2_SAS_DEVICE_INFO_END_DEVICE) && (phy_change->device_info & (MPI2_SAS_DEVICE_INFO_SSP_TARGET | MPI2_SAS_DEVICE_INFO_STP_TARGET | MPI2_SAS_DEVICE_INFO_SATA_DEVICE)))) { phy_change->is_processed = 1; continue; } et_entry = &sc->enclosure_table[enc_idx]; /* * If the enclosure already has a start_index, it's been * mapped, so go to the next Topo change. */ if (et_entry->start_index != MPR_MAPTABLE_BAD_IDX) continue; /* * If the Expander Handle is 0, the devices are direct * attached. In that case, the start_index must be just * after the reserved entries. Otherwise, find space in * the mapping table for the enclosure's devices. */ if (!topo_change->exp_handle) { map_idx = sc->num_rsvd_entries; et_entry->start_index = map_idx; } else { map_idx = _mapping_find_enc_map_space(sc, et_entry); et_entry->start_index = map_idx; /* * If space cannot be found to hold all of the * enclosure's devices in the mapping table, * there's no need to continue checking the * other devices in this event. Set all of the * phy_details for this event (if the change is * for an add) as already processed because none * of these devices can be added to the mapping * table. */ if (et_entry->start_index == MPR_MAPTABLE_BAD_IDX) { mpr_dprint(sc, MPR_ERROR | MPR_MAPPING, "%s: failed to add the enclosure " "with ID 0x%016jx because there is " "no free space available in the " "mapping table for all of the " "enclosure's devices.\n", __func__, (uintmax_t)et_entry->enclosure_id); phy_change->is_processed = 1; for (phy_idx = 0; phy_idx < topo_change->num_entries; phy_idx++) { tmp_phy_change = &topo_change->phy_details [phy_idx]; if (tmp_phy_change->reason == add_code) tmp_phy_change-> is_processed = 1; } break; } } /* * Found space in the mapping table for this enclosure. * Initialize each mapping table entry for the * enclosure. */ mpr_dprint(sc, MPR_MAPPING, "%s: Initialize %d map " "entries for the enclosure, starting at map index " " %d.\n", __func__, et_entry->num_slots, map_idx); mt_entry = &sc->mapping_table[map_idx]; for (index = map_idx; index < (et_entry->num_slots + map_idx); index++, mt_entry++) { mt_entry->device_info = MPR_DEV_RESERVED; mt_entry->physical_id = et_entry->enclosure_id; mt_entry->phy_bits = et_entry->phy_bits; mt_entry->missing_count = 0; } } } } /** * _mapping_get_pcie_dev_info -get information about newly added PCIe devices * @sc: per adapter object * @topo_change: Topology change event entry * * Searches through the PCIe topology change event list and issues PCIe device * pg0 requests for the newly added PCIe device. If the device is in an * enclosure, search for available space in the enclosure mapping table for the * device and reserve that space. 
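 *
 * Illustrative sketch only: the 64-bit WWID used as the device's physical ID
 * is assembled from the two 32-bit halves reported in PCIe Device Page 0,
 * just as the function body below does:
 *
 *	uint64_t pcie_wwid;
 *
 *	pcie_wwid = pcie_device_pg0.WWID.High;
 *	pcie_wwid = (pcie_wwid << 32) | pcie_device_pg0.WWID.Low;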
* * Returns nothing */ static void _mapping_get_pcie_dev_info(struct mpr_softc *sc, struct _map_pcie_topology_change *topo_change) { u16 ioc_pg8_flags = le16toh(sc->ioc_pg8.Flags); Mpi2ConfigReply_t mpi_reply; Mpi26PCIeDevicePage0_t pcie_device_pg0; u8 entry, enc_idx, port_idx; u32 map_idx, index; struct _map_port_change *port_change, *tmp_port_change; uint64_t pcie_wwid; struct enc_mapping_table *et_entry; struct dev_mapping_table *mt_entry; u8 add_code = MPI26_EVENT_PCIE_TOPO_PS_DEV_ADDED; for (entry = 0; entry < topo_change->num_entries; entry++) { port_change = &topo_change->port_details[entry]; if (port_change->is_processed || !port_change->dev_handle || port_change->reason != MPI26_EVENT_PCIE_TOPO_PS_DEV_ADDED) continue; if (mpr_config_get_pcie_device_pg0(sc, &mpi_reply, &pcie_device_pg0, MPI26_PCIE_DEVICE_PGAD_FORM_HANDLE, port_change->dev_handle)) { port_change->is_processed = 1; continue; } pcie_wwid = pcie_device_pg0.WWID.High; pcie_wwid = (pcie_wwid << 32) | pcie_device_pg0.WWID.Low; port_change->physical_id = pcie_wwid; port_change->slot = le16toh(pcie_device_pg0.Slot); port_change->device_info = le32toh(pcie_device_pg0.DeviceInfo); /* * When using Enc/Slot mapping, if this device is an enclosure * make sure that all of its slots can fit into the mapping * table. */ if ((ioc_pg8_flags & MPI2_IOCPAGE8_FLAGS_MASK_MAPPING_MODE) == MPI2_IOCPAGE8_FLAGS_ENCLOSURE_SLOT_MAPPING) { /* * The enclosure should already be in the enclosure * table due to the Enclosure Add event. If not, just * continue, nothing can be done. */ enc_idx = _mapping_get_enc_idx_from_handle(sc, topo_change->enc_handle); if (enc_idx == MPR_ENCTABLE_BAD_IDX) { port_change->is_processed = 1; mpr_dprint(sc, MPR_ERROR | MPR_MAPPING, "%s: " "failed to add the device with handle " "0x%04x because the enclosure is not in " "the mapping table\n", __func__, port_change->dev_handle); continue; } if (!(port_change->device_info & MPI26_PCIE_DEVINFO_NVME)) { port_change->is_processed = 1; continue; } et_entry = &sc->enclosure_table[enc_idx]; /* * If the enclosure already has a start_index, it's been * mapped, so go to the next Topo change. */ if (et_entry->start_index != MPR_MAPTABLE_BAD_IDX) continue; /* * If the Switch Handle is 0, the devices are direct * attached. In that case, the start_index must be just * after the reserved entries. Otherwise, find space in * the mapping table for the enclosure's devices. */ if (!topo_change->switch_dev_handle) { map_idx = sc->num_rsvd_entries; et_entry->start_index = map_idx; } else { map_idx = _mapping_find_enc_map_space(sc, et_entry); et_entry->start_index = map_idx; /* * If space cannot be found to hold all of the * enclosure's devices in the mapping table, * there's no need to continue checking the * other devices in this event. Set all of the * port_details for this event (if the change is * for an add) as already processed because none * of these devices can be added to the mapping * table. 
*/ if (et_entry->start_index == MPR_MAPTABLE_BAD_IDX) { mpr_dprint(sc, MPR_ERROR | MPR_MAPPING, "%s: failed to add the enclosure " "with ID 0x%016jx because there is " "no free space available in the " "mapping table for all of the " "enclosure's devices.\n", __func__, (uintmax_t)et_entry->enclosure_id); port_change->is_processed = 1; for (port_idx = 0; port_idx < topo_change->num_entries; port_idx++) { tmp_port_change = &topo_change->port_details [port_idx]; if (tmp_port_change->reason == add_code) tmp_port_change-> is_processed = 1; } break; } } /* * Found space in the mapping table for this enclosure. * Initialize each mapping table entry for the * enclosure. */ mpr_dprint(sc, MPR_MAPPING, "%s: Initialize %d map " "entries for the enclosure, starting at map index " " %d.\n", __func__, et_entry->num_slots, map_idx); mt_entry = &sc->mapping_table[map_idx]; for (index = map_idx; index < (et_entry->num_slots + map_idx); index++, mt_entry++) { mt_entry->device_info = MPR_DEV_RESERVED; mt_entry->physical_id = et_entry->enclosure_id; mt_entry->phy_bits = et_entry->phy_bits; mt_entry->missing_count = 0; } } } } /** * _mapping_set_mid_to_eid -set map table data from enclosure table * @sc: per adapter object * @et_entry: enclosure entry * * Returns nothing */ static inline void _mapping_set_mid_to_eid(struct mpr_softc *sc, struct enc_mapping_table *et_entry) { struct dev_mapping_table *mt_entry; u16 slots = et_entry->num_slots, map_idx; u32 start_idx = et_entry->start_index; if (start_idx != MPR_MAPTABLE_BAD_IDX) { mt_entry = &sc->mapping_table[start_idx]; for (map_idx = 0; map_idx < slots; map_idx++, mt_entry++) mt_entry->physical_id = et_entry->enclosure_id; } } /** * _mapping_clear_removed_entries - mark the entries to be cleared * @sc: per adapter object * * Search through the removal table and mark the entries which needs to be * flushed to DPM and also updates the map table and enclosure table by * clearing the corresponding entries. * * Returns nothing */ static void _mapping_clear_removed_entries(struct mpr_softc *sc) { u32 remove_idx; struct map_removal_table *remove_entry; Mpi2DriverMap0Entry_t *dpm_entry; u8 done_flag = 0, num_entries, m, i; struct enc_mapping_table *et_entry, *from, *to; u16 ioc_pg8_flags = le16toh(sc->ioc_pg8.Flags); if (sc->is_dpm_enable) { remove_entry = sc->removal_table; for (remove_idx = 0; remove_idx < sc->max_devices; remove_idx++, remove_entry++) { if (remove_entry->dpm_entry_num != MPR_DPM_BAD_IDX) { dpm_entry = (Mpi2DriverMap0Entry_t *) ((u8 *) sc->dpm_pg0 + sizeof(MPI2_CONFIG_EXTENDED_PAGE_HEADER)); dpm_entry += remove_entry->dpm_entry_num; dpm_entry->PhysicalIdentifier.Low = 0; dpm_entry->PhysicalIdentifier.High = 0; dpm_entry->DeviceIndex = 0; dpm_entry->MappingInformation = 0; dpm_entry->PhysicalBitsMapping = 0; sc->dpm_flush_entry[remove_entry-> dpm_entry_num] = 1; sc->dpm_entry_used[remove_entry->dpm_entry_num] = 0; remove_entry->dpm_entry_num = MPR_DPM_BAD_IDX; } } } /* * When using Enc/Slot mapping, if a new enclosure was added and old * enclosure space was needed, the enclosure table may now have gaps * that need to be closed. All enclosure mappings need to be contiguous * so that space can be reused correctly if available. 
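 *
 * For example, when entry i has a zero enc_handle but init_complete is set,
 * the entries above it are shifted down one slot (with
 * _mapping_set_mid_to_eid() keeping the map table's physical IDs in sync),
 * the last entry is cleared, num_enc_table_entries is decremented, and the
 * scan restarts until no such gaps remain.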
*/ if ((ioc_pg8_flags & MPI2_IOCPAGE8_FLAGS_MASK_MAPPING_MODE) == MPI2_IOCPAGE8_FLAGS_ENCLOSURE_SLOT_MAPPING) { num_entries = sc->num_enc_table_entries; while (!done_flag) { done_flag = 1; et_entry = sc->enclosure_table; for (i = 0; i < num_entries; i++, et_entry++) { if (!et_entry->enc_handle && et_entry-> init_complete) { done_flag = 0; if (i != (num_entries - 1)) { from = &sc->enclosure_table [i+1]; to = &sc->enclosure_table[i]; for (m = i; m < (num_entries - 1); m++, from++, to++) { _mapping_set_mid_to_eid (sc, to); *to = *from; } _mapping_clear_enc_entry(to); sc->num_enc_table_entries--; num_entries = sc->num_enc_table_entries; } else { _mapping_clear_enc_entry (et_entry); sc->num_enc_table_entries--; num_entries = sc->num_enc_table_entries; } } } } } } /** * _mapping_add_new_device -Add the new device into mapping table * @sc: per adapter object * @topo_change: Topology change event entry * * Search through the topology change event list and update map table, * enclosure table and DPM pages for the newly added devices. * * Returns nothing */ static void _mapping_add_new_device(struct mpr_softc *sc, struct _map_topology_change *topo_change) { u8 enc_idx, missing_cnt, is_removed = 0; u16 dpm_idx; u32 search_idx, map_idx; u32 entry; struct dev_mapping_table *mt_entry; struct enc_mapping_table *et_entry; struct _map_phy_change *phy_change; u16 ioc_pg8_flags = le16toh(sc->ioc_pg8.Flags); Mpi2DriverMap0Entry_t *dpm_entry; uint64_t temp64_var; u8 map_shift = MPI2_DRVMAP0_MAPINFO_SLOT_SHIFT; u8 hdr_sz = sizeof(MPI2_CONFIG_EXTENDED_PAGE_HEADER); u16 max_num_phy_ids = le16toh(sc->ioc_pg8.MaxNumPhysicalMappedIDs); for (entry = 0; entry < topo_change->num_entries; entry++) { phy_change = &topo_change->phy_details[entry]; if (phy_change->is_processed) continue; if (phy_change->reason != MPI2_EVENT_SAS_TOPO_RC_TARG_ADDED || !phy_change->dev_handle) { phy_change->is_processed = 1; continue; } if ((ioc_pg8_flags & MPI2_IOCPAGE8_FLAGS_MASK_MAPPING_MODE) == MPI2_IOCPAGE8_FLAGS_ENCLOSURE_SLOT_MAPPING) { enc_idx = _mapping_get_enc_idx_from_handle (sc, topo_change->enc_handle); if (enc_idx == MPR_ENCTABLE_BAD_IDX) { phy_change->is_processed = 1; mpr_dprint(sc, MPR_ERROR | MPR_MAPPING, "%s: " "failed to add the device with handle " "0x%04x because the enclosure is not in " "the mapping table\n", __func__, phy_change->dev_handle); continue; } /* * If the enclosure's start_index is BAD here, it means * that there is no room in the mapping table to cover * all of the devices that could be in the enclosure. * There's no reason to process any of the devices for * this enclosure since they can't be mapped. */ et_entry = &sc->enclosure_table[enc_idx]; if (et_entry->start_index == MPR_MAPTABLE_BAD_IDX) { phy_change->is_processed = 1; mpr_dprint(sc, MPR_ERROR | MPR_MAPPING, "%s: " "failed to add the device with handle " "0x%04x because there is no free space " "available in the mapping table\n", __func__, phy_change->dev_handle); continue; } /* * Add this device to the mapping table at the correct * offset where space was found to map the enclosure. * Then setup the DPM entry information if being used. 
*/ map_idx = et_entry->start_index + phy_change->slot - et_entry->start_slot; mt_entry = &sc->mapping_table[map_idx]; mt_entry->physical_id = phy_change->physical_id; mt_entry->id = map_idx; mt_entry->dev_handle = phy_change->dev_handle; mt_entry->missing_count = 0; mt_entry->dpm_entry_num = et_entry->dpm_entry_num; mt_entry->device_info = phy_change->device_info | (MPR_DEV_RESERVED | MPR_MAP_IN_USE); if (sc->is_dpm_enable) { dpm_idx = et_entry->dpm_entry_num; if (dpm_idx == MPR_DPM_BAD_IDX) dpm_idx = _mapping_get_dpm_idx_from_id (sc, et_entry->enclosure_id, et_entry->phy_bits); if (dpm_idx == MPR_DPM_BAD_IDX) { dpm_idx = _mapping_get_free_dpm_idx(sc); if (dpm_idx != MPR_DPM_BAD_IDX) { dpm_entry = (Mpi2DriverMap0Entry_t *) ((u8 *) sc->dpm_pg0 + hdr_sz); dpm_entry += dpm_idx; dpm_entry-> PhysicalIdentifier.Low = (0xFFFFFFFF & et_entry->enclosure_id); dpm_entry-> PhysicalIdentifier.High = (et_entry->enclosure_id >> 32); dpm_entry->DeviceIndex = (U16)et_entry->start_index; dpm_entry->MappingInformation = et_entry->num_slots; dpm_entry->MappingInformation <<= map_shift; dpm_entry->PhysicalBitsMapping = et_entry->phy_bits; et_entry->dpm_entry_num = dpm_idx; sc->dpm_entry_used[dpm_idx] = 1; sc->dpm_flush_entry[dpm_idx] = 1; phy_change->is_processed = 1; } else { phy_change->is_processed = 1; mpr_dprint(sc, MPR_ERROR | MPR_MAPPING, "%s: failed " "to add the device with " "handle 0x%04x to " "persistent table because " "there is no free space " "available\n", __func__, phy_change->dev_handle); } } else { et_entry->dpm_entry_num = dpm_idx; mt_entry->dpm_entry_num = dpm_idx; } } et_entry->init_complete = 1; } else if ((ioc_pg8_flags & MPI2_IOCPAGE8_FLAGS_MASK_MAPPING_MODE) == MPI2_IOCPAGE8_FLAGS_DEVICE_PERSISTENCE_MAPPING) { /* * Get the mapping table index for this device. If it's * not in the mapping table yet, find a free entry if * one is available. If there are no free entries, look * for the entry that has the highest missing count. If * none of that works to find an entry in the mapping * table, there is a problem. Log a message and just * continue on. */ map_idx = _mapping_get_mt_idx_from_id (sc, phy_change->physical_id); if (map_idx == MPR_MAPTABLE_BAD_IDX) { search_idx = sc->num_rsvd_entries; if (topo_change->exp_handle) search_idx += max_num_phy_ids; map_idx = _mapping_get_free_mt_idx(sc, search_idx); } /* * If an entry will be used that has a missing device, * clear its entry from the DPM in the controller. 
*/ if (map_idx == MPR_MAPTABLE_BAD_IDX) { map_idx = _mapping_get_high_missing_mt_idx(sc); if (map_idx != MPR_MAPTABLE_BAD_IDX) { mt_entry = &sc->mapping_table[map_idx]; _mapping_add_to_removal_table(sc, mt_entry->dpm_entry_num); is_removed = 1; mt_entry->init_complete = 0; } } if (map_idx != MPR_MAPTABLE_BAD_IDX) { mt_entry = &sc->mapping_table[map_idx]; mt_entry->physical_id = phy_change->physical_id; mt_entry->id = map_idx; mt_entry->dev_handle = phy_change->dev_handle; mt_entry->missing_count = 0; mt_entry->device_info = phy_change->device_info | (MPR_DEV_RESERVED | MPR_MAP_IN_USE); } else { phy_change->is_processed = 1; mpr_dprint(sc, MPR_ERROR | MPR_MAPPING, "%s: " "failed to add the device with handle " "0x%04x because there is no free space " "available in the mapping table\n", __func__, phy_change->dev_handle); continue; } if (sc->is_dpm_enable) { if (mt_entry->dpm_entry_num != MPR_DPM_BAD_IDX) { dpm_idx = mt_entry->dpm_entry_num; dpm_entry = (Mpi2DriverMap0Entry_t *) ((u8 *)sc->dpm_pg0 + hdr_sz); dpm_entry += dpm_idx; missing_cnt = dpm_entry-> MappingInformation & MPI2_DRVMAP0_MAPINFO_MISSING_MASK; temp64_var = dpm_entry-> PhysicalIdentifier.High; temp64_var = (temp64_var << 32) | dpm_entry->PhysicalIdentifier.Low; /* * If the Mapping Table's info is not * the same as the DPM entry, clear the * init_complete flag so that it's * updated. */ if ((mt_entry->physical_id == temp64_var) && !missing_cnt) mt_entry->init_complete = 1; else mt_entry->init_complete = 0; } else { dpm_idx = _mapping_get_free_dpm_idx(sc); mt_entry->init_complete = 0; } if (dpm_idx != MPR_DPM_BAD_IDX && !mt_entry->init_complete) { mt_entry->dpm_entry_num = dpm_idx; dpm_entry = (Mpi2DriverMap0Entry_t *) ((u8 *)sc->dpm_pg0 + hdr_sz); dpm_entry += dpm_idx; dpm_entry->PhysicalIdentifier.Low = (0xFFFFFFFF & mt_entry->physical_id); dpm_entry->PhysicalIdentifier.High = (mt_entry->physical_id >> 32); dpm_entry->DeviceIndex = (U16) map_idx; dpm_entry->MappingInformation = 0; dpm_entry->PhysicalBitsMapping = 0; sc->dpm_entry_used[dpm_idx] = 1; sc->dpm_flush_entry[dpm_idx] = 1; phy_change->is_processed = 1; } else if (dpm_idx == MPR_DPM_BAD_IDX) { phy_change->is_processed = 1; mpr_dprint(sc, MPR_ERROR | MPR_MAPPING, "%s: failed to add the device with " "handle 0x%04x to persistent table " "because there is no free space " "available\n", __func__, phy_change->dev_handle); } } mt_entry->init_complete = 1; } phy_change->is_processed = 1; } if (is_removed) _mapping_clear_removed_entries(sc); } /** * _mapping_add_new_pcie_device -Add the new PCIe device into mapping table * @sc: per adapter object * @topo_change: Topology change event entry * * Search through the PCIe topology change event list and update map table, * enclosure table and DPM pages for the newly added devices. 
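 *
 * Illustrative sketch only: when an Enc/Slot DPM entry is created below, the
 * enclosure's start index goes into DeviceIndex and the slot count is packed
 * into the slot field of MappingInformation (the missing count occupies the
 * MPI2_DRVMAP0_MAPINFO_MISSING_MASK bits):
 *
 *	dpm_entry->DeviceIndex = (U16)et_entry->start_index;
 *	dpm_entry->MappingInformation = et_entry->num_slots;
 *	dpm_entry->MappingInformation <<= MPI2_DRVMAP0_MAPINFO_SLOT_SHIFT;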
* * Returns nothing */ static void _mapping_add_new_pcie_device(struct mpr_softc *sc, struct _map_pcie_topology_change *topo_change) { u8 enc_idx, missing_cnt, is_removed = 0; u16 dpm_idx; u32 search_idx, map_idx; u32 entry; struct dev_mapping_table *mt_entry; struct enc_mapping_table *et_entry; struct _map_port_change *port_change; u16 ioc_pg8_flags = le16toh(sc->ioc_pg8.Flags); Mpi2DriverMap0Entry_t *dpm_entry; uint64_t temp64_var; u8 map_shift = MPI2_DRVMAP0_MAPINFO_SLOT_SHIFT; u8 hdr_sz = sizeof(MPI2_CONFIG_EXTENDED_PAGE_HEADER); u16 max_num_phy_ids = le16toh(sc->ioc_pg8.MaxNumPhysicalMappedIDs); for (entry = 0; entry < topo_change->num_entries; entry++) { port_change = &topo_change->port_details[entry]; if (port_change->is_processed) continue; if (port_change->reason != MPI26_EVENT_PCIE_TOPO_PS_DEV_ADDED || !port_change->dev_handle) { port_change->is_processed = 1; continue; } if ((ioc_pg8_flags & MPI2_IOCPAGE8_FLAGS_MASK_MAPPING_MODE) == MPI2_IOCPAGE8_FLAGS_ENCLOSURE_SLOT_MAPPING) { enc_idx = _mapping_get_enc_idx_from_handle (sc, topo_change->enc_handle); if (enc_idx == MPR_ENCTABLE_BAD_IDX) { port_change->is_processed = 1; mpr_dprint(sc, MPR_ERROR | MPR_MAPPING, "%s: " "failed to add the device with handle " "0x%04x because the enclosure is not in " "the mapping table\n", __func__, port_change->dev_handle); continue; } /* * If the enclosure's start_index is BAD here, it means * that there is no room in the mapping table to cover * all of the devices that could be in the enclosure. * There's no reason to process any of the devices for * this enclosure since they can't be mapped. */ et_entry = &sc->enclosure_table[enc_idx]; if (et_entry->start_index == MPR_MAPTABLE_BAD_IDX) { port_change->is_processed = 1; mpr_dprint(sc, MPR_ERROR | MPR_MAPPING, "%s: " "failed to add the device with handle " "0x%04x because there is no free space " "available in the mapping table\n", __func__, port_change->dev_handle); continue; } /* * Add this device to the mapping table at the correct * offset where space was found to map the enclosure. * Then setup the DPM entry information if being used. 
*/ map_idx = et_entry->start_index + port_change->slot - et_entry->start_slot; mt_entry = &sc->mapping_table[map_idx]; mt_entry->physical_id = port_change->physical_id; mt_entry->id = map_idx; mt_entry->dev_handle = port_change->dev_handle; mt_entry->missing_count = 0; mt_entry->dpm_entry_num = et_entry->dpm_entry_num; mt_entry->device_info = port_change->device_info | (MPR_DEV_RESERVED | MPR_MAP_IN_USE); if (sc->is_dpm_enable) { dpm_idx = et_entry->dpm_entry_num; if (dpm_idx == MPR_DPM_BAD_IDX) dpm_idx = _mapping_get_dpm_idx_from_id (sc, et_entry->enclosure_id, et_entry->phy_bits); if (dpm_idx == MPR_DPM_BAD_IDX) { dpm_idx = _mapping_get_free_dpm_idx(sc); if (dpm_idx != MPR_DPM_BAD_IDX) { dpm_entry = (Mpi2DriverMap0Entry_t *) ((u8 *) sc->dpm_pg0 + hdr_sz); dpm_entry += dpm_idx; dpm_entry-> PhysicalIdentifier.Low = (0xFFFFFFFF & et_entry->enclosure_id); dpm_entry-> PhysicalIdentifier.High = (et_entry->enclosure_id >> 32); dpm_entry->DeviceIndex = (U16)et_entry->start_index; dpm_entry->MappingInformation = et_entry->num_slots; dpm_entry->MappingInformation <<= map_shift; dpm_entry->PhysicalBitsMapping = et_entry->phy_bits; et_entry->dpm_entry_num = dpm_idx; sc->dpm_entry_used[dpm_idx] = 1; sc->dpm_flush_entry[dpm_idx] = 1; port_change->is_processed = 1; } else { port_change->is_processed = 1; mpr_dprint(sc, MPR_ERROR | MPR_MAPPING, "%s: failed " "to add the device with " "handle 0x%04x to " "persistent table because " "there is no free space " "available\n", __func__, port_change->dev_handle); } } else { et_entry->dpm_entry_num = dpm_idx; mt_entry->dpm_entry_num = dpm_idx; } } et_entry->init_complete = 1; } else if ((ioc_pg8_flags & MPI2_IOCPAGE8_FLAGS_MASK_MAPPING_MODE) == MPI2_IOCPAGE8_FLAGS_DEVICE_PERSISTENCE_MAPPING) { /* * Get the mapping table index for this device. If it's * not in the mapping table yet, find a free entry if * one is available. If there are no free entries, look * for the entry that has the highest missing count. If * none of that works to find an entry in the mapping * table, there is a problem. Log a message and just * continue on. */ map_idx = _mapping_get_mt_idx_from_id (sc, port_change->physical_id); if (map_idx == MPR_MAPTABLE_BAD_IDX) { search_idx = sc->num_rsvd_entries; if (topo_change->switch_dev_handle) search_idx += max_num_phy_ids; map_idx = _mapping_get_free_mt_idx(sc, search_idx); } /* * If an entry will be used that has a missing device, * clear its entry from the DPM in the controller. 
*/ if (map_idx == MPR_MAPTABLE_BAD_IDX) { map_idx = _mapping_get_high_missing_mt_idx(sc); if (map_idx != MPR_MAPTABLE_BAD_IDX) { mt_entry = &sc->mapping_table[map_idx]; _mapping_add_to_removal_table(sc, mt_entry->dpm_entry_num); is_removed = 1; mt_entry->init_complete = 0; } } if (map_idx != MPR_MAPTABLE_BAD_IDX) { mt_entry = &sc->mapping_table[map_idx]; mt_entry->physical_id = port_change->physical_id; mt_entry->id = map_idx; mt_entry->dev_handle = port_change->dev_handle; mt_entry->missing_count = 0; mt_entry->device_info = port_change->device_info | (MPR_DEV_RESERVED | MPR_MAP_IN_USE); } else { port_change->is_processed = 1; mpr_dprint(sc, MPR_ERROR | MPR_MAPPING, "%s: " "failed to add the device with handle " "0x%04x because there is no free space " "available in the mapping table\n", __func__, port_change->dev_handle); continue; } if (sc->is_dpm_enable) { if (mt_entry->dpm_entry_num != MPR_DPM_BAD_IDX) { dpm_idx = mt_entry->dpm_entry_num; dpm_entry = (Mpi2DriverMap0Entry_t *) ((u8 *)sc->dpm_pg0 + hdr_sz); dpm_entry += dpm_idx; missing_cnt = dpm_entry-> MappingInformation & MPI2_DRVMAP0_MAPINFO_MISSING_MASK; temp64_var = dpm_entry-> PhysicalIdentifier.High; temp64_var = (temp64_var << 32) | dpm_entry->PhysicalIdentifier.Low; /* * If the Mapping Table's info is not * the same as the DPM entry, clear the * init_complete flag so that it's * updated. */ if ((mt_entry->physical_id == temp64_var) && !missing_cnt) mt_entry->init_complete = 1; else mt_entry->init_complete = 0; } else { dpm_idx = _mapping_get_free_dpm_idx(sc); mt_entry->init_complete = 0; } if (dpm_idx != MPR_DPM_BAD_IDX && !mt_entry->init_complete) { mt_entry->dpm_entry_num = dpm_idx; dpm_entry = (Mpi2DriverMap0Entry_t *) ((u8 *)sc->dpm_pg0 + hdr_sz); dpm_entry += dpm_idx; dpm_entry->PhysicalIdentifier.Low = (0xFFFFFFFF & mt_entry->physical_id); dpm_entry->PhysicalIdentifier.High = (mt_entry->physical_id >> 32); dpm_entry->DeviceIndex = (U16) map_idx; dpm_entry->MappingInformation = 0; dpm_entry->PhysicalBitsMapping = 0; sc->dpm_entry_used[dpm_idx] = 1; sc->dpm_flush_entry[dpm_idx] = 1; port_change->is_processed = 1; } else if (dpm_idx == MPR_DPM_BAD_IDX) { port_change->is_processed = 1; mpr_dprint(sc, MPR_ERROR | MPR_MAPPING, "%s: failed to add the device with " "handle 0x%04x to persistent table " "because there is no free space " "available\n", __func__, port_change->dev_handle); } } mt_entry->init_complete = 1; } port_change->is_processed = 1; } if (is_removed) _mapping_clear_removed_entries(sc); } /** * _mapping_flush_dpm_pages -Flush the DPM pages to NVRAM * @sc: per adapter object * * Returns nothing */ static void _mapping_flush_dpm_pages(struct mpr_softc *sc) { Mpi2DriverMap0Entry_t *dpm_entry; Mpi2ConfigReply_t mpi_reply; Mpi2DriverMappingPage0_t config_page; u16 entry_num; for (entry_num = 0; entry_num < sc->max_dpm_entries; entry_num++) { if (!sc->dpm_flush_entry[entry_num]) continue; memset(&config_page, 0, sizeof(Mpi2DriverMappingPage0_t)); memcpy(&config_page.Header, (u8 *)sc->dpm_pg0, sizeof(MPI2_CONFIG_EXTENDED_PAGE_HEADER)); dpm_entry = (Mpi2DriverMap0Entry_t *) ((u8 *)sc->dpm_pg0 + sizeof(MPI2_CONFIG_EXTENDED_PAGE_HEADER)); dpm_entry += entry_num; dpm_entry->MappingInformation = htole16(dpm_entry-> MappingInformation); dpm_entry->DeviceIndex = htole16(dpm_entry->DeviceIndex); dpm_entry->PhysicalBitsMapping = htole32(dpm_entry-> PhysicalBitsMapping); memcpy(&config_page.Entry, (u8 *)dpm_entry, sizeof(Mpi2DriverMap0Entry_t)); /* TODO-How to handle failed writes? 
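 *
 * (Note: as the code stands, a failed write below leaves dpm_flush_entry set
 * for that entry, so the write is retried the next time the DPM pages are
 * flushed.)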
*/ mpr_dprint(sc, MPR_MAPPING, "%s: Flushing DPM entry %d.\n", __func__, entry_num); if (mpr_config_set_dpm_pg0(sc, &mpi_reply, &config_page, entry_num)) { mpr_dprint(sc, MPR_ERROR | MPR_MAPPING, "%s: Flush of " "DPM entry %d for device failed\n", __func__, entry_num); } else sc->dpm_flush_entry[entry_num] = 0; dpm_entry->MappingInformation = le16toh(dpm_entry-> MappingInformation); dpm_entry->DeviceIndex = le16toh(dpm_entry->DeviceIndex); dpm_entry->PhysicalBitsMapping = le32toh(dpm_entry-> PhysicalBitsMapping); } } /** * _mapping_allocate_memory- allocates the memory required for mapping tables * @sc: per adapter object * * Allocates the memory for all the tables required for host mapping * * Return 0 on success or non-zero on failure. */ int mpr_mapping_allocate_memory(struct mpr_softc *sc) { uint32_t dpm_pg0_sz; - sc->mapping_table = malloc((sizeof(struct dev_mapping_table) * - sc->max_devices), M_MPR, M_ZERO|M_NOWAIT); + sc->mapping_table = mallocarray(sc->max_devices, + sizeof(struct dev_mapping_table), M_MPR, M_ZERO|M_NOWAIT); if (!sc->mapping_table) goto free_resources; - sc->removal_table = malloc((sizeof(struct map_removal_table) * - sc->max_devices), M_MPR, M_ZERO|M_NOWAIT); + sc->removal_table = mallocarray(sc->max_devices, + sizeof(struct map_removal_table), M_MPR, M_ZERO|M_NOWAIT); if (!sc->removal_table) goto free_resources; - sc->enclosure_table = malloc((sizeof(struct enc_mapping_table) * - sc->max_enclosures), M_MPR, M_ZERO|M_NOWAIT); + sc->enclosure_table = mallocarray(sc->max_enclosures, + sizeof(struct enc_mapping_table), M_MPR, M_ZERO|M_NOWAIT); if (!sc->enclosure_table) goto free_resources; - sc->dpm_entry_used = malloc((sizeof(u8) * sc->max_dpm_entries), + sc->dpm_entry_used = mallocarray(sc->max_dpm_entries, sizeof(u8), M_MPR, M_ZERO|M_NOWAIT); if (!sc->dpm_entry_used) goto free_resources; - sc->dpm_flush_entry = malloc((sizeof(u8) * sc->max_dpm_entries), + sc->dpm_flush_entry = mallocarray(sc->max_dpm_entries, sizeof(u8), M_MPR, M_ZERO|M_NOWAIT); if (!sc->dpm_flush_entry) goto free_resources; dpm_pg0_sz = sizeof(MPI2_CONFIG_EXTENDED_PAGE_HEADER) + (sc->max_dpm_entries * sizeof(MPI2_CONFIG_PAGE_DRIVER_MAP0_ENTRY)); sc->dpm_pg0 = malloc(dpm_pg0_sz, M_MPR, M_ZERO|M_NOWAIT); if (!sc->dpm_pg0) { printf("%s: memory alloc failed for dpm page; disabling dpm\n", __func__); sc->is_dpm_enable = 0; } return 0; free_resources: free(sc->mapping_table, M_MPR); free(sc->removal_table, M_MPR); free(sc->enclosure_table, M_MPR); free(sc->dpm_entry_used, M_MPR); free(sc->dpm_flush_entry, M_MPR); free(sc->dpm_pg0, M_MPR); printf("%s: device initialization failed due to failure in mapping " "table memory allocation\n", __func__); return -1; } /** * mpr_mapping_free_memory- frees the memory allocated for mapping tables * @sc: per adapter object * * Returns nothing. */ void mpr_mapping_free_memory(struct mpr_softc *sc) { free(sc->mapping_table, M_MPR); free(sc->removal_table, M_MPR); free(sc->enclosure_table, M_MPR); free(sc->dpm_entry_used, M_MPR); free(sc->dpm_flush_entry, M_MPR); free(sc->dpm_pg0, M_MPR); } static bool _mapping_process_dpm_pg0(struct mpr_softc *sc) { u8 missing_cnt, enc_idx; u16 slot_id, entry_num, num_slots; u32 map_idx, dev_idx, start_idx, end_idx; struct dev_mapping_table *mt_entry; Mpi2DriverMap0Entry_t *dpm_entry; u16 ioc_pg8_flags = le16toh(sc->ioc_pg8.Flags); u16 max_num_phy_ids = le16toh(sc->ioc_pg8.MaxNumPhysicalMappedIDs); struct enc_mapping_table *et_entry; u64 physical_id; u32 phy_bits = 0; /* * start_idx and end_idx are only used for IR. 
*/ if (sc->ir_firmware) _mapping_get_ir_maprange(sc, &start_idx, &end_idx); /* * Look through all of the DPM entries that were read from the * controller and copy them over to the driver's internal table if they * have a non-zero ID. At this point, any ID with a value of 0 would be * invalid, so don't copy it. */ mpr_dprint(sc, MPR_MAPPING, "%s: Start copy of %d DPM entries into the " "mapping table.\n", __func__, sc->max_dpm_entries); dpm_entry = (Mpi2DriverMap0Entry_t *) ((uint8_t *) sc->dpm_pg0 + sizeof(MPI2_CONFIG_EXTENDED_PAGE_HEADER)); for (entry_num = 0; entry_num < sc->max_dpm_entries; entry_num++, dpm_entry++) { physical_id = dpm_entry->PhysicalIdentifier.High; physical_id = (physical_id << 32) | dpm_entry->PhysicalIdentifier.Low; if (!physical_id) { sc->dpm_entry_used[entry_num] = 0; continue; } sc->dpm_entry_used[entry_num] = 1; dpm_entry->MappingInformation = le16toh(dpm_entry-> MappingInformation); missing_cnt = dpm_entry->MappingInformation & MPI2_DRVMAP0_MAPINFO_MISSING_MASK; dev_idx = le16toh(dpm_entry->DeviceIndex); phy_bits = le32toh(dpm_entry->PhysicalBitsMapping); /* * Volumes are at special locations in the mapping table so * account for that. Volume mapping table entries do not depend * on the type of mapping, so continue the loop after adding * volumes to the mapping table. */ if (sc->ir_firmware && (dev_idx >= start_idx) && (dev_idx <= end_idx)) { mt_entry = &sc->mapping_table[dev_idx]; mt_entry->physical_id = dpm_entry->PhysicalIdentifier.High; mt_entry->physical_id = (mt_entry->physical_id << 32) | dpm_entry->PhysicalIdentifier.Low; mt_entry->id = dev_idx; mt_entry->missing_count = missing_cnt; mt_entry->dpm_entry_num = entry_num; mt_entry->device_info = MPR_DEV_RESERVED; continue; } if ((ioc_pg8_flags & MPI2_IOCPAGE8_FLAGS_MASK_MAPPING_MODE) == MPI2_IOCPAGE8_FLAGS_ENCLOSURE_SLOT_MAPPING) { /* * The dev_idx for an enclosure is the start index. If * the start index is within the controller's default * enclosure area, set the number of slots for this * enclosure to the max allowed. Otherwise, it should be * a normal enclosure and the number of slots is in the * DPM entry's Mapping Information. */ if (dev_idx < (sc->num_rsvd_entries + max_num_phy_ids)) { slot_id = 0; if (ioc_pg8_flags & MPI2_IOCPAGE8_FLAGS_DA_START_SLOT_1) slot_id = 1; num_slots = max_num_phy_ids; } else { slot_id = 0; num_slots = dpm_entry->MappingInformation & MPI2_DRVMAP0_MAPINFO_SLOT_MASK; num_slots >>= MPI2_DRVMAP0_MAPINFO_SLOT_SHIFT; } enc_idx = sc->num_enc_table_entries; if (enc_idx >= sc->max_enclosures) { mpr_dprint(sc, MPR_ERROR | MPR_MAPPING, "%s: " "Number of enclosure entries in DPM exceed " "the max allowed of %d.\n", __func__, sc->max_enclosures); break; } sc->num_enc_table_entries++; et_entry = &sc->enclosure_table[enc_idx]; physical_id = dpm_entry->PhysicalIdentifier.High; et_entry->enclosure_id = (physical_id << 32) | dpm_entry->PhysicalIdentifier.Low; et_entry->start_index = dev_idx; et_entry->dpm_entry_num = entry_num; et_entry->num_slots = num_slots; et_entry->start_slot = slot_id; et_entry->missing_count = missing_cnt; et_entry->phy_bits = phy_bits; /* * Initialize all entries for this enclosure in the * mapping table and mark them as reserved. The actual * devices have not been processed yet but when they are * they will use these entries. If an entry is found * that already has a valid DPM index, the mapping table * is corrupt. This can happen if the mapping type is * changed without clearing all of the DPM entries in * the controller. 
*/ mt_entry = &sc->mapping_table[dev_idx]; for (map_idx = dev_idx; map_idx < (dev_idx + num_slots); map_idx++, mt_entry++) { if (mt_entry->dpm_entry_num != MPR_DPM_BAD_IDX) { mpr_dprint(sc, MPR_ERROR | MPR_MAPPING, "%s: Conflict in mapping table for " " enclosure %d\n", __func__, enc_idx); goto fail; } physical_id = dpm_entry->PhysicalIdentifier.High; mt_entry->physical_id = (physical_id << 32) | dpm_entry->PhysicalIdentifier.Low; mt_entry->phy_bits = phy_bits; mt_entry->id = dev_idx; mt_entry->dpm_entry_num = entry_num; mt_entry->missing_count = missing_cnt; mt_entry->device_info = MPR_DEV_RESERVED; } } else if ((ioc_pg8_flags & MPI2_IOCPAGE8_FLAGS_MASK_MAPPING_MODE) == MPI2_IOCPAGE8_FLAGS_DEVICE_PERSISTENCE_MAPPING) { /* * Device mapping, so simply copy the DPM entries to the * mapping table, but check for a corrupt mapping table * (as described above in Enc/Slot mapping). */ map_idx = dev_idx; mt_entry = &sc->mapping_table[map_idx]; if (mt_entry->dpm_entry_num != MPR_DPM_BAD_IDX) { mpr_dprint(sc, MPR_ERROR | MPR_MAPPING, "%s: " "Conflict in mapping table for device %d\n", __func__, map_idx); goto fail; } physical_id = dpm_entry->PhysicalIdentifier.High; mt_entry->physical_id = (physical_id << 32) | dpm_entry->PhysicalIdentifier.Low; mt_entry->phy_bits = phy_bits; mt_entry->id = dev_idx; mt_entry->missing_count = missing_cnt; mt_entry->dpm_entry_num = entry_num; mt_entry->device_info = MPR_DEV_RESERVED; } } /*close the loop for DPM table */ return (true); fail: for (entry_num = 0; entry_num < sc->max_dpm_entries; entry_num++) { sc->dpm_entry_used[entry_num] = 0; /* * for IR firmware, it may be necessary to wipe out * sc->mapping_table volumes tooi */ } sc->num_enc_table_entries = 0; return (false); } /* * mpr_mapping_check_devices - start of the day check for device availabilty * @sc: per adapter object * * Returns nothing. */ void mpr_mapping_check_devices(void *data) { u32 i; struct dev_mapping_table *mt_entry; struct mpr_softc *sc = (struct mpr_softc *)data; u16 ioc_pg8_flags = le16toh(sc->ioc_pg8.Flags); struct enc_mapping_table *et_entry; u32 start_idx = 0, end_idx = 0; u8 stop_device_checks = 0; MPR_FUNCTRACE(sc); /* * Clear this flag so that this function is never called again except * within this function if the check needs to be done again. The * purpose is to check for missing devices that are currently in the * mapping table so do this only at driver init after discovery. */ sc->track_mapping_events = 0; /* * callout synchronization * This is used to prevent race conditions for the callout. */ mpr_dprint(sc, MPR_MAPPING, "%s: Start check for missing devices.\n", __func__); mtx_assert(&sc->mpr_mtx, MA_OWNED); if ((callout_pending(&sc->device_check_callout)) || (!callout_active(&sc->device_check_callout))) { mpr_dprint(sc, MPR_MAPPING, "%s: Device Check Callout is " "already pending or not active.\n", __func__); return; } callout_deactivate(&sc->device_check_callout); /* * Use callout to check if any devices in the mapping table have been * processed yet. If ALL devices are marked as not init_complete, no * devices have been processed and mapped. Until devices are mapped * there's no reason to mark them as missing. Continue resetting this * callout until devices have been mapped. 
*/ if ((ioc_pg8_flags & MPI2_IOCPAGE8_FLAGS_MASK_MAPPING_MODE) == MPI2_IOCPAGE8_FLAGS_ENCLOSURE_SLOT_MAPPING) { et_entry = sc->enclosure_table; for (i = 0; i < sc->num_enc_table_entries; i++, et_entry++) { if (et_entry->init_complete) { stop_device_checks = 1; break; } } } else if ((ioc_pg8_flags & MPI2_IOCPAGE8_FLAGS_MASK_MAPPING_MODE) == MPI2_IOCPAGE8_FLAGS_DEVICE_PERSISTENCE_MAPPING) { mt_entry = sc->mapping_table; for (i = 0; i < sc->max_devices; i++, mt_entry++) { if (mt_entry->init_complete) { stop_device_checks = 1; break; } } } /* * Setup another callout check after a delay. Keep doing this until * devices are mapped. */ if (!stop_device_checks) { mpr_dprint(sc, MPR_MAPPING, "%s: No devices have been mapped. " "Reset callout to check again after a %d second delay.\n", __func__, MPR_MISSING_CHECK_DELAY); callout_reset(&sc->device_check_callout, MPR_MISSING_CHECK_DELAY * hz, mpr_mapping_check_devices, sc); return; } mpr_dprint(sc, MPR_MAPPING, "%s: Device check complete.\n", __func__); /* * Depending on the mapping type, check if devices have been processed * and update their missing counts if not processed. */ if ((ioc_pg8_flags & MPI2_IOCPAGE8_FLAGS_MASK_MAPPING_MODE) == MPI2_IOCPAGE8_FLAGS_ENCLOSURE_SLOT_MAPPING) { et_entry = sc->enclosure_table; for (i = 0; i < sc->num_enc_table_entries; i++, et_entry++) { if (!et_entry->init_complete) { if (et_entry->missing_count < MPR_MAX_MISSING_COUNT) { mpr_dprint(sc, MPR_MAPPING, "%s: " "Enclosure %d is missing from the " "topology. Update its missing " "count.\n", __func__, i); et_entry->missing_count++; if (et_entry->dpm_entry_num != MPR_DPM_BAD_IDX) { _mapping_commit_enc_entry(sc, et_entry); } } et_entry->init_complete = 1; } } if (!sc->ir_firmware) return; _mapping_get_ir_maprange(sc, &start_idx, &end_idx); mt_entry = &sc->mapping_table[start_idx]; } else if ((ioc_pg8_flags & MPI2_IOCPAGE8_FLAGS_MASK_MAPPING_MODE) == MPI2_IOCPAGE8_FLAGS_DEVICE_PERSISTENCE_MAPPING) { start_idx = 0; end_idx = sc->max_devices - 1; mt_entry = sc->mapping_table; } /* * The start and end indices have been set above according to the * mapping type. Go through these mappings and update any entries that * do not have the init_complete flag set, which means they are missing. */ if (end_idx == 0) return; for (i = start_idx; i < (end_idx + 1); i++, mt_entry++) { if (mt_entry->device_info & MPR_DEV_RESERVED && !mt_entry->physical_id) mt_entry->init_complete = 1; else if (mt_entry->device_info & MPR_DEV_RESERVED) { if (!mt_entry->init_complete) { mpr_dprint(sc, MPR_MAPPING, "%s: Device in " "mapping table at index %d is missing from " "topology. Update its missing count.\n", __func__, i); if (mt_entry->missing_count < MPR_MAX_MISSING_COUNT) { mt_entry->missing_count++; if (mt_entry->dpm_entry_num != MPR_DPM_BAD_IDX) { _mapping_commit_map_entry(sc, mt_entry); } } mt_entry->init_complete = 1; } } } } /** * mpr_mapping_initialize - initialize mapping tables * @sc: per adapter object * * Read controller persitant mapping tables into internal data area. * * Return 0 for success or non-zero for failure. 
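mpr_mapping_check_devices() above is driven by a callout that is presumably initialized with callout_init_mtx() against the driver mutex (the handler asserts the mutex is held), filters stale invocations with callout_pending()/callout_active(), and re-arms itself with callout_reset() until at least one device has been mapped. A stripped-down sketch of that self-rearming pattern, with an invented stand-in condition; the example_ names are not driver code:

static void
example_check(void *arg)
{
        struct mpr_softc *sc = arg;
        int done;

        mtx_assert(&sc->mpr_mtx, MA_OWNED);
        if (callout_pending(&sc->device_check_callout) ||
            !callout_active(&sc->device_check_callout))
                return;                 /* re-armed or stopped while queued */
        callout_deactivate(&sc->device_check_callout);

        done = (sc->num_enc_table_entries != 0);        /* stand-in condition */
        if (!done) {
                /* Not ready yet: arm the callout again and try later. */
                callout_reset(&sc->device_check_callout,
                    MPR_MISSING_CHECK_DELAY * hz, example_check, sc);
                return;
        }
        /* ... one-shot work once the condition finally holds ... */
}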
*/ int mpr_mapping_initialize(struct mpr_softc *sc) { uint16_t volume_mapping_flags, dpm_pg0_sz; uint32_t i; Mpi2ConfigReply_t mpi_reply; int error; uint8_t retry_count; uint16_t ioc_pg8_flags = le16toh(sc->ioc_pg8.Flags); /* The additional 1 accounts for the virtual enclosure * created for the controller */ sc->max_enclosures = sc->facts->MaxEnclosures + 1; sc->max_expanders = sc->facts->MaxSasExpanders; sc->max_volumes = sc->facts->MaxVolumes; sc->max_devices = sc->facts->MaxTargets + sc->max_volumes; sc->pending_map_events = 0; sc->num_enc_table_entries = 0; sc->num_rsvd_entries = 0; sc->max_dpm_entries = sc->ioc_pg8.MaxPersistentEntries; sc->is_dpm_enable = (sc->max_dpm_entries) ? 1 : 0; sc->track_mapping_events = 0; mpr_dprint(sc, MPR_MAPPING, "%s: Mapping table has a max of %d entries " "and DPM has a max of %d entries.\n", __func__, sc->max_devices, sc->max_dpm_entries); if (ioc_pg8_flags & MPI2_IOCPAGE8_FLAGS_DISABLE_PERSISTENT_MAPPING) sc->is_dpm_enable = 0; if (ioc_pg8_flags & MPI2_IOCPAGE8_FLAGS_RESERVED_TARGETID_0) sc->num_rsvd_entries = 1; volume_mapping_flags = sc->ioc_pg8.IRVolumeMappingFlags & MPI2_IOCPAGE8_IRFLAGS_MASK_VOLUME_MAPPING_MODE; if (sc->ir_firmware && (volume_mapping_flags == MPI2_IOCPAGE8_IRFLAGS_LOW_VOLUME_MAPPING)) sc->num_rsvd_entries += sc->max_volumes; error = mpr_mapping_allocate_memory(sc); if (error) return (error); for (i = 0; i < sc->max_devices; i++) _mapping_clear_map_entry(sc->mapping_table + i); for (i = 0; i < sc->max_enclosures; i++) _mapping_clear_enc_entry(sc->enclosure_table + i); for (i = 0; i < sc->max_devices; i++) { sc->removal_table[i].dev_handle = 0; sc->removal_table[i].dpm_entry_num = MPR_DPM_BAD_IDX; } memset(sc->dpm_entry_used, 0, sc->max_dpm_entries); memset(sc->dpm_flush_entry, 0, sc->max_dpm_entries); if (sc->is_dpm_enable) { dpm_pg0_sz = sizeof(MPI2_CONFIG_EXTENDED_PAGE_HEADER) + (sc->max_dpm_entries * sizeof(MPI2_CONFIG_PAGE_DRIVER_MAP0_ENTRY)); retry_count = 0; retry_read_dpm: if (mpr_config_get_dpm_pg0(sc, &mpi_reply, sc->dpm_pg0, dpm_pg0_sz)) { mpr_dprint(sc, MPR_ERROR | MPR_MAPPING, "%s: DPM page " "read failed.\n", __func__); if (retry_count < 3) { retry_count++; goto retry_read_dpm; } sc->is_dpm_enable = 0; } } if (sc->is_dpm_enable) { if (!_mapping_process_dpm_pg0(sc)) sc->is_dpm_enable = 0; } if (! sc->is_dpm_enable) { mpr_dprint(sc, MPR_MAPPING, "%s: DPM processing is disabled. " "Device mappings will not persist across reboots or " "resets.\n", __func__); } sc->track_mapping_events = 1; return 0; } /** * mpr_mapping_exit - clear mapping table and associated memory * @sc: per adapter object * * Returns nothing. */ void mpr_mapping_exit(struct mpr_softc *sc) { _mapping_flush_dpm_pages(sc); mpr_mapping_free_memory(sc); } /** * mpr_mapping_get_tid - return the target id for sas device and handle * @sc: per adapter object * @sas_address: sas address of the device * @handle: device handle * * Returns valid target ID on success or BAD_ID. */ unsigned int mpr_mapping_get_tid(struct mpr_softc *sc, uint64_t sas_address, u16 handle) { u32 map_idx; struct dev_mapping_table *mt_entry; for (map_idx = 0; map_idx < sc->max_devices; map_idx++) { mt_entry = &sc->mapping_table[map_idx]; if (mt_entry->dev_handle == handle && mt_entry->physical_id == sas_address) return mt_entry->id; } return MPR_MAP_BAD_ID; } /** * mpr_mapping_get_tid_from_handle - find a target id in mapping table using * only the dev handle. This is just a wrapper function for the local function * _mapping_get_mt_idx_from_handle. 
* @sc: per adapter object * @handle: device handle * * Returns valid target ID on success or BAD_ID. */ unsigned int mpr_mapping_get_tid_from_handle(struct mpr_softc *sc, u16 handle) { return (_mapping_get_mt_idx_from_handle(sc, handle)); } /** * mpr_mapping_get_raid_tid - return the target id for raid device * @sc: per adapter object * @wwid: world wide identifier for raid volume * @volHandle: volume device handle * * Returns valid target ID on success or BAD_ID. */ unsigned int mpr_mapping_get_raid_tid(struct mpr_softc *sc, u64 wwid, u16 volHandle) { u32 start_idx, end_idx, map_idx; struct dev_mapping_table *mt_entry; _mapping_get_ir_maprange(sc, &start_idx, &end_idx); mt_entry = &sc->mapping_table[start_idx]; for (map_idx = start_idx; map_idx <= end_idx; map_idx++, mt_entry++) { if (mt_entry->dev_handle == volHandle && mt_entry->physical_id == wwid) return mt_entry->id; } return MPR_MAP_BAD_ID; } /** * mpr_mapping_get_raid_tid_from_handle - find raid device in mapping table * using only the volume dev handle. This is just a wrapper function for the * local function _mapping_get_ir_mt_idx_from_handle. * @sc: per adapter object * @volHandle: volume device handle * * Returns valid target ID on success or BAD_ID. */ unsigned int mpr_mapping_get_raid_tid_from_handle(struct mpr_softc *sc, u16 volHandle) { return (_mapping_get_ir_mt_idx_from_handle(sc, volHandle)); } /** * mpr_mapping_enclosure_dev_status_change_event - handle enclosure events * @sc: per adapter object * @event_data: event data payload * * Return nothing. */ void mpr_mapping_enclosure_dev_status_change_event(struct mpr_softc *sc, Mpi2EventDataSasEnclDevStatusChange_t *event_data) { u8 enc_idx, missing_count; struct enc_mapping_table *et_entry; Mpi2DriverMap0Entry_t *dpm_entry; u16 ioc_pg8_flags = le16toh(sc->ioc_pg8.Flags); u8 map_shift = MPI2_DRVMAP0_MAPINFO_SLOT_SHIFT; u8 update_phy_bits = 0; u32 saved_phy_bits; uint64_t temp64_var; if ((ioc_pg8_flags & MPI2_IOCPAGE8_FLAGS_MASK_MAPPING_MODE) != MPI2_IOCPAGE8_FLAGS_ENCLOSURE_SLOT_MAPPING) goto out; dpm_entry = (Mpi2DriverMap0Entry_t *)((u8 *)sc->dpm_pg0 + sizeof(MPI2_CONFIG_EXTENDED_PAGE_HEADER)); if (event_data->ReasonCode == MPI2_EVENT_SAS_ENCL_RC_ADDED) { if (!event_data->NumSlots) { mpr_dprint(sc, MPR_ERROR | MPR_MAPPING, "%s: Enclosure " "with handle = 0x%x reported 0 slots.\n", __func__, le16toh(event_data->EnclosureHandle)); goto out; } temp64_var = event_data->EnclosureLogicalID.High; temp64_var = (temp64_var << 32) | event_data->EnclosureLogicalID.Low; enc_idx = _mapping_get_enc_idx_from_id(sc, temp64_var, event_data->PhyBits); /* * If the Added enclosure is already in the Enclosure Table, * make sure that all the the enclosure info is up to date. If * the enclosure was missing and has just been added back, or if * the enclosure's Phy Bits have changed, clear the missing * count and update the Phy Bits in the mapping table and in the * DPM, if it's being used. 
*/ if (enc_idx != MPR_ENCTABLE_BAD_IDX) { et_entry = &sc->enclosure_table[enc_idx]; if (et_entry->init_complete && !et_entry->missing_count) { mpr_dprint(sc, MPR_MAPPING, "%s: Enclosure %d " "is already present with handle = 0x%x\n", __func__, enc_idx, et_entry->enc_handle); goto out; } et_entry->enc_handle = le16toh(event_data-> EnclosureHandle); et_entry->start_slot = le16toh(event_data->StartSlot); saved_phy_bits = et_entry->phy_bits; et_entry->phy_bits |= le32toh(event_data->PhyBits); if (saved_phy_bits != et_entry->phy_bits) update_phy_bits = 1; if (et_entry->missing_count || update_phy_bits) { et_entry->missing_count = 0; if (sc->is_dpm_enable && et_entry->dpm_entry_num != MPR_DPM_BAD_IDX) { dpm_entry += et_entry->dpm_entry_num; missing_count = (u8)(dpm_entry->MappingInformation & MPI2_DRVMAP0_MAPINFO_MISSING_MASK); if (missing_count || update_phy_bits) { dpm_entry->MappingInformation = et_entry->num_slots; dpm_entry->MappingInformation <<= map_shift; dpm_entry->PhysicalBitsMapping = et_entry->phy_bits; sc->dpm_flush_entry[et_entry-> dpm_entry_num] = 1; } } } } else { /* * This is a new enclosure that is being added. * Initialize the Enclosure Table entry. It will be * finalized when a device is added for the enclosure * and the enclosure has enough space in the Mapping * Table to map its devices. */ enc_idx = sc->num_enc_table_entries; if (enc_idx >= sc->max_enclosures) { mpr_dprint(sc, MPR_ERROR | MPR_MAPPING, "%s: " "Enclosure cannot be added to mapping " "table because it's full.\n", __func__); goto out; } sc->num_enc_table_entries++; et_entry = &sc->enclosure_table[enc_idx]; et_entry->enc_handle = le16toh(event_data-> EnclosureHandle); et_entry->enclosure_id = le64toh(event_data-> EnclosureLogicalID.High); et_entry->enclosure_id = ((et_entry->enclosure_id << 32) | le64toh(event_data->EnclosureLogicalID.Low)); et_entry->start_index = MPR_MAPTABLE_BAD_IDX; et_entry->dpm_entry_num = MPR_DPM_BAD_IDX; et_entry->num_slots = le16toh(event_data->NumSlots); et_entry->start_slot = le16toh(event_data->StartSlot); et_entry->phy_bits = le32toh(event_data->PhyBits); } et_entry->init_complete = 1; } else if (event_data->ReasonCode == MPI2_EVENT_SAS_ENCL_RC_NOT_RESPONDING) { /* * An enclosure was removed. Update its missing count and then * update the DPM entry with the new missing count for the * enclosure. */ enc_idx = _mapping_get_enc_idx_from_handle(sc, le16toh(event_data->EnclosureHandle)); if (enc_idx == MPR_ENCTABLE_BAD_IDX) { mpr_dprint(sc, MPR_ERROR | MPR_MAPPING, "%s: Cannot " "unmap enclosure %d because it has already been " "deleted.\n", __func__, enc_idx); goto out; } et_entry = &sc->enclosure_table[enc_idx]; if (et_entry->missing_count < MPR_MAX_MISSING_COUNT) et_entry->missing_count++; if (sc->is_dpm_enable && et_entry->dpm_entry_num != MPR_DPM_BAD_IDX) { dpm_entry += et_entry->dpm_entry_num; dpm_entry->MappingInformation = et_entry->num_slots; dpm_entry->MappingInformation <<= map_shift; dpm_entry->MappingInformation |= et_entry->missing_count; sc->dpm_flush_entry[et_entry->dpm_entry_num] = 1; } et_entry->init_complete = 1; } out: _mapping_flush_dpm_pages(sc); if (sc->pending_map_events) sc->pending_map_events--; } /** * mpr_mapping_topology_change_event - handle topology change events * @sc: per adapter object * @event_data: event data payload * * Returns nothing. 
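Both branches above fold an enclosure's slot count and missing count into the 16-bit DPM MappingInformation word using MPI2_DRVMAP0_MAPINFO_SLOT_SHIFT and the missing-count mask. The packing and its inverse, written out as hypothetical helpers purely for clarity:

/*
 * Sketch of the MappingInformation layout used above: slot count in the
 * MPI2_DRVMAP0_MAPINFO_SLOT_MASK field, missing count in the low bits.
 * The example_ helpers are illustrative, not driver functions.
 */
static __inline uint16_t
example_pack_mapinfo(uint16_t num_slots, uint8_t missing_count)
{
        return ((num_slots << MPI2_DRVMAP0_MAPINFO_SLOT_SHIFT) |
            (missing_count & MPI2_DRVMAP0_MAPINFO_MISSING_MASK));
}

static __inline void
example_unpack_mapinfo(uint16_t info, uint16_t *num_slots,
    uint8_t *missing_count)
{
        *num_slots = (info & MPI2_DRVMAP0_MAPINFO_SLOT_MASK) >>
            MPI2_DRVMAP0_MAPINFO_SLOT_SHIFT;
        *missing_count = info & MPI2_DRVMAP0_MAPINFO_MISSING_MASK;
}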
*/ void mpr_mapping_topology_change_event(struct mpr_softc *sc, Mpi2EventDataSasTopologyChangeList_t *event_data) { struct _map_topology_change topo_change; struct _map_phy_change *phy_change; Mpi2EventSasTopoPhyEntry_t *event_phy_change; u8 i, num_entries; topo_change.enc_handle = le16toh(event_data->EnclosureHandle); topo_change.exp_handle = le16toh(event_data->ExpanderDevHandle); num_entries = event_data->NumEntries; topo_change.num_entries = num_entries; topo_change.start_phy_num = event_data->StartPhyNum; topo_change.num_phys = event_data->NumPhys; topo_change.exp_status = event_data->ExpStatus; event_phy_change = event_data->PHY; topo_change.phy_details = NULL; if (!num_entries) goto out; - phy_change = malloc(sizeof(struct _map_phy_change) * num_entries, + phy_change = mallocarray(num_entries, sizeof(struct _map_phy_change), M_MPR, M_NOWAIT|M_ZERO); topo_change.phy_details = phy_change; if (!phy_change) goto out; for (i = 0; i < num_entries; i++, event_phy_change++, phy_change++) { phy_change->dev_handle = le16toh(event_phy_change-> AttachedDevHandle); phy_change->reason = event_phy_change->PhyStatus & MPI2_EVENT_SAS_TOPO_RC_MASK; } _mapping_update_missing_count(sc, &topo_change); _mapping_get_dev_info(sc, &topo_change); _mapping_clear_removed_entries(sc); _mapping_add_new_device(sc, &topo_change); out: free(topo_change.phy_details, M_MPR); _mapping_flush_dpm_pages(sc); if (sc->pending_map_events) sc->pending_map_events--; } /** * mpr_mapping_pcie_topology_change_event - handle PCIe topology change events * @sc: per adapter object * @event_data: event data payload * * Returns nothing. */ void mpr_mapping_pcie_topology_change_event(struct mpr_softc *sc, Mpi26EventDataPCIeTopologyChangeList_t *event_data) { struct _map_pcie_topology_change topo_change; struct _map_port_change *port_change; Mpi26EventPCIeTopoPortEntry_t *event_port_change; u8 i, num_entries; topo_change.switch_dev_handle = le16toh(event_data->SwitchDevHandle); topo_change.enc_handle = le16toh(event_data->EnclosureHandle); num_entries = event_data->NumEntries; topo_change.num_entries = num_entries; topo_change.start_port_num = event_data->StartPortNum; topo_change.num_ports = event_data->NumPorts; topo_change.switch_status = event_data->SwitchStatus; event_port_change = event_data->PortEntry; topo_change.port_details = NULL; if (!num_entries) goto out; - port_change = malloc(sizeof(struct _map_port_change) * num_entries, + port_change = mallocarray(num_entries, sizeof(struct _map_port_change), M_MPR, M_NOWAIT|M_ZERO); topo_change.port_details = port_change; if (!port_change) goto out; for (i = 0; i < num_entries; i++, event_port_change++, port_change++) { port_change->dev_handle = le16toh(event_port_change-> AttachedDevHandle); port_change->reason = event_port_change->PortStatus; } _mapping_update_pcie_missing_count(sc, &topo_change); _mapping_get_pcie_dev_info(sc, &topo_change); _mapping_clear_removed_entries(sc); _mapping_add_new_pcie_device(sc, &topo_change); out: free(topo_change.port_details, M_MPR); _mapping_flush_dpm_pages(sc); if (sc->pending_map_events) sc->pending_map_events--; } /** * mpr_mapping_ir_config_change_event - handle IR config change list events * @sc: per adapter object * @event_data: event data payload * * Returns nothing. 
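Both topology handlers above share the same cleanup shape: the per-entry scratch pointer is set to NULL before the conditional mallocarray() so a single out: label can free it unconditionally (free(9) does nothing for a NULL pointer, which the handlers already rely on when NumEntries is zero) and still flush DPM pages and decrement the pending-event count. A schematic of that shape, not driver code:

static void
example_event_handler(struct mpr_softc *sc, uint8_t num_entries)
{
        struct _map_phy_change *details = NULL;

        if (num_entries != 0) {
                details = mallocarray(num_entries, sizeof(*details), M_MPR,
                    M_NOWAIT | M_ZERO);
                if (details == NULL)
                        goto out;
                /* ... fill and process the per-entry scratch array ... */
        }
out:
        free(details, M_MPR);           /* safe even when still NULL */
        _mapping_flush_dpm_pages(sc);
        if (sc->pending_map_events)
                sc->pending_map_events--;
}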
*/ void mpr_mapping_ir_config_change_event(struct mpr_softc *sc, Mpi2EventDataIrConfigChangeList_t *event_data) { Mpi2EventIrConfigElement_t *element; int i; u64 *wwid_table; u32 map_idx, flags; struct dev_mapping_table *mt_entry; u16 element_flags; - wwid_table = malloc(sizeof(u64) * event_data->NumElements, M_MPR, + wwid_table = mallocarray(event_data->NumElements, sizeof(u64), M_MPR, M_NOWAIT | M_ZERO); if (!wwid_table) goto out; element = (Mpi2EventIrConfigElement_t *)&event_data->ConfigElement[0]; flags = le32toh(event_data->Flags); /* * For volume changes, get the WWID for the volume and put it in a * table to be used in the processing of the IR change event. */ for (i = 0; i < event_data->NumElements; i++, element++) { element_flags = le16toh(element->ElementFlags); if ((element->ReasonCode != MPI2_EVENT_IR_CHANGE_RC_ADDED) && (element->ReasonCode != MPI2_EVENT_IR_CHANGE_RC_REMOVED) && (element->ReasonCode != MPI2_EVENT_IR_CHANGE_RC_NO_CHANGE) && (element->ReasonCode != MPI2_EVENT_IR_CHANGE_RC_VOLUME_CREATED)) continue; if ((element_flags & MPI2_EVENT_IR_CHANGE_EFLAGS_ELEMENT_TYPE_MASK) == MPI2_EVENT_IR_CHANGE_EFLAGS_VOLUME_ELEMENT) { mpr_config_get_volume_wwid(sc, le16toh(element->VolDevHandle), &wwid_table[i]); } } /* * Check the ReasonCode for each element in the IR event and Add/Remove * Volumes or Physical Disks of Volumes to/from the mapping table. Use * the WWIDs gotten above in wwid_table. */ if (flags == MPI2_EVENT_IR_CHANGE_FLAGS_FOREIGN_CONFIG) goto out; else { element = (Mpi2EventIrConfigElement_t *)&event_data-> ConfigElement[0]; for (i = 0; i < event_data->NumElements; i++, element++) { if (element->ReasonCode == MPI2_EVENT_IR_CHANGE_RC_ADDED || element->ReasonCode == MPI2_EVENT_IR_CHANGE_RC_VOLUME_CREATED) { map_idx = _mapping_get_ir_mt_idx_from_wwid (sc, wwid_table[i]); if (map_idx != MPR_MAPTABLE_BAD_IDX) { /* * The volume is already in the mapping * table. Just update it's info. */ mt_entry = &sc->mapping_table[map_idx]; mt_entry->id = map_idx; mt_entry->dev_handle = le16toh (element->VolDevHandle); mt_entry->device_info = MPR_DEV_RESERVED | MPR_MAP_IN_USE; _mapping_update_ir_missing_cnt(sc, map_idx, element, wwid_table[i]); continue; } /* * Volume is not in mapping table yet. Find a * free entry in the mapping table at the * volume mapping locations. If no entries are * available, this is an error because it means * there are more volumes than can be mapped * and that should never happen for volumes. 
*/ map_idx = _mapping_get_free_ir_mt_idx(sc); if (map_idx == MPR_MAPTABLE_BAD_IDX) { mpr_dprint(sc, MPR_ERROR | MPR_MAPPING, "%s: failed to add the volume with " "handle 0x%04x because there is no " "free space available in the " "mapping table\n", __func__, le16toh(element->VolDevHandle)); continue; } mt_entry = &sc->mapping_table[map_idx]; mt_entry->physical_id = wwid_table[i]; mt_entry->id = map_idx; mt_entry->dev_handle = le16toh(element-> VolDevHandle); mt_entry->device_info = MPR_DEV_RESERVED | MPR_MAP_IN_USE; _mapping_update_ir_missing_cnt(sc, map_idx, element, wwid_table[i]); } else if (element->ReasonCode == MPI2_EVENT_IR_CHANGE_RC_REMOVED) { map_idx = _mapping_get_ir_mt_idx_from_wwid(sc, wwid_table[i]); if (map_idx == MPR_MAPTABLE_BAD_IDX) { mpr_dprint(sc, MPR_MAPPING,"%s: Failed " "to remove a volume because it has " "already been removed.\n", __func__); continue; } _mapping_update_ir_missing_cnt(sc, map_idx, element, wwid_table[i]); } else if (element->ReasonCode == MPI2_EVENT_IR_CHANGE_RC_VOLUME_DELETED) { map_idx = _mapping_get_mt_idx_from_handle(sc, le16toh(element->VolDevHandle)); if (map_idx == MPR_MAPTABLE_BAD_IDX) { mpr_dprint(sc, MPR_MAPPING,"%s: Failed " "to remove volume with handle " "0x%04x because it has already " "been removed.\n", __func__, le16toh(element->VolDevHandle)); continue; } mt_entry = &sc->mapping_table[map_idx]; _mapping_update_ir_missing_cnt(sc, map_idx, element, mt_entry->physical_id); } } } out: _mapping_flush_dpm_pages(sc); free(wwid_table, M_MPR); if (sc->pending_map_events) sc->pending_map_events--; } Index: head/sys/dev/mps/mps.c =================================================================== --- head/sys/dev/mps/mps.c (revision 327948) +++ head/sys/dev/mps/mps.c (revision 327949) @@ -1,3055 +1,3055 @@ /*- * SPDX-License-Identifier: BSD-2-Clause-FreeBSD * * Copyright (c) 2009 Yahoo! Inc. * Copyright (c) 2011-2015 LSI Corp. * Copyright (c) 2013-2015 Avago Technologies * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
* * Avago Technologies (LSI) MPT-Fusion Host Adapter FreeBSD * * $FreeBSD$ */ #include __FBSDID("$FreeBSD$"); /* Communications core for Avago Technologies (LSI) MPT2 */ /* TODO Move headers to mpsvar */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include static int mps_diag_reset(struct mps_softc *sc, int sleep_flag); static int mps_init_queues(struct mps_softc *sc); static void mps_resize_queues(struct mps_softc *sc); static int mps_message_unit_reset(struct mps_softc *sc, int sleep_flag); static int mps_transition_operational(struct mps_softc *sc); static int mps_iocfacts_allocate(struct mps_softc *sc, uint8_t attaching); static void mps_iocfacts_free(struct mps_softc *sc); static void mps_startup(void *arg); static int mps_send_iocinit(struct mps_softc *sc); static int mps_alloc_queues(struct mps_softc *sc); static int mps_alloc_hw_queues(struct mps_softc *sc); static int mps_alloc_replies(struct mps_softc *sc); static int mps_alloc_requests(struct mps_softc *sc); static int mps_attach_log(struct mps_softc *sc); static __inline void mps_complete_command(struct mps_softc *sc, struct mps_command *cm); static void mps_dispatch_event(struct mps_softc *sc, uintptr_t data, MPI2_EVENT_NOTIFICATION_REPLY *reply); static void mps_config_complete(struct mps_softc *sc, struct mps_command *cm); static void mps_periodic(void *); static int mps_reregister_events(struct mps_softc *sc); static void mps_enqueue_request(struct mps_softc *sc, struct mps_command *cm); static int mps_get_iocfacts(struct mps_softc *sc, MPI2_IOC_FACTS_REPLY *facts); static int mps_wait_db_ack(struct mps_softc *sc, int timeout, int sleep_flag); static int mps_debug_sysctl(SYSCTL_HANDLER_ARGS); static void mps_parse_debug(struct mps_softc *sc, char *list); SYSCTL_NODE(_hw, OID_AUTO, mps, CTLFLAG_RD, 0, "MPS Driver Parameters"); MALLOC_DEFINE(M_MPT2, "mps", "mpt2 driver memory"); /* * Do a "Diagnostic Reset" aka a hard reset. This should get the chip out of * any state and back to its initialization state machine. */ static char mpt2_reset_magic[] = { 0x00, 0x0f, 0x04, 0x0b, 0x02, 0x07, 0x0d }; /* Added this union to smoothly convert le64toh cm->cm_desc.Words. * Compiler only support unint64_t to be passed as argument. * Otherwise it will throw below error * "aggregate value used where an integer was expected" */ typedef union _reply_descriptor { u64 word; struct { u32 low; u32 high; } u; }reply_descriptor,address_descriptor; /* Rate limit chain-fail messages to 1 per minute */ static struct timeval mps_chainfail_interval = { 60, 0 }; /* * sleep_flag can be either CAN_SLEEP or NO_SLEEP. * If this function is called from process context, it can sleep * and there is no harm to sleep, in case if this fuction is called * from Interrupt handler, we can not sleep and need NO_SLEEP flag set. * based on sleep flags driver will call either msleep, pause or DELAY. * msleep and pause are of same variant, but pause is used when mps_mtx * is not hold by driver. 
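The reply_descriptor union defined above exists so the 64-bit request descriptor can be byte-swapped as a single word while still being written out as two 32-bit register accesses; its real use appears later in mps_enqueue_request(). A usage sketch that mirrors that sequence:

/*
 * Usage sketch for the reply_descriptor union: assemble the descriptor
 * from its two halves, swap it as one 64-bit word, then post the halves
 * to the two descriptor registers.
 */
static void
example_post_descriptor(struct mps_softc *sc, struct mps_command *cm)
{
        reply_descriptor rd;

        rd.u.low = cm->cm_desc.Words.Low;
        rd.u.high = cm->cm_desc.Words.High;
        rd.word = htole64(rd.word);
        mps_regwrite(sc, MPI2_REQUEST_DESCRIPTOR_POST_LOW_OFFSET, rd.u.low);
        mps_regwrite(sc, MPI2_REQUEST_DESCRIPTOR_POST_HIGH_OFFSET, rd.u.high);
}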
* */ static int mps_diag_reset(struct mps_softc *sc,int sleep_flag) { uint32_t reg; int i, error, tries = 0; uint8_t first_wait_done = FALSE; mps_dprint(sc, MPS_INIT, "%s entered\n", __func__); /* Clear any pending interrupts */ mps_regwrite(sc, MPI2_HOST_INTERRUPT_STATUS_OFFSET, 0x0); /* * Force NO_SLEEP for threads prohibited to sleep * e.a Thread from interrupt handler are prohibited to sleep. */ if (curthread->td_no_sleeping != 0) sleep_flag = NO_SLEEP; mps_dprint(sc, MPS_INIT, "sequence start, sleep_flag= %d\n", sleep_flag); /* Push the magic sequence */ error = ETIMEDOUT; while (tries++ < 20) { for (i = 0; i < sizeof(mpt2_reset_magic); i++) mps_regwrite(sc, MPI2_WRITE_SEQUENCE_OFFSET, mpt2_reset_magic[i]); /* wait 100 msec */ if (mtx_owned(&sc->mps_mtx) && sleep_flag == CAN_SLEEP) msleep(&sc->msleep_fake_chan, &sc->mps_mtx, 0, "mpsdiag", hz/10); else if (sleep_flag == CAN_SLEEP) pause("mpsdiag", hz/10); else DELAY(100 * 1000); reg = mps_regread(sc, MPI2_HOST_DIAGNOSTIC_OFFSET); if (reg & MPI2_DIAG_DIAG_WRITE_ENABLE) { error = 0; break; } } if (error) { mps_dprint(sc, MPS_INIT, "sequence failed, error=%d, exit\n", error); return (error); } /* Send the actual reset. XXX need to refresh the reg? */ reg |= MPI2_DIAG_RESET_ADAPTER; mps_dprint(sc, MPS_INIT, "sequence success, sending reset, reg= 0x%x\n", reg); mps_regwrite(sc, MPI2_HOST_DIAGNOSTIC_OFFSET, reg); /* Wait up to 300 seconds in 50ms intervals */ error = ETIMEDOUT; for (i = 0; i < 6000; i++) { /* * Wait 50 msec. If this is the first time through, wait 256 * msec to satisfy Diag Reset timing requirements. */ if (first_wait_done) { if (mtx_owned(&sc->mps_mtx) && sleep_flag == CAN_SLEEP) msleep(&sc->msleep_fake_chan, &sc->mps_mtx, 0, "mpsdiag", hz/20); else if (sleep_flag == CAN_SLEEP) pause("mpsdiag", hz/20); else DELAY(50 * 1000); } else { DELAY(256 * 1000); first_wait_done = TRUE; } /* * Check for the RESET_ADAPTER bit to be cleared first, then * wait for the RESET state to be cleared, which takes a little * longer. */ reg = mps_regread(sc, MPI2_HOST_DIAGNOSTIC_OFFSET); if (reg & MPI2_DIAG_RESET_ADAPTER) { continue; } reg = mps_regread(sc, MPI2_DOORBELL_OFFSET); if ((reg & MPI2_IOC_STATE_MASK) != MPI2_IOC_STATE_RESET) { error = 0; break; } } if (error) { mps_dprint(sc, MPS_INIT, "reset failed, error= %d, exit\n", error); return (error); } mps_regwrite(sc, MPI2_WRITE_SEQUENCE_OFFSET, 0x0); mps_dprint(sc, MPS_INIT, "diag reset success, exit\n"); return (0); } static int mps_message_unit_reset(struct mps_softc *sc, int sleep_flag) { int error; MPS_FUNCTRACE(sc); mps_dprint(sc, MPS_INIT, "%s entered\n", __func__); error = 0; mps_regwrite(sc, MPI2_DOORBELL_OFFSET, MPI2_FUNCTION_IOC_MESSAGE_UNIT_RESET << MPI2_DOORBELL_FUNCTION_SHIFT); if (mps_wait_db_ack(sc, 5, sleep_flag) != 0) { mps_dprint(sc, MPS_INIT|MPS_FAULT, "Doorbell handshake failed\n"); error = ETIMEDOUT; } mps_dprint(sc, MPS_INIT, "%s exit\n", __func__); return (error); } static int mps_transition_ready(struct mps_softc *sc) { uint32_t reg, state; int error, tries = 0; int sleep_flags; MPS_FUNCTRACE(sc); /* If we are in attach call, do not sleep */ sleep_flags = (sc->mps_flags & MPS_FLAGS_ATTACH_DONE) ? CAN_SLEEP:NO_SLEEP; error = 0; mps_dprint(sc, MPS_INIT, "%s entered, sleep_flags= %d\n", __func__, sleep_flags); while (tries++ < 1200) { reg = mps_regread(sc, MPI2_DOORBELL_OFFSET); mps_dprint(sc, MPS_INIT, " Doorbell= 0x%x\n", reg); /* * Ensure the IOC is ready to talk. If it's not, try * resetting it. 
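The reset and doorbell paths above repeat the same three-way wait: msleep() on a fake channel when the driver mutex is held and sleeping is allowed, pause() when sleeping is allowed but the mutex is not held, and a DELAY() busy-wait when sleeping is forbidden. A hypothetical helper that just names the cases; example_wait and its parameters are not part of the driver:

static void
example_wait(struct mps_softc *sc, int sleep_flag, int ticks, int us)
{
        if (sleep_flag == CAN_SLEEP && mtx_owned(&sc->mps_mtx))
                msleep(&sc->msleep_fake_chan, &sc->mps_mtx, 0, "mpswait",
                    ticks);                     /* sleep, mutex is held */
        else if (sleep_flag == CAN_SLEEP)
                pause("mpswait", ticks);        /* sleep, no mutex held */
        else
                DELAY(us);                      /* cannot sleep: busy-wait */
}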
*/ if (reg & MPI2_DOORBELL_USED) { mps_dprint(sc, MPS_INIT, " Not ready, sending diag " "reset\n"); mps_diag_reset(sc, sleep_flags); DELAY(50000); continue; } /* Is the adapter owned by another peer? */ if ((reg & MPI2_DOORBELL_WHO_INIT_MASK) == (MPI2_WHOINIT_PCI_PEER << MPI2_DOORBELL_WHO_INIT_SHIFT)) { mps_dprint(sc, MPS_INIT|MPS_FAULT, "IOC is under the " "control of another peer host, aborting " "initialization.\n"); error = ENXIO; break; } state = reg & MPI2_IOC_STATE_MASK; if (state == MPI2_IOC_STATE_READY) { /* Ready to go! */ error = 0; break; } else if (state == MPI2_IOC_STATE_FAULT) { mps_dprint(sc, MPS_INIT|MPS_FAULT, "IOC in fault " "state 0x%x, resetting\n", state & MPI2_DOORBELL_FAULT_CODE_MASK); mps_diag_reset(sc, sleep_flags); } else if (state == MPI2_IOC_STATE_OPERATIONAL) { /* Need to take ownership */ mps_message_unit_reset(sc, sleep_flags); } else if (state == MPI2_IOC_STATE_RESET) { /* Wait a bit, IOC might be in transition */ mps_dprint(sc, MPS_INIT|MPS_FAULT, "IOC in unexpected reset state\n"); } else { mps_dprint(sc, MPS_INIT|MPS_FAULT, "IOC in unknown state 0x%x\n", state); error = EINVAL; break; } /* Wait 50ms for things to settle down. */ DELAY(50000); } if (error) mps_dprint(sc, MPS_INIT|MPS_FAULT, "Cannot transition IOC to ready\n"); mps_dprint(sc, MPS_INIT, "%s exit\n", __func__); return (error); } static int mps_transition_operational(struct mps_softc *sc) { uint32_t reg, state; int error; MPS_FUNCTRACE(sc); error = 0; reg = mps_regread(sc, MPI2_DOORBELL_OFFSET); mps_dprint(sc, MPS_INIT, "%s entered, Doorbell= 0x%x\n", __func__, reg); state = reg & MPI2_IOC_STATE_MASK; if (state != MPI2_IOC_STATE_READY) { mps_dprint(sc, MPS_INIT, "IOC not ready\n"); if ((error = mps_transition_ready(sc)) != 0) { mps_dprint(sc, MPS_INIT|MPS_FAULT, "failed to transition ready, exit\n"); return (error); } } error = mps_send_iocinit(sc); mps_dprint(sc, MPS_INIT, "%s exit\n", __func__); return (error); } static void mps_resize_queues(struct mps_softc *sc) { int reqcr, prireqcr; /* * Size the queues. Since the reply queues always need one free * entry, we'll deduct one reply message here. The LSI documents * suggest instead to add a count to the request queue, but I think * that it's better to deduct from reply queue. */ prireqcr = MAX(1, sc->max_prireqframes); prireqcr = MIN(prireqcr, sc->facts->HighPriorityCredit); reqcr = MAX(2, sc->max_reqframes); reqcr = MIN(reqcr, sc->facts->RequestCredit); sc->num_reqs = prireqcr + reqcr; sc->num_replies = MIN(sc->max_replyframes + sc->max_evtframes, sc->facts->MaxReplyDescriptorPostQueueDepth) - 1; /* * Figure out the number of MSIx-based queues. If the firmware or * user has done something crazy and not allowed enough credit for * the queues to be useful then don't enable multi-queue. */ if (sc->facts->MaxMSIxVectors < 2) sc->msi_msgs = 1; if (sc->msi_msgs > 1) { sc->msi_msgs = MIN(sc->msi_msgs, mp_ncpus); sc->msi_msgs = MIN(sc->msi_msgs, sc->facts->MaxMSIxVectors); if (sc->num_reqs / sc->msi_msgs < 2) sc->msi_msgs = 1; } mps_dprint(sc, MPS_INIT, "Sized queues to q=%d reqs=%d replies=%d\n", sc->msi_msgs, sc->num_reqs, sc->num_replies); } /* * This is called during attach and when re-initializing due to a Diag Reset. * IOC Facts is used to allocate many of the structures needed by the driver. 
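To make the clamping in mps_resize_queues() concrete, here is the arithmetic with illustrative numbers; the tunable and IOC-facts values below are assumptions chosen for the example, not driver defaults.

/*
 * Worked example of mps_resize_queues() (illustrative inputs):
 *
 *   max_prireqframes = 128,  HighPriorityCredit = 96    -> prireqcr = 96
 *   max_reqframes    = 2048, RequestCredit      = 1720  -> reqcr    = 1720
 *   num_reqs    = 96 + 1720 = 1816
 *
 *   max_replyframes + max_evtframes  = 2048 + 32 = 2080
 *   MaxReplyDescriptorPostQueueDepth = 1024
 *   num_replies = MIN(2080, 1024) - 1 = 1023   (one entry kept free)
 */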
* If called from attach, de-allocation is not required because the driver has * not allocated any structures yet, but if called from a Diag Reset, previously * allocated structures based on IOC Facts will need to be freed and re- * allocated bases on the latest IOC Facts. */ static int mps_iocfacts_allocate(struct mps_softc *sc, uint8_t attaching) { int error; Mpi2IOCFactsReply_t saved_facts; uint8_t saved_mode, reallocating; mps_dprint(sc, MPS_INIT|MPS_TRACE, "%s entered\n", __func__); /* Save old IOC Facts and then only reallocate if Facts have changed */ if (!attaching) { bcopy(sc->facts, &saved_facts, sizeof(MPI2_IOC_FACTS_REPLY)); } /* * Get IOC Facts. In all cases throughout this function, panic if doing * a re-initialization and only return the error if attaching so the OS * can handle it. */ if ((error = mps_get_iocfacts(sc, sc->facts)) != 0) { if (attaching) { mps_dprint(sc, MPS_INIT|MPS_FAULT, "Failed to get " "IOC Facts with error %d, exit\n", error); return (error); } else { panic("%s failed to get IOC Facts with error %d\n", __func__, error); } } MPS_DPRINT_PAGE(sc, MPS_XINFO, iocfacts, sc->facts); snprintf(sc->fw_version, sizeof(sc->fw_version), "%02d.%02d.%02d.%02d", sc->facts->FWVersion.Struct.Major, sc->facts->FWVersion.Struct.Minor, sc->facts->FWVersion.Struct.Unit, sc->facts->FWVersion.Struct.Dev); mps_dprint(sc, MPS_INFO, "Firmware: %s, Driver: %s\n", sc->fw_version, MPS_DRIVER_VERSION); mps_dprint(sc, MPS_INFO, "IOCCapabilities: %b\n", sc->facts->IOCCapabilities, "\20" "\3ScsiTaskFull" "\4DiagTrace" "\5SnapBuf" "\6ExtBuf" "\7EEDP" "\10BiDirTarg" "\11Multicast" "\14TransRetry" "\15IR" "\16EventReplay" "\17RaidAccel" "\20MSIXIndex" "\21HostDisc"); /* * If the chip doesn't support event replay then a hard reset will be * required to trigger a full discovery. Do the reset here then * retransition to Ready. A hard reset might have already been done, * but it doesn't hurt to do it again. Only do this if attaching, not * for a Diag Reset. */ if (attaching && ((sc->facts->IOCCapabilities & MPI2_IOCFACTS_CAPABILITY_EVENT_REPLAY) == 0)) { mps_dprint(sc, MPS_INIT, "No event replay, reseting\n"); mps_diag_reset(sc, NO_SLEEP); if ((error = mps_transition_ready(sc)) != 0) { mps_dprint(sc, MPS_INIT|MPS_FAULT, "Failed to " "transition to ready with error %d, exit\n", error); return (error); } } /* * Set flag if IR Firmware is loaded. If the RAID Capability has * changed from the previous IOC Facts, log a warning, but only if * checking this after a Diag Reset and not during attach. 
*/ saved_mode = sc->ir_firmware; if (sc->facts->IOCCapabilities & MPI2_IOCFACTS_CAPABILITY_INTEGRATED_RAID) sc->ir_firmware = 1; if (!attaching) { if (sc->ir_firmware != saved_mode) { mps_dprint(sc, MPS_INIT|MPS_FAULT, "new IR/IT mode " "in IOC Facts does not match previous mode\n"); } } /* Only deallocate and reallocate if relevant IOC Facts have changed */ reallocating = FALSE; sc->mps_flags &= ~MPS_FLAGS_REALLOCATED; if ((!attaching) && ((saved_facts.MsgVersion != sc->facts->MsgVersion) || (saved_facts.HeaderVersion != sc->facts->HeaderVersion) || (saved_facts.MaxChainDepth != sc->facts->MaxChainDepth) || (saved_facts.RequestCredit != sc->facts->RequestCredit) || (saved_facts.ProductID != sc->facts->ProductID) || (saved_facts.IOCCapabilities != sc->facts->IOCCapabilities) || (saved_facts.IOCRequestFrameSize != sc->facts->IOCRequestFrameSize) || (saved_facts.MaxTargets != sc->facts->MaxTargets) || (saved_facts.MaxSasExpanders != sc->facts->MaxSasExpanders) || (saved_facts.MaxEnclosures != sc->facts->MaxEnclosures) || (saved_facts.HighPriorityCredit != sc->facts->HighPriorityCredit) || (saved_facts.MaxReplyDescriptorPostQueueDepth != sc->facts->MaxReplyDescriptorPostQueueDepth) || (saved_facts.ReplyFrameSize != sc->facts->ReplyFrameSize) || (saved_facts.MaxVolumes != sc->facts->MaxVolumes) || (saved_facts.MaxPersistentEntries != sc->facts->MaxPersistentEntries))) { reallocating = TRUE; /* Record that we reallocated everything */ sc->mps_flags |= MPS_FLAGS_REALLOCATED; } /* * Some things should be done if attaching or re-allocating after a Diag * Reset, but are not needed after a Diag Reset if the FW has not * changed. */ if (attaching || reallocating) { /* * Check if controller supports FW diag buffers and set flag to * enable each type. */ if (sc->facts->IOCCapabilities & MPI2_IOCFACTS_CAPABILITY_DIAG_TRACE_BUFFER) sc->fw_diag_buffer_list[MPI2_DIAG_BUF_TYPE_TRACE]. enabled = TRUE; if (sc->facts->IOCCapabilities & MPI2_IOCFACTS_CAPABILITY_SNAPSHOT_BUFFER) sc->fw_diag_buffer_list[MPI2_DIAG_BUF_TYPE_SNAPSHOT]. enabled = TRUE; if (sc->facts->IOCCapabilities & MPI2_IOCFACTS_CAPABILITY_EXTENDED_BUFFER) sc->fw_diag_buffer_list[MPI2_DIAG_BUF_TYPE_EXTENDED]. enabled = TRUE; /* * Set flag if EEDP is supported and if TLR is supported. */ if (sc->facts->IOCCapabilities & MPI2_IOCFACTS_CAPABILITY_EEDP) sc->eedp_enabled = TRUE; if (sc->facts->IOCCapabilities & MPI2_IOCFACTS_CAPABILITY_TLR) sc->control_TLR = TRUE; mps_resize_queues(sc); /* * Initialize all Tail Queues */ TAILQ_INIT(&sc->req_list); TAILQ_INIT(&sc->high_priority_req_list); TAILQ_INIT(&sc->chain_list); TAILQ_INIT(&sc->tm_list); } /* * If doing a Diag Reset and the FW is significantly different * (reallocating will be set above in IOC Facts comparison), then all * buffers based on the IOC Facts will need to be freed before they are * reallocated. */ if (reallocating) { mps_iocfacts_free(sc); mpssas_realloc_targets(sc, saved_facts.MaxTargets + saved_facts.MaxVolumes); } /* * Any deallocation has been completed. Now start reallocating * if needed. Will only need to reallocate if attaching or if the new * IOC Facts are different from the previous IOC Facts after a Diag * Reset. Targets have already been allocated above if needed. 
*/ error = 0; while (attaching || reallocating) { if ((error = mps_alloc_hw_queues(sc)) != 0) break; if ((error = mps_alloc_replies(sc)) != 0) break; if ((error = mps_alloc_requests(sc)) != 0) break; if ((error = mps_alloc_queues(sc)) != 0) break; break; } if (error) { mps_dprint(sc, MPS_INIT|MPS_FAULT, "Failed to alloc queues with error %d\n", error); mps_free(sc); return (error); } /* Always initialize the queues */ bzero(sc->free_queue, sc->fqdepth * 4); mps_init_queues(sc); /* * Always get the chip out of the reset state, but only panic if not * attaching. If attaching and there is an error, that is handled by * the OS. */ error = mps_transition_operational(sc); if (error != 0) { mps_dprint(sc, MPS_INIT|MPS_FAULT, "Failed to " "transition to operational with error %d\n", error); mps_free(sc); return (error); } /* * Finish the queue initialization. * These are set here instead of in mps_init_queues() because the * IOC resets these values during the state transition in * mps_transition_operational(). The free index is set to 1 * because the corresponding index in the IOC is set to 0, and the * IOC treats the queues as full if both are set to the same value. * Hence the reason that the queue can't hold all of the possible * replies. */ sc->replypostindex = 0; mps_regwrite(sc, MPI2_REPLY_FREE_HOST_INDEX_OFFSET, sc->replyfreeindex); mps_regwrite(sc, MPI2_REPLY_POST_HOST_INDEX_OFFSET, 0); /* * Attach the subsystems so they can prepare their event masks. * XXX Should be dynamic so that IM/IR and user modules can attach */ error = 0; while (attaching) { mps_dprint(sc, MPS_INIT, "Attaching subsystems\n"); if ((error = mps_attach_log(sc)) != 0) break; if ((error = mps_attach_sas(sc)) != 0) break; if ((error = mps_attach_user(sc)) != 0) break; break; } if (error) { mps_dprint(sc, MPS_INIT|MPS_FAULT, "Failed to attach all " "subsystems: error %d\n", error); mps_free(sc); return (error); } /* * XXX If the number of MSI-X vectors changes during re-init, this * won't see it and adjust. */ if (attaching && (error = mps_pci_setup_interrupts(sc)) != 0) { mps_dprint(sc, MPS_INIT|MPS_FAULT, "Failed to setup " "interrupts\n"); mps_free(sc); return (error); } /* * Set flag if this is a WD controller. This shouldn't ever change, but * reset it after a Diag Reset, just in case. */ sc->WD_available = FALSE; if (pci_get_device(sc->mps_dev) == MPI2_MFGPAGE_DEVID_SSS6200) sc->WD_available = TRUE; return (error); } /* * This is called if memory is being free (during detach for example) and when * buffers need to be reallocated due to a Diag Reset. 
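The free-index comment above relies on the usual ring convention spelled out below: if the host's reply-free index ever equals the IOC's index, the IOC treats the queue as full, so one slot is always sacrificed.

/*
 * Why one reply slot is unusable: the reply free queue is a ring with a
 * host-written producer index and an IOC-side consumer index, and the
 * hardware convention is
 *
 *      host_index == ioc_index   ->  queue is full
 *
 * so the two indices must never be allowed to meet.  With fqdepth slots
 * the host only ever publishes fqdepth - 1 free replies, and starts its
 * index one ahead of the IOC's (1 vs. 0) after reset.
 */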
*/ static void mps_iocfacts_free(struct mps_softc *sc) { struct mps_command *cm; int i; mps_dprint(sc, MPS_TRACE, "%s\n", __func__); if (sc->free_busaddr != 0) bus_dmamap_unload(sc->queues_dmat, sc->queues_map); if (sc->free_queue != NULL) bus_dmamem_free(sc->queues_dmat, sc->free_queue, sc->queues_map); if (sc->queues_dmat != NULL) bus_dma_tag_destroy(sc->queues_dmat); if (sc->chain_busaddr != 0) bus_dmamap_unload(sc->chain_dmat, sc->chain_map); if (sc->chain_frames != NULL) bus_dmamem_free(sc->chain_dmat, sc->chain_frames, sc->chain_map); if (sc->chain_dmat != NULL) bus_dma_tag_destroy(sc->chain_dmat); if (sc->sense_busaddr != 0) bus_dmamap_unload(sc->sense_dmat, sc->sense_map); if (sc->sense_frames != NULL) bus_dmamem_free(sc->sense_dmat, sc->sense_frames, sc->sense_map); if (sc->sense_dmat != NULL) bus_dma_tag_destroy(sc->sense_dmat); if (sc->reply_busaddr != 0) bus_dmamap_unload(sc->reply_dmat, sc->reply_map); if (sc->reply_frames != NULL) bus_dmamem_free(sc->reply_dmat, sc->reply_frames, sc->reply_map); if (sc->reply_dmat != NULL) bus_dma_tag_destroy(sc->reply_dmat); if (sc->req_busaddr != 0) bus_dmamap_unload(sc->req_dmat, sc->req_map); if (sc->req_frames != NULL) bus_dmamem_free(sc->req_dmat, sc->req_frames, sc->req_map); if (sc->req_dmat != NULL) bus_dma_tag_destroy(sc->req_dmat); if (sc->chains != NULL) free(sc->chains, M_MPT2); if (sc->commands != NULL) { for (i = 1; i < sc->num_reqs; i++) { cm = &sc->commands[i]; bus_dmamap_destroy(sc->buffer_dmat, cm->cm_dmamap); } free(sc->commands, M_MPT2); } if (sc->buffer_dmat != NULL) bus_dma_tag_destroy(sc->buffer_dmat); mps_pci_free_interrupts(sc); free(sc->queues, M_MPT2); sc->queues = NULL; } /* * The terms diag reset and hard reset are used interchangeably in the MPI * docs to mean resetting the controller chip. In this code diag reset * cleans everything up, and the hard reset function just sends the reset * sequence to the chip. This should probably be refactored so that every * subsystem gets a reset notification of some sort, and can clean up * appropriately. */ int mps_reinit(struct mps_softc *sc) { int error; struct mpssas_softc *sassc; sassc = sc->sassc; MPS_FUNCTRACE(sc); mtx_assert(&sc->mps_mtx, MA_OWNED); mps_dprint(sc, MPS_INIT|MPS_INFO, "Reinitializing controller\n"); if (sc->mps_flags & MPS_FLAGS_DIAGRESET) { mps_dprint(sc, MPS_INIT, "Reset already in progress\n"); return 0; } /* make sure the completion callbacks can recognize they're getting * a NULL cm_reply due to a reset. */ sc->mps_flags |= MPS_FLAGS_DIAGRESET; /* * Mask interrupts here. */ mps_dprint(sc, MPS_INIT, "masking interrupts and resetting\n"); mps_mask_intr(sc); error = mps_diag_reset(sc, CAN_SLEEP); if (error != 0) { /* XXXSL No need to panic here */ panic("%s hard reset failed with error %d\n", __func__, error); } /* Restore the PCI state, including the MSI-X registers */ mps_pci_restore(sc); /* Give the I/O subsystem special priority to get itself prepared */ mpssas_handle_reinit(sc); /* * Get IOC Facts and allocate all structures based on this information. * The attach function will also call mps_iocfacts_allocate at startup. * If relevant values have changed in IOC Facts, this function will free * all of the memory based on IOC Facts and reallocate that memory. */ if ((error = mps_iocfacts_allocate(sc, FALSE)) != 0) { panic("%s IOC Facts based allocation failed with error %d\n", __func__, error); } /* * Mapping structures will be re-allocated after getting IOC Page8, so * free these structures here. 
*/ mps_mapping_exit(sc); /* * The static page function currently read is IOC Page8. Others can be * added in future. It's possible that the values in IOC Page8 have * changed after a Diag Reset due to user modification, so always read * these. Interrupts are masked, so unmask them before getting config * pages. */ mps_unmask_intr(sc); sc->mps_flags &= ~MPS_FLAGS_DIAGRESET; mps_base_static_config_pages(sc); /* * Some mapping info is based in IOC Page8 data, so re-initialize the * mapping tables. */ mps_mapping_initialize(sc); /* * Restart will reload the event masks clobbered by the reset, and * then enable the port. */ mps_reregister_events(sc); /* the end of discovery will release the simq, so we're done. */ mps_dprint(sc, MPS_INIT|MPS_XINFO, "Finished sc %p post %u free %u\n", sc, sc->replypostindex, sc->replyfreeindex); mpssas_release_simq_reinit(sassc); mps_dprint(sc, MPS_INIT, "%s exit\n", __func__); return 0; } /* Wait for the chip to ACK a word that we've put into its FIFO * Wait for seconds. In single loop wait for busy loop * for 500 microseconds. * Total is [ 0.5 * (2000 * ) ] in miliseconds. * */ static int mps_wait_db_ack(struct mps_softc *sc, int timeout, int sleep_flag) { u32 cntdn, count; u32 int_status; u32 doorbell; count = 0; cntdn = (sleep_flag == CAN_SLEEP) ? 1000*timeout : 2000*timeout; do { int_status = mps_regread(sc, MPI2_HOST_INTERRUPT_STATUS_OFFSET); if (!(int_status & MPI2_HIS_SYS2IOC_DB_STATUS)) { mps_dprint(sc, MPS_TRACE, "%s: successful count(%d), timeout(%d)\n", __func__, count, timeout); return 0; } else if (int_status & MPI2_HIS_IOC2SYS_DB_STATUS) { doorbell = mps_regread(sc, MPI2_DOORBELL_OFFSET); if ((doorbell & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT) { mps_dprint(sc, MPS_FAULT, "fault_state(0x%04x)!\n", doorbell); return (EFAULT); } } else if (int_status == 0xFFFFFFFF) goto out; /* If it can sleep, sleep for 1 milisecond, else busy loop for * 0.5 milisecond */ if (mtx_owned(&sc->mps_mtx) && sleep_flag == CAN_SLEEP) msleep(&sc->msleep_fake_chan, &sc->mps_mtx, 0, "mpsdba", hz/1000); else if (sleep_flag == CAN_SLEEP) pause("mpsdba", hz/1000); else DELAY(500); count++; } while (--cntdn); out: mps_dprint(sc, MPS_FAULT, "%s: failed due to timeout count(%d), " "int_status(%x)!\n", __func__, count, int_status); return (ETIMEDOUT); } /* Wait for the chip to signal that the next word in its FIFO can be fetched */ static int mps_wait_db_int(struct mps_softc *sc) { int retry; for (retry = 0; retry < MPS_DB_MAX_WAIT; retry++) { if ((mps_regread(sc, MPI2_HOST_INTERRUPT_STATUS_OFFSET) & MPI2_HIS_IOC2SYS_DB_STATUS) != 0) return (0); DELAY(2000); } return (ETIMEDOUT); } /* Step through the synchronous command state machine, i.e. "Doorbell mode" */ static int mps_request_sync(struct mps_softc *sc, void *req, MPI2_DEFAULT_REPLY *reply, int req_sz, int reply_sz, int timeout) { uint32_t *data32; uint16_t *data16; int i, count, ioc_sz, residual; int sleep_flags = CAN_SLEEP; if (curthread->td_no_sleeping != 0) sleep_flags = NO_SLEEP; /* Step 1 */ mps_regwrite(sc, MPI2_HOST_INTERRUPT_STATUS_OFFSET, 0x0); /* Step 2 */ if (mps_regread(sc, MPI2_DOORBELL_OFFSET) & MPI2_DOORBELL_USED) return (EBUSY); /* Step 3 * Announce that a message is coming through the doorbell. Messages * are pushed at 32bit words, so round up if needed. 
*/ count = (req_sz + 3) / 4; mps_regwrite(sc, MPI2_DOORBELL_OFFSET, (MPI2_FUNCTION_HANDSHAKE << MPI2_DOORBELL_FUNCTION_SHIFT) | (count << MPI2_DOORBELL_ADD_DWORDS_SHIFT)); /* Step 4 */ if (mps_wait_db_int(sc) || (mps_regread(sc, MPI2_DOORBELL_OFFSET) & MPI2_DOORBELL_USED) == 0) { mps_dprint(sc, MPS_FAULT, "Doorbell failed to activate\n"); return (ENXIO); } mps_regwrite(sc, MPI2_HOST_INTERRUPT_STATUS_OFFSET, 0x0); if (mps_wait_db_ack(sc, 5, sleep_flags) != 0) { mps_dprint(sc, MPS_FAULT, "Doorbell handshake failed\n"); return (ENXIO); } /* Step 5 */ /* Clock out the message data synchronously in 32-bit dwords*/ data32 = (uint32_t *)req; for (i = 0; i < count; i++) { mps_regwrite(sc, MPI2_DOORBELL_OFFSET, htole32(data32[i])); if (mps_wait_db_ack(sc, 5, sleep_flags) != 0) { mps_dprint(sc, MPS_FAULT, "Timeout while writing doorbell\n"); return (ENXIO); } } /* Step 6 */ /* Clock in the reply in 16-bit words. The total length of the * message is always in the 4th byte, so clock out the first 2 words * manually, then loop the rest. */ data16 = (uint16_t *)reply; if (mps_wait_db_int(sc) != 0) { mps_dprint(sc, MPS_FAULT, "Timeout reading doorbell 0\n"); return (ENXIO); } data16[0] = mps_regread(sc, MPI2_DOORBELL_OFFSET) & MPI2_DOORBELL_DATA_MASK; mps_regwrite(sc, MPI2_HOST_INTERRUPT_STATUS_OFFSET, 0x0); if (mps_wait_db_int(sc) != 0) { mps_dprint(sc, MPS_FAULT, "Timeout reading doorbell 1\n"); return (ENXIO); } data16[1] = mps_regread(sc, MPI2_DOORBELL_OFFSET) & MPI2_DOORBELL_DATA_MASK; mps_regwrite(sc, MPI2_HOST_INTERRUPT_STATUS_OFFSET, 0x0); /* Number of 32bit words in the message */ ioc_sz = reply->MsgLength; /* * Figure out how many 16bit words to clock in without overrunning. * The precision loss with dividing reply_sz can safely be * ignored because the messages can only be multiples of 32bits. */ residual = 0; count = MIN((reply_sz / 4), ioc_sz) * 2; if (count < ioc_sz * 2) { residual = ioc_sz * 2 - count; mps_dprint(sc, MPS_ERROR, "Driver error, throwing away %d " "residual message words\n", residual); } for (i = 2; i < count; i++) { if (mps_wait_db_int(sc) != 0) { mps_dprint(sc, MPS_FAULT, "Timeout reading doorbell %d\n", i); return (ENXIO); } data16[i] = mps_regread(sc, MPI2_DOORBELL_OFFSET) & MPI2_DOORBELL_DATA_MASK; mps_regwrite(sc, MPI2_HOST_INTERRUPT_STATUS_OFFSET, 0x0); } /* * Pull out residual words that won't fit into the provided buffer. * This keeps the chip from hanging due to a driver programming * error. 
*/ while (residual--) { if (mps_wait_db_int(sc) != 0) { mps_dprint(sc, MPS_FAULT, "Timeout reading doorbell\n"); return (ENXIO); } (void)mps_regread(sc, MPI2_DOORBELL_OFFSET); mps_regwrite(sc, MPI2_HOST_INTERRUPT_STATUS_OFFSET, 0x0); } /* Step 7 */ if (mps_wait_db_int(sc) != 0) { mps_dprint(sc, MPS_FAULT, "Timeout waiting to exit doorbell\n"); return (ENXIO); } if (mps_regread(sc, MPI2_DOORBELL_OFFSET) & MPI2_DOORBELL_USED) mps_dprint(sc, MPS_FAULT, "Warning, doorbell still active\n"); mps_regwrite(sc, MPI2_HOST_INTERRUPT_STATUS_OFFSET, 0x0); return (0); } static void mps_enqueue_request(struct mps_softc *sc, struct mps_command *cm) { reply_descriptor rd; MPS_FUNCTRACE(sc); mps_dprint(sc, MPS_TRACE, "SMID %u cm %p ccb %p\n", cm->cm_desc.Default.SMID, cm, cm->cm_ccb); if (sc->mps_flags & MPS_FLAGS_ATTACH_DONE && !(sc->mps_flags & MPS_FLAGS_SHUTDOWN)) mtx_assert(&sc->mps_mtx, MA_OWNED); if (++sc->io_cmds_active > sc->io_cmds_highwater) sc->io_cmds_highwater++; rd.u.low = cm->cm_desc.Words.Low; rd.u.high = cm->cm_desc.Words.High; rd.word = htole64(rd.word); /* TODO-We may need to make below regwrite atomic */ mps_regwrite(sc, MPI2_REQUEST_DESCRIPTOR_POST_LOW_OFFSET, rd.u.low); mps_regwrite(sc, MPI2_REQUEST_DESCRIPTOR_POST_HIGH_OFFSET, rd.u.high); } /* * Just the FACTS, ma'am. */ static int mps_get_iocfacts(struct mps_softc *sc, MPI2_IOC_FACTS_REPLY *facts) { MPI2_DEFAULT_REPLY *reply; MPI2_IOC_FACTS_REQUEST request; int error, req_sz, reply_sz; MPS_FUNCTRACE(sc); mps_dprint(sc, MPS_INIT, "%s entered\n", __func__); req_sz = sizeof(MPI2_IOC_FACTS_REQUEST); reply_sz = sizeof(MPI2_IOC_FACTS_REPLY); reply = (MPI2_DEFAULT_REPLY *)facts; bzero(&request, req_sz); request.Function = MPI2_FUNCTION_IOC_FACTS; error = mps_request_sync(sc, &request, reply, req_sz, reply_sz, 5); mps_dprint(sc, MPS_INIT, "%s exit error= %d\n", __func__, error); return (error); } static int mps_send_iocinit(struct mps_softc *sc) { MPI2_IOC_INIT_REQUEST init; MPI2_DEFAULT_REPLY reply; int req_sz, reply_sz, error; struct timeval now; uint64_t time_in_msec; MPS_FUNCTRACE(sc); mps_dprint(sc, MPS_INIT, "%s entered\n", __func__); req_sz = sizeof(MPI2_IOC_INIT_REQUEST); reply_sz = sizeof(MPI2_IOC_INIT_REPLY); bzero(&init, req_sz); bzero(&reply, reply_sz); /* * Fill in the init block. Note that most addresses are * deliberately in the lower 32bits of memory. This is a micro- * optimzation for PCI/PCIX, though it's not clear if it helps PCIe. 
*/ init.Function = MPI2_FUNCTION_IOC_INIT; init.WhoInit = MPI2_WHOINIT_HOST_DRIVER; init.MsgVersion = htole16(MPI2_VERSION); init.HeaderVersion = htole16(MPI2_HEADER_VERSION); init.SystemRequestFrameSize = htole16(sc->facts->IOCRequestFrameSize); init.ReplyDescriptorPostQueueDepth = htole16(sc->pqdepth); init.ReplyFreeQueueDepth = htole16(sc->fqdepth); init.SenseBufferAddressHigh = 0; init.SystemReplyAddressHigh = 0; init.SystemRequestFrameBaseAddress.High = 0; init.SystemRequestFrameBaseAddress.Low = htole32((uint32_t)sc->req_busaddr); init.ReplyDescriptorPostQueueAddress.High = 0; init.ReplyDescriptorPostQueueAddress.Low = htole32((uint32_t)sc->post_busaddr); init.ReplyFreeQueueAddress.High = 0; init.ReplyFreeQueueAddress.Low = htole32((uint32_t)sc->free_busaddr); getmicrotime(&now); time_in_msec = (now.tv_sec * 1000 + now.tv_usec/1000); init.TimeStamp.High = htole32((time_in_msec >> 32) & 0xFFFFFFFF); init.TimeStamp.Low = htole32(time_in_msec & 0xFFFFFFFF); error = mps_request_sync(sc, &init, &reply, req_sz, reply_sz, 5); if ((reply.IOCStatus & MPI2_IOCSTATUS_MASK) != MPI2_IOCSTATUS_SUCCESS) error = ENXIO; mps_dprint(sc, MPS_INIT, "IOCInit status= 0x%x\n", reply.IOCStatus); mps_dprint(sc, MPS_INIT, "%s exit\n", __func__); return (error); } void mps_memaddr_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error) { bus_addr_t *addr; addr = arg; *addr = segs[0].ds_addr; } static int mps_alloc_queues(struct mps_softc *sc) { struct mps_queue *q; - int nq, i; + u_int nq, i; nq = sc->msi_msgs; mps_dprint(sc, MPS_INIT|MPS_XINFO, "Allocating %d I/O queues\n", nq); - sc->queues = malloc(sizeof(struct mps_queue) * nq, M_MPT2, + sc->queues = mallocarray(nq, sizeof(struct mps_queue), M_MPT2, M_NOWAIT|M_ZERO); if (sc->queues == NULL) return (ENOMEM); for (i = 0; i < nq; i++) { q = &sc->queues[i]; mps_dprint(sc, MPS_INIT, "Configuring queue %d %p\n", i, q); q->sc = sc; q->qnum = i; } return (0); } static int mps_alloc_hw_queues(struct mps_softc *sc) { bus_addr_t queues_busaddr; uint8_t *queues; int qsize, fqsize, pqsize; /* * The reply free queue contains 4 byte entries in multiples of 16 and * aligned on a 16 byte boundary. There must always be an unused entry. * This queue supplies fresh reply frames for the firmware to use. * * The reply descriptor post queue contains 8 byte entries in * multiples of 16 and aligned on a 16 byte boundary. This queue * contains filled-in reply frames sent from the firmware to the host. * * These two queues are allocated together for simplicity. 
*/ sc->fqdepth = roundup2(sc->num_replies + 1, 16); sc->pqdepth = roundup2(sc->num_replies + 1, 16); fqsize= sc->fqdepth * 4; pqsize = sc->pqdepth * 8; qsize = fqsize + pqsize; if (bus_dma_tag_create( sc->mps_parent_dmat, /* parent */ 16, 0, /* algnmnt, boundary */ BUS_SPACE_MAXADDR_32BIT,/* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filter, filterarg */ qsize, /* maxsize */ 1, /* nsegments */ qsize, /* maxsegsize */ 0, /* flags */ NULL, NULL, /* lockfunc, lockarg */ &sc->queues_dmat)) { mps_dprint(sc, MPS_ERROR, "Cannot allocate queues DMA tag\n"); return (ENOMEM); } if (bus_dmamem_alloc(sc->queues_dmat, (void **)&queues, BUS_DMA_NOWAIT, &sc->queues_map)) { mps_dprint(sc, MPS_ERROR, "Cannot allocate queues memory\n"); return (ENOMEM); } bzero(queues, qsize); bus_dmamap_load(sc->queues_dmat, sc->queues_map, queues, qsize, mps_memaddr_cb, &queues_busaddr, 0); sc->free_queue = (uint32_t *)queues; sc->free_busaddr = queues_busaddr; sc->post_queue = (MPI2_REPLY_DESCRIPTORS_UNION *)(queues + fqsize); sc->post_busaddr = queues_busaddr + fqsize; return (0); } static int mps_alloc_replies(struct mps_softc *sc) { int rsize, num_replies; /* * sc->num_replies should be one less than sc->fqdepth. We need to * allocate space for sc->fqdepth replies, but only sc->num_replies * replies can be used at once. */ num_replies = max(sc->fqdepth, sc->num_replies); rsize = sc->facts->ReplyFrameSize * num_replies * 4; if (bus_dma_tag_create( sc->mps_parent_dmat, /* parent */ 4, 0, /* algnmnt, boundary */ BUS_SPACE_MAXADDR_32BIT,/* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filter, filterarg */ rsize, /* maxsize */ 1, /* nsegments */ rsize, /* maxsegsize */ 0, /* flags */ NULL, NULL, /* lockfunc, lockarg */ &sc->reply_dmat)) { mps_dprint(sc, MPS_ERROR, "Cannot allocate replies DMA tag\n"); return (ENOMEM); } if (bus_dmamem_alloc(sc->reply_dmat, (void **)&sc->reply_frames, BUS_DMA_NOWAIT, &sc->reply_map)) { mps_dprint(sc, MPS_ERROR, "Cannot allocate replies memory\n"); return (ENOMEM); } bzero(sc->reply_frames, rsize); bus_dmamap_load(sc->reply_dmat, sc->reply_map, sc->reply_frames, rsize, mps_memaddr_cb, &sc->reply_busaddr, 0); return (0); } static int mps_alloc_requests(struct mps_softc *sc) { struct mps_command *cm; struct mps_chain *chain; int i, rsize, nsegs; rsize = sc->facts->IOCRequestFrameSize * sc->num_reqs * 4; if (bus_dma_tag_create( sc->mps_parent_dmat, /* parent */ 16, 0, /* algnmnt, boundary */ BUS_SPACE_MAXADDR_32BIT,/* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filter, filterarg */ rsize, /* maxsize */ 1, /* nsegments */ rsize, /* maxsegsize */ 0, /* flags */ NULL, NULL, /* lockfunc, lockarg */ &sc->req_dmat)) { mps_dprint(sc, MPS_ERROR, "Cannot allocate request DMA tag\n"); return (ENOMEM); } if (bus_dmamem_alloc(sc->req_dmat, (void **)&sc->req_frames, BUS_DMA_NOWAIT, &sc->req_map)) { mps_dprint(sc, MPS_ERROR, "Cannot allocate request memory\n"); return (ENOMEM); } bzero(sc->req_frames, rsize); bus_dmamap_load(sc->req_dmat, sc->req_map, sc->req_frames, rsize, mps_memaddr_cb, &sc->req_busaddr, 0); rsize = sc->facts->IOCRequestFrameSize * sc->max_chains * 4; if (bus_dma_tag_create( sc->mps_parent_dmat, /* parent */ 16, 0, /* algnmnt, boundary */ BUS_SPACE_MAXADDR_32BIT,/* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filter, filterarg */ rsize, /* maxsize */ 1, /* nsegments */ rsize, /* maxsegsize */ 0, /* flags */ NULL, NULL, /* lockfunc, lockarg */ &sc->chain_dmat)) { mps_dprint(sc, MPS_ERROR, "Cannot allocate chain DMA tag\n"); 
return (ENOMEM); } if (bus_dmamem_alloc(sc->chain_dmat, (void **)&sc->chain_frames, BUS_DMA_NOWAIT, &sc->chain_map)) { mps_dprint(sc, MPS_ERROR, "Cannot allocate chain memory\n"); return (ENOMEM); } bzero(sc->chain_frames, rsize); bus_dmamap_load(sc->chain_dmat, sc->chain_map, sc->chain_frames, rsize, mps_memaddr_cb, &sc->chain_busaddr, 0); rsize = MPS_SENSE_LEN * sc->num_reqs; if (bus_dma_tag_create( sc->mps_parent_dmat, /* parent */ 1, 0, /* algnmnt, boundary */ BUS_SPACE_MAXADDR_32BIT,/* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filter, filterarg */ rsize, /* maxsize */ 1, /* nsegments */ rsize, /* maxsegsize */ 0, /* flags */ NULL, NULL, /* lockfunc, lockarg */ &sc->sense_dmat)) { mps_dprint(sc, MPS_ERROR, "Cannot allocate sense DMA tag\n"); return (ENOMEM); } if (bus_dmamem_alloc(sc->sense_dmat, (void **)&sc->sense_frames, BUS_DMA_NOWAIT, &sc->sense_map)) { mps_dprint(sc, MPS_ERROR, "Cannot allocate sense memory\n"); return (ENOMEM); } bzero(sc->sense_frames, rsize); bus_dmamap_load(sc->sense_dmat, sc->sense_map, sc->sense_frames, rsize, mps_memaddr_cb, &sc->sense_busaddr, 0); sc->chains = malloc(sizeof(struct mps_chain) * sc->max_chains, M_MPT2, M_WAITOK | M_ZERO); if(!sc->chains) { mps_dprint(sc, MPS_ERROR, "Cannot allocate chains memory\n"); return (ENOMEM); } for (i = 0; i < sc->max_chains; i++) { chain = &sc->chains[i]; chain->chain = (MPI2_SGE_IO_UNION *)(sc->chain_frames + i * sc->facts->IOCRequestFrameSize * 4); chain->chain_busaddr = sc->chain_busaddr + i * sc->facts->IOCRequestFrameSize * 4; mps_free_chain(sc, chain); sc->chain_free_lowwater++; } /* XXX Need to pick a more precise value */ nsegs = (MAXPHYS / PAGE_SIZE) + 1; if (bus_dma_tag_create( sc->mps_parent_dmat, /* parent */ 1, 0, /* algnmnt, boundary */ BUS_SPACE_MAXADDR, /* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filter, filterarg */ BUS_SPACE_MAXSIZE_32BIT,/* maxsize */ nsegs, /* nsegments */ BUS_SPACE_MAXSIZE_24BIT,/* maxsegsize */ BUS_DMA_ALLOCNOW, /* flags */ busdma_lock_mutex, /* lockfunc */ &sc->mps_mtx, /* lockarg */ &sc->buffer_dmat)) { mps_dprint(sc, MPS_ERROR, "Cannot allocate buffer DMA tag\n"); return (ENOMEM); } /* * SMID 0 cannot be used as a free command per the firmware spec. * Just drop that command instead of risking accounting bugs. */ sc->commands = malloc(sizeof(struct mps_command) * sc->num_reqs, M_MPT2, M_WAITOK | M_ZERO); if(!sc->commands) { mps_dprint(sc, MPS_ERROR, "Cannot allocate command memory\n"); return (ENOMEM); } for (i = 1; i < sc->num_reqs; i++) { cm = &sc->commands[i]; cm->cm_req = sc->req_frames + i * sc->facts->IOCRequestFrameSize * 4; cm->cm_req_busaddr = sc->req_busaddr + i * sc->facts->IOCRequestFrameSize * 4; cm->cm_sense = &sc->sense_frames[i]; cm->cm_sense_busaddr = sc->sense_busaddr + i * MPS_SENSE_LEN; cm->cm_desc.Default.SMID = i; cm->cm_sc = sc; TAILQ_INIT(&cm->cm_chain_list); callout_init_mtx(&cm->cm_callout, &sc->mps_mtx, 0); /* XXX Is a failure here a critical problem? */ if (bus_dmamap_create(sc->buffer_dmat, 0, &cm->cm_dmamap) == 0) if (i <= sc->facts->HighPriorityCredit) mps_free_high_priority_command(sc, cm); else mps_free_command(sc, cm); else { panic("failed to allocate command %d\n", i); sc->num_reqs = i; break; } } return (0); } static int mps_init_queues(struct mps_softc *sc) { int i; memset((uint8_t *)sc->post_queue, 0xff, sc->pqdepth * 8); /* * According to the spec, we need to use one less reply than we * have space for on the queue. 
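/*
 * Illustrative aside on the allocation loop above, not part of the driver:
 * request frames, sense buffers and SMIDs are tied together purely by
 * index.  Standalone sketch of the offset math with made-up sizes; the
 * IOC reports IOCRequestFrameSize in 4-byte words (hence the "* 4"),
 * SENSE_LEN stands in for MPS_SENSE_LEN, and SMID 0 is never handed out.
 */
#include <stdint.h>
#include <stdio.h>

#define	SENSE_LEN	32		/* stand-in for MPS_SENSE_LEN */

int
main(void)
{
	unsigned frame_size_words = 32;		/* sample IOCRequestFrameSize */
	unsigned frame_size = frame_size_words * 4;
	uint64_t req_busaddr = 0x10000000;	/* sample DMA base addresses */
	uint64_t sense_busaddr = 0x20000000;
	unsigned smid;

	/* SMID 0 is reserved by the firmware, so start at 1 like the loop. */
	for (smid = 1; smid <= 3; smid++) {
		printf("SMID %u: request frame at 0x%jx, sense at 0x%jx\n",
		    smid,
		    (uintmax_t)(req_busaddr + (uint64_t)smid * frame_size),
		    (uintmax_t)(sense_busaddr + (uint64_t)smid * SENSE_LEN));
	}
	return (0);
}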
So sc->num_replies (the number we * use) should be less than sc->fqdepth (allocated size). */ if (sc->num_replies >= sc->fqdepth) return (EINVAL); /* * Initialize all of the free queue entries. */ for (i = 0; i < sc->fqdepth; i++) sc->free_queue[i] = sc->reply_busaddr + (i * sc->facts->ReplyFrameSize * 4); sc->replyfreeindex = sc->num_replies; return (0); } /* Get the driver parameter tunables. Lowest priority are the driver defaults. * Next are the global settings, if they exist. Highest are the per-unit * settings, if they exist. */ void mps_get_tunables(struct mps_softc *sc) { char tmpstr[80], mps_debug[80]; /* XXX default to some debugging for now */ sc->mps_debug = MPS_INFO|MPS_FAULT; sc->disable_msix = 0; sc->disable_msi = 0; sc->max_msix = MPS_MSIX_MAX; sc->max_chains = MPS_CHAIN_FRAMES; sc->max_io_pages = MPS_MAXIO_PAGES; sc->enable_ssu = MPS_SSU_ENABLE_SSD_DISABLE_HDD; sc->spinup_wait_time = DEFAULT_SPINUP_WAIT; sc->use_phynum = 1; sc->max_reqframes = MPS_REQ_FRAMES; sc->max_prireqframes = MPS_PRI_REQ_FRAMES; sc->max_replyframes = MPS_REPLY_FRAMES; sc->max_evtframes = MPS_EVT_REPLY_FRAMES; /* * Grab the global variables. */ bzero(mps_debug, 80); if (TUNABLE_STR_FETCH("hw.mps.debug_level", mps_debug, 80) != 0) mps_parse_debug(sc, mps_debug); TUNABLE_INT_FETCH("hw.mps.disable_msix", &sc->disable_msix); TUNABLE_INT_FETCH("hw.mps.disable_msi", &sc->disable_msi); TUNABLE_INT_FETCH("hw.mps.max_msix", &sc->max_msix); TUNABLE_INT_FETCH("hw.mps.max_chains", &sc->max_chains); TUNABLE_INT_FETCH("hw.mps.max_io_pages", &sc->max_io_pages); TUNABLE_INT_FETCH("hw.mps.enable_ssu", &sc->enable_ssu); TUNABLE_INT_FETCH("hw.mps.spinup_wait_time", &sc->spinup_wait_time); TUNABLE_INT_FETCH("hw.mps.use_phy_num", &sc->use_phynum); TUNABLE_INT_FETCH("hw.mps.max_reqframes", &sc->max_reqframes); TUNABLE_INT_FETCH("hw.mps.max_prireqframes", &sc->max_prireqframes); TUNABLE_INT_FETCH("hw.mps.max_replyframes", &sc->max_replyframes); TUNABLE_INT_FETCH("hw.mps.max_evtframes", &sc->max_evtframes); /* Grab the unit-instance variables */ snprintf(tmpstr, sizeof(tmpstr), "dev.mps.%d.debug_level", device_get_unit(sc->mps_dev)); bzero(mps_debug, 80); if (TUNABLE_STR_FETCH(tmpstr, mps_debug, 80) != 0) mps_parse_debug(sc, mps_debug); snprintf(tmpstr, sizeof(tmpstr), "dev.mps.%d.disable_msix", device_get_unit(sc->mps_dev)); TUNABLE_INT_FETCH(tmpstr, &sc->disable_msix); snprintf(tmpstr, sizeof(tmpstr), "dev.mps.%d.disable_msi", device_get_unit(sc->mps_dev)); TUNABLE_INT_FETCH(tmpstr, &sc->disable_msi); snprintf(tmpstr, sizeof(tmpstr), "dev.mps.%d.max_msix", device_get_unit(sc->mps_dev)); TUNABLE_INT_FETCH(tmpstr, &sc->max_msix); snprintf(tmpstr, sizeof(tmpstr), "dev.mps.%d.max_chains", device_get_unit(sc->mps_dev)); TUNABLE_INT_FETCH(tmpstr, &sc->max_chains); snprintf(tmpstr, sizeof(tmpstr), "dev.mps.%d.max_io_pages", device_get_unit(sc->mps_dev)); TUNABLE_INT_FETCH(tmpstr, &sc->max_io_pages); bzero(sc->exclude_ids, sizeof(sc->exclude_ids)); snprintf(tmpstr, sizeof(tmpstr), "dev.mps.%d.exclude_ids", device_get_unit(sc->mps_dev)); TUNABLE_STR_FETCH(tmpstr, sc->exclude_ids, sizeof(sc->exclude_ids)); snprintf(tmpstr, sizeof(tmpstr), "dev.mps.%d.enable_ssu", device_get_unit(sc->mps_dev)); TUNABLE_INT_FETCH(tmpstr, &sc->enable_ssu); snprintf(tmpstr, sizeof(tmpstr), "dev.mps.%d.spinup_wait_time", device_get_unit(sc->mps_dev)); TUNABLE_INT_FETCH(tmpstr, &sc->spinup_wait_time); snprintf(tmpstr, sizeof(tmpstr), "dev.mps.%d.use_phy_num", device_get_unit(sc->mps_dev)); TUNABLE_INT_FETCH(tmpstr, &sc->use_phynum); snprintf(tmpstr, 
sizeof(tmpstr), "dev.mps.%d.max_reqframes", device_get_unit(sc->mps_dev)); TUNABLE_INT_FETCH(tmpstr, &sc->max_reqframes); snprintf(tmpstr, sizeof(tmpstr), "dev.mps.%d.max_prireqframes", device_get_unit(sc->mps_dev)); TUNABLE_INT_FETCH(tmpstr, &sc->max_prireqframes); snprintf(tmpstr, sizeof(tmpstr), "dev.mps.%d.max_replyframes", device_get_unit(sc->mps_dev)); TUNABLE_INT_FETCH(tmpstr, &sc->max_replyframes); snprintf(tmpstr, sizeof(tmpstr), "dev.mps.%d.max_evtframes", device_get_unit(sc->mps_dev)); TUNABLE_INT_FETCH(tmpstr, &sc->max_evtframes); } static void mps_setup_sysctl(struct mps_softc *sc) { struct sysctl_ctx_list *sysctl_ctx = NULL; struct sysctl_oid *sysctl_tree = NULL; char tmpstr[80], tmpstr2[80]; /* * Setup the sysctl variable so the user can change the debug level * on the fly. */ snprintf(tmpstr, sizeof(tmpstr), "MPS controller %d", device_get_unit(sc->mps_dev)); snprintf(tmpstr2, sizeof(tmpstr2), "%d", device_get_unit(sc->mps_dev)); sysctl_ctx = device_get_sysctl_ctx(sc->mps_dev); if (sysctl_ctx != NULL) sysctl_tree = device_get_sysctl_tree(sc->mps_dev); if (sysctl_tree == NULL) { sysctl_ctx_init(&sc->sysctl_ctx); sc->sysctl_tree = SYSCTL_ADD_NODE(&sc->sysctl_ctx, SYSCTL_STATIC_CHILDREN(_hw_mps), OID_AUTO, tmpstr2, CTLFLAG_RD, 0, tmpstr); if (sc->sysctl_tree == NULL) return; sysctl_ctx = &sc->sysctl_ctx; sysctl_tree = sc->sysctl_tree; } SYSCTL_ADD_PROC(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree), OID_AUTO, "debug_level", CTLTYPE_STRING | CTLFLAG_RW |CTLFLAG_MPSAFE, sc, 0, mps_debug_sysctl, "A", "mps debug level"); SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree), OID_AUTO, "disable_msix", CTLFLAG_RD, &sc->disable_msix, 0, "Disable the use of MSI-X interrupts"); SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree), OID_AUTO, "disable_msi", CTLFLAG_RD, &sc->disable_msi, 0, "Disable the use of MSI interrupts"); SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree), OID_AUTO, "max_msix", CTLFLAG_RD, &sc->max_msix, 0, "User-defined maximum number of MSIX queues"); SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree), OID_AUTO, "msix_msgs", CTLFLAG_RD, &sc->msi_msgs, 0, "Negotiated number of MSIX queues"); SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree), OID_AUTO, "max_reqframes", CTLFLAG_RD, &sc->max_reqframes, 0, "Total number of allocated request frames"); SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree), OID_AUTO, "max_prireqframes", CTLFLAG_RD, &sc->max_prireqframes, 0, "Total number of allocated high priority request frames"); SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree), OID_AUTO, "max_replyframes", CTLFLAG_RD, &sc->max_replyframes, 0, "Total number of allocated reply frames"); SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree), OID_AUTO, "max_evtframes", CTLFLAG_RD, &sc->max_evtframes, 0, "Total number of event frames allocated"); SYSCTL_ADD_STRING(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree), OID_AUTO, "firmware_version", CTLFLAG_RW, sc->fw_version, strlen(sc->fw_version), "firmware version"); SYSCTL_ADD_STRING(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree), OID_AUTO, "driver_version", CTLFLAG_RW, MPS_DRIVER_VERSION, strlen(MPS_DRIVER_VERSION), "driver version"); SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree), OID_AUTO, "io_cmds_active", CTLFLAG_RD, &sc->io_cmds_active, 0, "number of currently active commands"); SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree), OID_AUTO, "io_cmds_highwater", CTLFLAG_RD, &sc->io_cmds_highwater, 0, "maximum active commands seen"); SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree), 
OID_AUTO, "chain_free", CTLFLAG_RD, &sc->chain_free, 0, "number of free chain elements"); SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree), OID_AUTO, "chain_free_lowwater", CTLFLAG_RD, &sc->chain_free_lowwater, 0,"lowest number of free chain elements"); SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree), OID_AUTO, "max_chains", CTLFLAG_RD, &sc->max_chains, 0,"maximum chain frames that will be allocated"); SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree), OID_AUTO, "max_io_pages", CTLFLAG_RD, &sc->max_io_pages, 0,"maximum pages to allow per I/O (if <1 use " "IOCFacts)"); SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree), OID_AUTO, "enable_ssu", CTLFLAG_RW, &sc->enable_ssu, 0, "enable SSU to SATA SSD/HDD at shutdown"); SYSCTL_ADD_UQUAD(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree), OID_AUTO, "chain_alloc_fail", CTLFLAG_RD, &sc->chain_alloc_fail, "chain allocation failures"); SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree), OID_AUTO, "spinup_wait_time", CTLFLAG_RD, &sc->spinup_wait_time, DEFAULT_SPINUP_WAIT, "seconds to wait for " "spinup after SATA ID error"); SYSCTL_ADD_PROC(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree), OID_AUTO, "mapping_table_dump", CTLTYPE_STRING | CTLFLAG_RD, sc, 0, mps_mapping_dump, "A", "Mapping Table Dump"); SYSCTL_ADD_PROC(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree), OID_AUTO, "encl_table_dump", CTLTYPE_STRING | CTLFLAG_RD, sc, 0, mps_mapping_encl_dump, "A", "Enclosure Table Dump"); SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree), OID_AUTO, "use_phy_num", CTLFLAG_RD, &sc->use_phynum, 0, "Use the phy number for enumeration"); } static struct mps_debug_string { char *name; int flag; } mps_debug_strings[] = { {"info", MPS_INFO}, {"fault", MPS_FAULT}, {"event", MPS_EVENT}, {"log", MPS_LOG}, {"recovery", MPS_RECOVERY}, {"error", MPS_ERROR}, {"init", MPS_INIT}, {"xinfo", MPS_XINFO}, {"user", MPS_USER}, {"mapping", MPS_MAPPING}, {"trace", MPS_TRACE} }; enum mps_debug_level_combiner { COMB_NONE, COMB_ADD, COMB_SUB }; static int mps_debug_sysctl(SYSCTL_HANDLER_ARGS) { struct mps_softc *sc; struct mps_debug_string *string; struct sbuf *sbuf; char *buffer; size_t sz; int i, len, debug, error; sc = (struct mps_softc *)arg1; error = sysctl_wire_old_buffer(req, 0); if (error != 0) return (error); sbuf = sbuf_new_for_sysctl(NULL, NULL, 128, req); debug = sc->mps_debug; sbuf_printf(sbuf, "%#x", debug); sz = sizeof(mps_debug_strings) / sizeof(mps_debug_strings[0]); for (i = 0; i < sz; i++) { string = &mps_debug_strings[i]; if (debug & string->flag) sbuf_printf(sbuf, ",%s", string->name); } error = sbuf_finish(sbuf); sbuf_delete(sbuf); if (error || req->newptr == NULL) return (error); len = req->newlen - req->newidx; if (len == 0) return (0); buffer = malloc(len, M_MPT2, M_ZERO|M_WAITOK); error = SYSCTL_IN(req, buffer, len); mps_parse_debug(sc, buffer); free(buffer, M_MPT2); return (error); } static void mps_parse_debug(struct mps_softc *sc, char *list) { struct mps_debug_string *string; enum mps_debug_level_combiner op; char *token, *endtoken; size_t sz; int flags, i; if (list == NULL || *list == '\0') return; if (*list == '+') { op = COMB_ADD; list++; } else if (*list == '-') { op = COMB_SUB; list++; } else op = COMB_NONE; if (*list == '\0') return; flags = 0; sz = sizeof(mps_debug_strings) / sizeof(mps_debug_strings[0]); while ((token = strsep(&list, ":,")) != NULL) { /* Handle integer flags */ flags |= strtol(token, &endtoken, 0); if (token != endtoken) continue; /* Handle text flags */ for (i = 0; i < sz; i++) { string = &mps_debug_strings[i]; if 
(strcasecmp(token, string->name) == 0) { flags |= string->flag; break; } } } switch (op) { case COMB_NONE: sc->mps_debug = flags; break; case COMB_ADD: sc->mps_debug |= flags; break; case COMB_SUB: sc->mps_debug &= (~flags); break; } return; } int mps_attach(struct mps_softc *sc) { int error; MPS_FUNCTRACE(sc); mps_dprint(sc, MPS_INIT, "%s entered\n", __func__); mtx_init(&sc->mps_mtx, "MPT2SAS lock", NULL, MTX_DEF); callout_init_mtx(&sc->periodic, &sc->mps_mtx, 0); callout_init_mtx(&sc->device_check_callout, &sc->mps_mtx, 0); TAILQ_INIT(&sc->event_list); timevalclear(&sc->lastfail); if ((error = mps_transition_ready(sc)) != 0) { mps_dprint(sc, MPS_INIT|MPS_FAULT, "failed to transition " "ready\n"); return (error); } sc->facts = malloc(sizeof(MPI2_IOC_FACTS_REPLY), M_MPT2, M_ZERO|M_NOWAIT); if(!sc->facts) { mps_dprint(sc, MPS_INIT|MPS_FAULT, "Cannot allocate memory, " "exit\n"); return (ENOMEM); } /* * Get IOC Facts and allocate all structures based on this information. * A Diag Reset will also call mps_iocfacts_allocate and re-read the IOC * Facts. If relevant values have changed in IOC Facts, this function * will free all of the memory based on IOC Facts and reallocate that * memory. If this fails, any allocated memory should already be freed. */ if ((error = mps_iocfacts_allocate(sc, TRUE)) != 0) { mps_dprint(sc, MPS_INIT|MPS_FAULT, "IOC Facts based allocation " "failed with error %d, exit\n", error); return (error); } /* Start the periodic watchdog check on the IOC Doorbell */ mps_periodic(sc); /* * The portenable will kick off discovery events that will drive the * rest of the initialization process. The CAM/SAS module will * hold up the boot sequence until discovery is complete. */ sc->mps_ich.ich_func = mps_startup; sc->mps_ich.ich_arg = sc; if (config_intrhook_establish(&sc->mps_ich) != 0) { mps_dprint(sc, MPS_INIT|MPS_ERROR, "Cannot establish MPS config hook\n"); error = EINVAL; } /* * Allow IR to shutdown gracefully when shutdown occurs. */ sc->shutdown_eh = EVENTHANDLER_REGISTER(shutdown_final, mpssas_ir_shutdown, sc, SHUTDOWN_PRI_DEFAULT); if (sc->shutdown_eh == NULL) mps_dprint(sc, MPS_INIT|MPS_ERROR, "shutdown event registration failed\n"); mps_setup_sysctl(sc); sc->mps_flags |= MPS_FLAGS_ATTACH_DONE; mps_dprint(sc, MPS_INIT, "%s exit error= %d\n", __func__, error); return (error); } /* Run through any late-start handlers. */ static void mps_startup(void *arg) { struct mps_softc *sc; sc = (struct mps_softc *)arg; mps_dprint(sc, MPS_INIT, "%s entered\n", __func__); mps_lock(sc); mps_unmask_intr(sc); /* initialize device mapping tables */ mps_base_static_config_pages(sc); mps_mapping_initialize(sc); mpssas_startup(sc); mps_unlock(sc); mps_dprint(sc, MPS_INIT, "disestablish config intrhook\n"); config_intrhook_disestablish(&sc->mps_ich); sc->mps_ich.ich_arg = NULL; mps_dprint(sc, MPS_INIT, "%s exit\n", __func__); } /* Periodic watchdog. Is called with the driver lock already held. 
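/*
 * Illustrative aside, not part of the driver: the debug_level sysctl and
 * tunable parsed by mps_parse_debug() above accept either a numeric mask
 * or a comma/colon separated list of names with an optional leading '+'
 * (add) or '-' (subtract), e.g. "sysctl dev.mps.0.debug_level=+trace".
 * Minimal standalone re-implementation of that parsing with a reduced
 * table; the flag values here are illustrative, not the driver's, and
 * strsep()/strcasecmp() are assumed available (the driver uses both).
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <strings.h>

static const struct { const char *name; int flag; } dbg_names[] = {
	{ "info", 0x0001 }, { "fault", 0x0002 }, { "trace", 0x0200 },
};

static int
parse_debug(int cur, char *list)
{
	char *token, *end;
	int flags = 0, op = 0;		/* 0 = replace, 1 = add, 2 = subtract */
	size_t i;

	if (*list == '+') { op = 1; list++; }
	else if (*list == '-') { op = 2; list++; }
	while ((token = strsep(&list, ":,")) != NULL) {
		flags |= (int)strtol(token, &end, 0);	/* numeric token */
		if (token != end)
			continue;
		for (i = 0; i < sizeof(dbg_names) / sizeof(dbg_names[0]); i++)
			if (strcasecmp(token, dbg_names[i].name) == 0)
				flags |= dbg_names[i].flag;
	}
	return (op == 1 ? cur | flags : op == 2 ? cur & ~flags : flags);
}

int
main(void)
{
	char buf[] = "+trace,0x2";

	/* Start from a default of info|fault and add trace plus bit 0x2. */
	printf("new mask = %#x\n", parse_debug(0x0003, buf));
	return (0);
}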
*/ static void mps_periodic(void *arg) { struct mps_softc *sc; uint32_t db; sc = (struct mps_softc *)arg; if (sc->mps_flags & MPS_FLAGS_SHUTDOWN) return; db = mps_regread(sc, MPI2_DOORBELL_OFFSET); if ((db & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT) { mps_dprint(sc, MPS_FAULT, "IOC Fault 0x%08x, Resetting\n", db); mps_reinit(sc); } callout_reset(&sc->periodic, MPS_PERIODIC_DELAY * hz, mps_periodic, sc); } static void mps_log_evt_handler(struct mps_softc *sc, uintptr_t data, MPI2_EVENT_NOTIFICATION_REPLY *event) { MPI2_EVENT_DATA_LOG_ENTRY_ADDED *entry; MPS_DPRINT_EVENT(sc, generic, event); switch (event->Event) { case MPI2_EVENT_LOG_DATA: mps_dprint(sc, MPS_EVENT, "MPI2_EVENT_LOG_DATA:\n"); if (sc->mps_debug & MPS_EVENT) hexdump(event->EventData, event->EventDataLength, NULL, 0); break; case MPI2_EVENT_LOG_ENTRY_ADDED: entry = (MPI2_EVENT_DATA_LOG_ENTRY_ADDED *)event->EventData; mps_dprint(sc, MPS_EVENT, "MPI2_EVENT_LOG_ENTRY_ADDED event " "0x%x Sequence %d:\n", entry->LogEntryQualifier, entry->LogSequence); break; default: break; } return; } static int mps_attach_log(struct mps_softc *sc) { u32 events[MPI2_EVENT_NOTIFY_EVENTMASK_WORDS]; bzero(events, 16); setbit(events, MPI2_EVENT_LOG_DATA); setbit(events, MPI2_EVENT_LOG_ENTRY_ADDED); mps_register_events(sc, events, mps_log_evt_handler, NULL, &sc->mps_log_eh); return (0); } static int mps_detach_log(struct mps_softc *sc) { if (sc->mps_log_eh != NULL) mps_deregister_events(sc, sc->mps_log_eh); return (0); } /* * Free all of the driver resources and detach submodules. Should be called * without the lock held. */ int mps_free(struct mps_softc *sc) { int error; mps_dprint(sc, MPS_INIT, "%s entered\n", __func__); /* Turn off the watchdog */ mps_lock(sc); sc->mps_flags |= MPS_FLAGS_SHUTDOWN; mps_unlock(sc); /* Lock must not be held for this */ callout_drain(&sc->periodic); callout_drain(&sc->device_check_callout); if (((error = mps_detach_log(sc)) != 0) || ((error = mps_detach_sas(sc)) != 0)) { mps_dprint(sc, MPS_INIT|MPS_FAULT, "failed to detach " "subsystems, exit\n"); return (error); } mps_detach_user(sc); /* Put the IOC back in the READY state. */ mps_lock(sc); if ((error = mps_transition_ready(sc)) != 0) { mps_unlock(sc); return (error); } mps_unlock(sc); if (sc->facts != NULL) free(sc->facts, M_MPT2); /* * Free all buffers that are based on IOC Facts. A Diag Reset may need * to free these buffers too. 
*/ mps_iocfacts_free(sc); if (sc->sysctl_tree != NULL) sysctl_ctx_free(&sc->sysctl_ctx); /* Deregister the shutdown function */ if (sc->shutdown_eh != NULL) EVENTHANDLER_DEREGISTER(shutdown_final, sc->shutdown_eh); mtx_destroy(&sc->mps_mtx); mps_dprint(sc, MPS_INIT, "%s exit\n", __func__); return (0); } static __inline void mps_complete_command(struct mps_softc *sc, struct mps_command *cm) { MPS_FUNCTRACE(sc); if (cm == NULL) { mps_dprint(sc, MPS_ERROR, "Completing NULL command\n"); return; } if (cm->cm_flags & MPS_CM_FLAGS_POLLED) cm->cm_flags |= MPS_CM_FLAGS_COMPLETE; if (cm->cm_complete != NULL) { mps_dprint(sc, MPS_TRACE, "%s cm %p calling cm_complete %p data %p reply %p\n", __func__, cm, cm->cm_complete, cm->cm_complete_data, cm->cm_reply); cm->cm_complete(sc, cm); } if (cm->cm_flags & MPS_CM_FLAGS_WAKEUP) { mps_dprint(sc, MPS_TRACE, "waking up %p\n", cm); wakeup(cm); } if (cm->cm_sc->io_cmds_active != 0) { cm->cm_sc->io_cmds_active--; } else { mps_dprint(sc, MPS_ERROR, "Warning: io_cmds_active is " "out of sync - resynching to 0\n"); } } static void mps_sas_log_info(struct mps_softc *sc , u32 log_info) { union loginfo_type { u32 loginfo; struct { u32 subcode:16; u32 code:8; u32 originator:4; u32 bus_type:4; } dw; }; union loginfo_type sas_loginfo; char *originator_str = NULL; sas_loginfo.loginfo = log_info; if (sas_loginfo.dw.bus_type != 3 /*SAS*/) return; /* each nexus loss loginfo */ if (log_info == 0x31170000) return; /* eat the loginfos associated with task aborts */ if ((log_info == 30050000 || log_info == 0x31140000 || log_info == 0x31130000)) return; switch (sas_loginfo.dw.originator) { case 0: originator_str = "IOP"; break; case 1: originator_str = "PL"; break; case 2: originator_str = "IR"; break; } mps_dprint(sc, MPS_LOG, "log_info(0x%08x): originator(%s), " "code(0x%02x), sub_code(0x%04x)\n", log_info, originator_str, sas_loginfo.dw.code, sas_loginfo.dw.subcode); } static void mps_display_reply_info(struct mps_softc *sc, uint8_t *reply) { MPI2DefaultReply_t *mpi_reply; u16 sc_status; mpi_reply = (MPI2DefaultReply_t*)reply; sc_status = le16toh(mpi_reply->IOCStatus); if (sc_status & MPI2_IOCSTATUS_FLAG_LOG_INFO_AVAILABLE) mps_sas_log_info(sc, le32toh(mpi_reply->IOCLogInfo)); } void mps_intr(void *data) { struct mps_softc *sc; uint32_t status; sc = (struct mps_softc *)data; mps_dprint(sc, MPS_TRACE, "%s\n", __func__); /* * Check interrupt status register to flush the bus. This is * needed for both INTx interrupts and driver-driven polling */ status = mps_regread(sc, MPI2_HOST_INTERRUPT_STATUS_OFFSET); if ((status & MPI2_HIS_REPLY_DESCRIPTOR_INTERRUPT) == 0) return; mps_lock(sc); mps_intr_locked(data); mps_unlock(sc); return; } /* * In theory, MSI/MSIX interrupts shouldn't need to read any registers on the * chip. Hopefully this theory is correct. */ void mps_intr_msi(void *data) { struct mps_softc *sc; sc = (struct mps_softc *)data; mps_dprint(sc, MPS_TRACE, "%s\n", __func__); mps_lock(sc); mps_intr_locked(data); mps_unlock(sc); return; } /* * The locking is overly broad and simplistic, but easy to deal with for now. 
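/*
 * Illustrative aside, not part of the driver: the IOCLogInfo word decoded
 * by the union in mps_sas_log_info() above packs bus_type, originator,
 * code and subcode into one 32-bit value.  Standalone sketch that decodes
 * it with explicit shifts, which matches the bitfield layout on a
 * little-endian host; the sample value is made up.
 */
#include <stdint.h>
#include <stdio.h>

int
main(void)
{
	uint32_t log_info = 0x31110d00;		/* sample SAS loginfo */
	uint32_t subcode = log_info & 0xffff;
	uint32_t code = (log_info >> 16) & 0xff;
	uint32_t originator = (log_info >> 24) & 0xf;
	uint32_t bus_type = (log_info >> 28) & 0xf;
	const char *orig_str[] = { "IOP", "PL", "IR" };

	if (bus_type != 3)			/* 3 == SAS, as in the driver */
		printf("not a SAS loginfo\n");
	else
		printf("originator(%s) code(0x%02x) sub_code(0x%04x)\n",
		    originator < 3 ? orig_str[originator] : "?",
		    code, subcode);
	return (0);
}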
*/ void mps_intr_locked(void *data) { MPI2_REPLY_DESCRIPTORS_UNION *desc; struct mps_softc *sc; struct mps_command *cm = NULL; uint8_t flags; u_int pq; MPI2_DIAG_RELEASE_REPLY *rel_rep; mps_fw_diagnostic_buffer_t *pBuffer; sc = (struct mps_softc *)data; pq = sc->replypostindex; mps_dprint(sc, MPS_TRACE, "%s sc %p starting with replypostindex %u\n", __func__, sc, sc->replypostindex); for ( ;; ) { cm = NULL; desc = &sc->post_queue[sc->replypostindex]; flags = desc->Default.ReplyFlags & MPI2_RPY_DESCRIPT_FLAGS_TYPE_MASK; if ((flags == MPI2_RPY_DESCRIPT_FLAGS_UNUSED) || (le32toh(desc->Words.High) == 0xffffffff)) break; /* increment the replypostindex now, so that event handlers * and cm completion handlers which decide to do a diag * reset can zero it without it getting incremented again * afterwards, and we break out of this loop on the next * iteration since the reply post queue has been cleared to * 0xFF and all descriptors look unused (which they are). */ if (++sc->replypostindex >= sc->pqdepth) sc->replypostindex = 0; switch (flags) { case MPI2_RPY_DESCRIPT_FLAGS_SCSI_IO_SUCCESS: cm = &sc->commands[le16toh(desc->SCSIIOSuccess.SMID)]; cm->cm_reply = NULL; break; case MPI2_RPY_DESCRIPT_FLAGS_ADDRESS_REPLY: { uint32_t baddr; uint8_t *reply; /* * Re-compose the reply address from the address * sent back from the chip. The ReplyFrameAddress * is the lower 32 bits of the physical address of * particular reply frame. Convert that address to * host format, and then use that to provide the * offset against the virtual address base * (sc->reply_frames). */ baddr = le32toh(desc->AddressReply.ReplyFrameAddress); reply = sc->reply_frames + (baddr - ((uint32_t)sc->reply_busaddr)); /* * Make sure the reply we got back is in a valid * range. If not, go ahead and panic here, since * we'll probably panic as soon as we deference the * reply pointer anyway. */ if ((reply < sc->reply_frames) || (reply > (sc->reply_frames + (sc->fqdepth * sc->facts->ReplyFrameSize * 4)))) { printf("%s: WARNING: reply %p out of range!\n", __func__, reply); printf("%s: reply_frames %p, fqdepth %d, " "frame size %d\n", __func__, sc->reply_frames, sc->fqdepth, sc->facts->ReplyFrameSize * 4); printf("%s: baddr %#x,\n", __func__, baddr); /* LSI-TODO. See Linux Code. Need Graceful exit*/ panic("Reply address out of range"); } if (le16toh(desc->AddressReply.SMID) == 0) { if (((MPI2_DEFAULT_REPLY *)reply)->Function == MPI2_FUNCTION_DIAG_BUFFER_POST) { /* * If SMID is 0 for Diag Buffer Post, * this implies that the reply is due to * a release function with a status that * the buffer has been released. Set * the buffer flags accordingly. 
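/*
 * Illustrative aside, not part of the driver: the chip only reports the
 * low 32 bits of a reply frame's bus address, so the address-reply case
 * above recovers the virtual pointer by offsetting from the pool base and
 * then range-checks the result before using it.  Standalone sketch with a
 * fake pool; the sizes and DMA address are sample values.
 */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

int
main(void)
{
	uint32_t frame_size = 128;	/* sample ReplyFrameSize * 4, in bytes */
	uint32_t fqdepth = 1024;
	uint8_t *reply_frames = malloc((size_t)fqdepth * frame_size);
	uint64_t reply_busaddr = 0x7f2300000ULL;	/* sample DMA address */
	/* Pretend the chip handed back frame #5 (low 32 bits only). */
	uint32_t baddr = (uint32_t)reply_busaddr + 5 * frame_size;
	uint8_t *reply;

	if (reply_frames == NULL)
		return (1);
	reply = reply_frames + (baddr - (uint32_t)reply_busaddr);
	if (reply < reply_frames ||
	    reply > reply_frames + (size_t)fqdepth * frame_size)
		printf("reply %p out of range\n", (void *)reply);
	else
		printf("reply frame index %u\n",
		    (unsigned)((reply - reply_frames) / frame_size));
	free(reply_frames);
	return (0);
}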
*/ rel_rep = (MPI2_DIAG_RELEASE_REPLY *)reply; if ((le16toh(rel_rep->IOCStatus) & MPI2_IOCSTATUS_MASK) == MPI2_IOCSTATUS_DIAGNOSTIC_RELEASED) { pBuffer = &sc->fw_diag_buffer_list[ rel_rep->BufferType]; pBuffer->valid_data = TRUE; pBuffer->owned_by_firmware = FALSE; pBuffer->immediate = FALSE; } } else mps_dispatch_event(sc, baddr, (MPI2_EVENT_NOTIFICATION_REPLY *) reply); } else { cm = &sc->commands[le16toh(desc->AddressReply.SMID)]; cm->cm_reply = reply; cm->cm_reply_data = le32toh(desc->AddressReply.ReplyFrameAddress); } break; } case MPI2_RPY_DESCRIPT_FLAGS_TARGETASSIST_SUCCESS: case MPI2_RPY_DESCRIPT_FLAGS_TARGET_COMMAND_BUFFER: case MPI2_RPY_DESCRIPT_FLAGS_RAID_ACCELERATOR_SUCCESS: default: /* Unhandled */ mps_dprint(sc, MPS_ERROR, "Unhandled reply 0x%x\n", desc->Default.ReplyFlags); cm = NULL; break; } if (cm != NULL) { // Print Error reply frame if (cm->cm_reply) mps_display_reply_info(sc,cm->cm_reply); mps_complete_command(sc, cm); } desc->Words.Low = 0xffffffff; desc->Words.High = 0xffffffff; } if (pq != sc->replypostindex) { mps_dprint(sc, MPS_TRACE, "%s sc %p writing postindex %d\n", __func__, sc, sc->replypostindex); mps_regwrite(sc, MPI2_REPLY_POST_HOST_INDEX_OFFSET, sc->replypostindex); } return; } static void mps_dispatch_event(struct mps_softc *sc, uintptr_t data, MPI2_EVENT_NOTIFICATION_REPLY *reply) { struct mps_event_handle *eh; int event, handled = 0; event = le16toh(reply->Event); TAILQ_FOREACH(eh, &sc->event_list, eh_list) { if (isset(eh->mask, event)) { eh->callback(sc, data, reply); handled++; } } if (handled == 0) mps_dprint(sc, MPS_EVENT, "Unhandled event 0x%x\n", le16toh(event)); /* * This is the only place that the event/reply should be freed. * Anything wanting to hold onto the event data should have * already copied it into their own storage. */ mps_free_reply(sc, data); } static void mps_reregister_events_complete(struct mps_softc *sc, struct mps_command *cm) { mps_dprint(sc, MPS_TRACE, "%s\n", __func__); if (cm->cm_reply) MPS_DPRINT_EVENT(sc, generic, (MPI2_EVENT_NOTIFICATION_REPLY *)cm->cm_reply); mps_free_command(sc, cm); /* next, send a port enable */ mpssas_startup(sc); } /* * For both register_events and update_events, the caller supplies a bitmap * of events that it _wants_. These functions then turn that into a bitmask * suitable for the controller. 
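/*
 * Illustrative aside, not part of the driver: callers hand in a bitmap of
 * events they WANT (bits set with setbit()), while the controller expects
 * a mask of events to SUPPRESS, so the update routine below starts from
 * all-ones and clears the wanted bits.  Standalone sketch with 4 mask
 * words (the real count is MPI2_EVENT_NOTIFY_EVENTMASK_WORDS); the bit
 * twiddling is analogous to setbit() on a little-endian host.
 */
#include <stdint.h>
#include <stdio.h>

#define	NWORDS	4

int
main(void)
{
	uint32_t wanted[NWORDS] = { 0 };
	uint32_t ctrl_mask[NWORDS];
	int event = 0x21;			/* sample event code */
	int i;

	wanted[event / 32] |= 1u << (event % 32);	/* "I want this one" */

	for (i = 0; i < NWORDS; i++)
		ctrl_mask[i] = 0xffffffff;	/* mask everything ... */
	for (i = 0; i < NWORDS; i++)
		ctrl_mask[i] &= ~wanted[i];	/* ... except what is wanted */

	for (i = 0; i < NWORDS; i++)
		printf("EventMasks[%d] = 0x%08x\n", i, ctrl_mask[i]);
	return (0);
}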
*/ int mps_register_events(struct mps_softc *sc, u32 *mask, mps_evt_callback_t *cb, void *data, struct mps_event_handle **handle) { struct mps_event_handle *eh; int error = 0; eh = malloc(sizeof(struct mps_event_handle), M_MPT2, M_WAITOK|M_ZERO); if(!eh) { mps_dprint(sc, MPS_ERROR, "Cannot allocate event memory\n"); return (ENOMEM); } eh->callback = cb; eh->data = data; TAILQ_INSERT_TAIL(&sc->event_list, eh, eh_list); if (mask != NULL) error = mps_update_events(sc, eh, mask); *handle = eh; return (error); } int mps_update_events(struct mps_softc *sc, struct mps_event_handle *handle, u32 *mask) { MPI2_EVENT_NOTIFICATION_REQUEST *evtreq; MPI2_EVENT_NOTIFICATION_REPLY *reply = NULL; struct mps_command *cm; int error, i; mps_dprint(sc, MPS_TRACE, "%s\n", __func__); if ((mask != NULL) && (handle != NULL)) bcopy(mask, &handle->mask[0], sizeof(u32) * MPI2_EVENT_NOTIFY_EVENTMASK_WORDS); for (i = 0; i < MPI2_EVENT_NOTIFY_EVENTMASK_WORDS; i++) sc->event_mask[i] = -1; for (i = 0; i < MPI2_EVENT_NOTIFY_EVENTMASK_WORDS; i++) sc->event_mask[i] &= ~handle->mask[i]; if ((cm = mps_alloc_command(sc)) == NULL) return (EBUSY); evtreq = (MPI2_EVENT_NOTIFICATION_REQUEST *)cm->cm_req; evtreq->Function = MPI2_FUNCTION_EVENT_NOTIFICATION; evtreq->MsgFlags = 0; evtreq->SASBroadcastPrimitiveMasks = 0; #ifdef MPS_DEBUG_ALL_EVENTS { u_char fullmask[16]; memset(fullmask, 0x00, 16); bcopy(fullmask, &evtreq->EventMasks[0], sizeof(u32) * MPI2_EVENT_NOTIFY_EVENTMASK_WORDS); } #else for (i = 0; i < MPI2_EVENT_NOTIFY_EVENTMASK_WORDS; i++) evtreq->EventMasks[i] = htole32(sc->event_mask[i]); #endif cm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE; cm->cm_data = NULL; error = mps_wait_command(sc, &cm, 60, 0); if (cm != NULL) reply = (MPI2_EVENT_NOTIFICATION_REPLY *)cm->cm_reply; if ((reply == NULL) || (reply->IOCStatus & MPI2_IOCSTATUS_MASK) != MPI2_IOCSTATUS_SUCCESS) error = ENXIO; if (reply) MPS_DPRINT_EVENT(sc, generic, reply); mps_dprint(sc, MPS_TRACE, "%s finished error %d\n", __func__, error); if (cm != NULL) mps_free_command(sc, cm); return (error); } static int mps_reregister_events(struct mps_softc *sc) { MPI2_EVENT_NOTIFICATION_REQUEST *evtreq; struct mps_command *cm; struct mps_event_handle *eh; int error, i; mps_dprint(sc, MPS_TRACE, "%s\n", __func__); /* first, reregister events */ for (i = 0; i < MPI2_EVENT_NOTIFY_EVENTMASK_WORDS; i++) sc->event_mask[i] = -1; TAILQ_FOREACH(eh, &sc->event_list, eh_list) { for (i = 0; i < MPI2_EVENT_NOTIFY_EVENTMASK_WORDS; i++) sc->event_mask[i] &= ~eh->mask[i]; } if ((cm = mps_alloc_command(sc)) == NULL) return (EBUSY); evtreq = (MPI2_EVENT_NOTIFICATION_REQUEST *)cm->cm_req; evtreq->Function = MPI2_FUNCTION_EVENT_NOTIFICATION; evtreq->MsgFlags = 0; evtreq->SASBroadcastPrimitiveMasks = 0; #ifdef MPS_DEBUG_ALL_EVENTS { u_char fullmask[16]; memset(fullmask, 0x00, 16); bcopy(fullmask, &evtreq->EventMasks[0], sizeof(u32) * MPI2_EVENT_NOTIFY_EVENTMASK_WORDS); } #else for (i = 0; i < MPI2_EVENT_NOTIFY_EVENTMASK_WORDS; i++) evtreq->EventMasks[i] = htole32(sc->event_mask[i]); #endif cm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE; cm->cm_data = NULL; cm->cm_complete = mps_reregister_events_complete; error = mps_map_command(sc, cm); mps_dprint(sc, MPS_TRACE, "%s finished with error %d\n", __func__, error); return (error); } void mps_deregister_events(struct mps_softc *sc, struct mps_event_handle *handle) { TAILQ_REMOVE(&sc->event_list, handle, eh_list); free(handle, M_MPT2); } /* * Add a chain element as the next SGE for the specified command. 
* Reset cm_sge and cm_sgesize to indicate all the available space. */ static int mps_add_chain(struct mps_command *cm) { MPI2_SGE_CHAIN32 *sgc; struct mps_chain *chain; int space; if (cm->cm_sglsize < MPS_SGC_SIZE) panic("MPS: Need SGE Error Code\n"); chain = mps_alloc_chain(cm->cm_sc); if (chain == NULL) return (ENOBUFS); space = (int)cm->cm_sc->facts->IOCRequestFrameSize * 4; /* * Note: a double-linked list is used to make it easier to * walk for debugging. */ TAILQ_INSERT_TAIL(&cm->cm_chain_list, chain, chain_link); sgc = (MPI2_SGE_CHAIN32 *)&cm->cm_sge->MpiChain; sgc->Length = htole16(space); sgc->NextChainOffset = 0; /* TODO Looks like bug in Setting sgc->Flags. * sgc->Flags = ( MPI2_SGE_FLAGS_CHAIN_ELEMENT | MPI2_SGE_FLAGS_64_BIT_ADDRESSING | * MPI2_SGE_FLAGS_SYSTEM_ADDRESS) << MPI2_SGE_FLAGS_SHIFT * This is fine.. because we are not using simple element. In case of * MPI2_SGE_CHAIN32, we have separate Length and Flags feild. */ sgc->Flags = MPI2_SGE_FLAGS_CHAIN_ELEMENT; sgc->Address = htole32(chain->chain_busaddr); cm->cm_sge = (MPI2_SGE_IO_UNION *)&chain->chain->MpiSimple; cm->cm_sglsize = space; return (0); } /* * Add one scatter-gather element (chain, simple, transaction context) * to the scatter-gather list for a command. Maintain cm_sglsize and * cm_sge as the remaining size and pointer to the next SGE to fill * in, respectively. */ int mps_push_sge(struct mps_command *cm, void *sgep, size_t len, int segsleft) { MPI2_SGE_TRANSACTION_UNION *tc = sgep; MPI2_SGE_SIMPLE64 *sge = sgep; int error, type; uint32_t saved_buf_len, saved_address_low, saved_address_high; type = (tc->Flags & MPI2_SGE_FLAGS_ELEMENT_MASK); #ifdef INVARIANTS switch (type) { case MPI2_SGE_FLAGS_TRANSACTION_ELEMENT: { if (len != tc->DetailsLength + 4) panic("TC %p length %u or %zu?", tc, tc->DetailsLength + 4, len); } break; case MPI2_SGE_FLAGS_CHAIN_ELEMENT: /* Driver only uses 32-bit chain elements */ if (len != MPS_SGC_SIZE) panic("CHAIN %p length %u or %zu?", sgep, MPS_SGC_SIZE, len); break; case MPI2_SGE_FLAGS_SIMPLE_ELEMENT: /* Driver only uses 64-bit SGE simple elements */ if (len != MPS_SGE64_SIZE) panic("SGE simple %p length %u or %zu?", sge, MPS_SGE64_SIZE, len); if (((le32toh(sge->FlagsLength) >> MPI2_SGE_FLAGS_SHIFT) & MPI2_SGE_FLAGS_ADDRESS_SIZE) == 0) panic("SGE simple %p not marked 64-bit?", sge); break; default: panic("Unexpected SGE %p, flags %02x", tc, tc->Flags); } #endif /* * case 1: 1 more segment, enough room for it * case 2: 2 more segments, enough room for both * case 3: >=2 more segments, only enough room for 1 and a chain * case 4: >=1 more segment, enough room for only a chain * case 5: >=1 more segment, no room for anything (error) */ /* * There should be room for at least a chain element, or this * code is buggy. Case (5). */ if (cm->cm_sglsize < MPS_SGC_SIZE) panic("MPS: Need SGE Error Code\n"); if (segsleft >= 2 && cm->cm_sglsize < len + MPS_SGC_SIZE + MPS_SGE64_SIZE) { /* * There are 2 or more segments left to add, and only * enough room for 1 and a chain. Case (3). * * Mark as last element in this chain if necessary. */ if (type == MPI2_SGE_FLAGS_SIMPLE_ELEMENT) { sge->FlagsLength |= htole32( MPI2_SGE_FLAGS_LAST_ELEMENT << MPI2_SGE_FLAGS_SHIFT); } /* * Add the item then a chain. Do the chain now, * rather than on the next iteration, to simplify * understanding the code. 
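/*
 * Illustrative aside, not part of the driver: the five-case analysis in
 * the comment above boils down to "is there still room for this element
 * plus whatever must follow it (another element or a chain)?".  Standalone
 * decision-only sketch; the sizes are stand-ins for MPS_SGC_SIZE (32-bit
 * chain element) and MPS_SGE64_SIZE (64-bit simple element).
 */
#include <stdio.h>

#define	SGC_SIZE	8	/* stand-in for MPS_SGC_SIZE */
#define	SGE64_SIZE	12	/* stand-in for MPS_SGE64_SIZE */

static const char *
sge_decision(int room, int len, int segsleft)
{
	if (room < SGC_SIZE)
		return ("bug: not even room for a chain (case 5)");
	if (segsleft >= 2 && room < len + SGC_SIZE + SGE64_SIZE)
		return ("add element marked LAST, then a chain (case 3)");
	if (segsleft >= 1 && room < len + SGC_SIZE)
		return ("add a chain first, then the element (case 4)");
	return ("enough room, just add the element (cases 1/2)");
}

int
main(void)
{
	printf("%s\n", sge_decision(64, SGE64_SIZE, 1));
	printf("%s\n", sge_decision(24, SGE64_SIZE, 4));
	printf("%s\n", sge_decision(16, SGE64_SIZE, 1));
	return (0);
}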
*/ cm->cm_sglsize -= len; bcopy(sgep, cm->cm_sge, len); cm->cm_sge = (MPI2_SGE_IO_UNION *)((uintptr_t)cm->cm_sge + len); return (mps_add_chain(cm)); } if (segsleft >= 1 && cm->cm_sglsize < len + MPS_SGC_SIZE) { /* * 1 or more segment, enough room for only a chain. * Hope the previous element wasn't a Simple entry * that needed to be marked with * MPI2_SGE_FLAGS_LAST_ELEMENT. Case (4). */ if ((error = mps_add_chain(cm)) != 0) return (error); } #ifdef INVARIANTS /* Case 1: 1 more segment, enough room for it. */ if (segsleft == 1 && cm->cm_sglsize < len) panic("1 seg left and no room? %u versus %zu", cm->cm_sglsize, len); /* Case 2: 2 more segments, enough room for both */ if (segsleft == 2 && cm->cm_sglsize < len + MPS_SGE64_SIZE) panic("2 segs left and no room? %u versus %zu", cm->cm_sglsize, len); #endif if (segsleft == 1 && type == MPI2_SGE_FLAGS_SIMPLE_ELEMENT) { /* * If this is a bi-directional request, need to account for that * here. Save the pre-filled sge values. These will be used * either for the 2nd SGL or for a single direction SGL. If * cm_out_len is non-zero, this is a bi-directional request, so * fill in the OUT SGL first, then the IN SGL, otherwise just * fill in the IN SGL. Note that at this time, when filling in * 2 SGL's for a bi-directional request, they both use the same * DMA buffer (same cm command). */ saved_buf_len = le32toh(sge->FlagsLength) & 0x00FFFFFF; saved_address_low = sge->Address.Low; saved_address_high = sge->Address.High; if (cm->cm_out_len) { sge->FlagsLength = htole32(cm->cm_out_len | ((uint32_t)(MPI2_SGE_FLAGS_SIMPLE_ELEMENT | MPI2_SGE_FLAGS_END_OF_BUFFER | MPI2_SGE_FLAGS_HOST_TO_IOC | MPI2_SGE_FLAGS_64_BIT_ADDRESSING) << MPI2_SGE_FLAGS_SHIFT)); cm->cm_sglsize -= len; bcopy(sgep, cm->cm_sge, len); cm->cm_sge = (MPI2_SGE_IO_UNION *)((uintptr_t)cm->cm_sge + len); } saved_buf_len |= ((uint32_t)(MPI2_SGE_FLAGS_SIMPLE_ELEMENT | MPI2_SGE_FLAGS_END_OF_BUFFER | MPI2_SGE_FLAGS_LAST_ELEMENT | MPI2_SGE_FLAGS_END_OF_LIST | MPI2_SGE_FLAGS_64_BIT_ADDRESSING) << MPI2_SGE_FLAGS_SHIFT); if (cm->cm_flags & MPS_CM_FLAGS_DATAIN) { saved_buf_len |= ((uint32_t)(MPI2_SGE_FLAGS_IOC_TO_HOST) << MPI2_SGE_FLAGS_SHIFT); } else { saved_buf_len |= ((uint32_t)(MPI2_SGE_FLAGS_HOST_TO_IOC) << MPI2_SGE_FLAGS_SHIFT); } sge->FlagsLength = htole32(saved_buf_len); sge->Address.Low = saved_address_low; sge->Address.High = saved_address_high; } cm->cm_sglsize -= len; bcopy(sgep, cm->cm_sge, len); cm->cm_sge = (MPI2_SGE_IO_UNION *)((uintptr_t)cm->cm_sge + len); return (0); } /* * Add one dma segment to the scatter-gather list for a command. */ int mps_add_dmaseg(struct mps_command *cm, vm_paddr_t pa, size_t len, u_int flags, int segsleft) { MPI2_SGE_SIMPLE64 sge; /* * This driver always uses 64-bit address elements for simplicity. */ bzero(&sge, sizeof(sge)); flags |= MPI2_SGE_FLAGS_SIMPLE_ELEMENT | MPI2_SGE_FLAGS_64_BIT_ADDRESSING; sge.FlagsLength = htole32(len | (flags << MPI2_SGE_FLAGS_SHIFT)); mps_from_u64(pa, &sge.Address); return (mps_push_sge(cm, &sge, sizeof sge, segsleft)); } static void mps_data_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error) { struct mps_softc *sc; struct mps_command *cm; u_int i, dir, sflags; cm = (struct mps_command *)arg; sc = cm->cm_sc; /* * In this case, just print out a warning and let the chip tell the * user they did the wrong thing. 
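/*
 * Illustrative aside, not part of the driver: the 64-bit "simple" SGE
 * built in mps_add_dmaseg() above packs the byte count into the low
 * 24 bits of FlagsLength (the code above masks with 0x00FFFFFF) and the
 * flag byte into the top 8 bits.  Standalone sketch; the shift and flag
 * values are local stand-ins for the MPI2_SGE_* definitions from the MPI2
 * headers, and the little-endian byte swap is omitted.
 */
#include <stdint.h>
#include <stdio.h>

#define	SGE_FLAGS_SHIFT			24	/* stand-in for MPI2_SGE_FLAGS_SHIFT */
#define	SGE_FLAGS_SIMPLE_ELEMENT	0x10	/* stand-ins for MPI2 flag bits */
#define	SGE_FLAGS_64_BIT_ADDRESSING	0x02

int
main(void)
{
	uint32_t len = 0x1000;			/* 4 KiB segment */
	uint32_t flags = SGE_FLAGS_SIMPLE_ELEMENT | SGE_FLAGS_64_BIT_ADDRESSING;
	uint32_t flags_length = len | (flags << SGE_FLAGS_SHIFT);

	printf("FlagsLength = 0x%08x (flags 0x%02x, length 0x%06x)\n",
	    flags_length, flags_length >> SGE_FLAGS_SHIFT,
	    flags_length & 0x00ffffff);
	return (0);
}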
*/ if ((cm->cm_max_segs != 0) && (nsegs > cm->cm_max_segs)) { mps_dprint(sc, MPS_ERROR, "%s: warning: busdma returned %d segments, " "more than the %d allowed\n", __func__, nsegs, cm->cm_max_segs); } /* * Set up DMA direction flags. Bi-directional requests are also handled * here. In that case, both direction flags will be set. */ sflags = 0; if (cm->cm_flags & MPS_CM_FLAGS_SMP_PASS) { /* * We have to add a special case for SMP passthrough, there * is no easy way to generically handle it. The first * S/G element is used for the command (therefore the * direction bit needs to be set). The second one is used * for the reply. We'll leave it to the caller to make * sure we only have two buffers. */ /* * Even though the busdma man page says it doesn't make * sense to have both direction flags, it does in this case. * We have one s/g element being accessed in each direction. */ dir = BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD; /* * Set the direction flag on the first buffer in the SMP * passthrough request. We'll clear it for the second one. */ sflags |= MPI2_SGE_FLAGS_DIRECTION | MPI2_SGE_FLAGS_END_OF_BUFFER; } else if (cm->cm_flags & MPS_CM_FLAGS_DATAOUT) { sflags |= MPI2_SGE_FLAGS_HOST_TO_IOC; dir = BUS_DMASYNC_PREWRITE; } else dir = BUS_DMASYNC_PREREAD; for (i = 0; i < nsegs; i++) { if ((cm->cm_flags & MPS_CM_FLAGS_SMP_PASS) && (i != 0)) { sflags &= ~MPI2_SGE_FLAGS_DIRECTION; } error = mps_add_dmaseg(cm, segs[i].ds_addr, segs[i].ds_len, sflags, nsegs - i); if (error != 0) { /* Resource shortage, roll back! */ if (ratecheck(&sc->lastfail, &mps_chainfail_interval)) mps_dprint(sc, MPS_INFO, "Out of chain frames, " "consider increasing hw.mps.max_chains.\n"); cm->cm_flags |= MPS_CM_FLAGS_CHAIN_FAILED; mps_complete_command(sc, cm); return; } } bus_dmamap_sync(sc->buffer_dmat, cm->cm_dmamap, dir); mps_enqueue_request(sc, cm); return; } static void mps_data_cb2(void *arg, bus_dma_segment_t *segs, int nsegs, bus_size_t mapsize, int error) { mps_data_cb(arg, segs, nsegs, error); } /* * This is the routine to enqueue commands ansynchronously. * Note that the only error path here is from bus_dmamap_load(), which can * return EINPROGRESS if it is waiting for resources. Other than this, it's * assumed that if you have a command in-hand, then you have enough credits * to use it. */ int mps_map_command(struct mps_softc *sc, struct mps_command *cm) { int error = 0; if (cm->cm_flags & MPS_CM_FLAGS_USE_UIO) { error = bus_dmamap_load_uio(sc->buffer_dmat, cm->cm_dmamap, &cm->cm_uio, mps_data_cb2, cm, 0); } else if (cm->cm_flags & MPS_CM_FLAGS_USE_CCB) { error = bus_dmamap_load_ccb(sc->buffer_dmat, cm->cm_dmamap, cm->cm_data, mps_data_cb, cm, 0); } else if ((cm->cm_data != NULL) && (cm->cm_length != 0)) { error = bus_dmamap_load(sc->buffer_dmat, cm->cm_dmamap, cm->cm_data, cm->cm_length, mps_data_cb, cm, 0); } else { /* Add a zero-length element as needed */ if (cm->cm_sge != NULL) mps_add_dmaseg(cm, 0, 0, 0, 1); mps_enqueue_request(sc, cm); } return (error); } /* * This is the routine to enqueue commands synchronously. An error of * EINPROGRESS from mps_map_command() is ignored since the command will * be executed and enqueued automatically. Other errors come from msleep(). 
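/*
 * Illustrative aside, not part of the driver: when it cannot msleep(),
 * mps_wait_command() below polls for completion in 50 ms steps and gives
 * up once the elapsed time exceeds the caller's timeout (in seconds).
 * Standalone POSIX analogue of that loop; "command done" is simulated
 * with a counter and CLOCK_MONOTONIC stands in for getmicrouptime().
 */
#include <stdio.h>
#include <time.h>

int
main(void)
{
	struct timespec start, now, step = { 0, 50 * 1000 * 1000 }; /* 50 ms */
	int timeout_sec = 2, done = 0, polls = 0;

	clock_gettime(CLOCK_MONOTONIC, &start);
	for (;;) {
		/* Stand-in for mps_intr_locked(): completion arrives later. */
		if (++polls >= 5)
			done = 1;
		if (done)
			break;
		nanosleep(&step, NULL);
		clock_gettime(CLOCK_MONOTONIC, &now);
		if (now.tv_sec - start.tv_sec > timeout_sec) {
			printf("timed out after %d polls\n", polls);
			return (1);
		}
	}
	printf("completed after %d polls\n", polls);
	return (0);
}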
*/ int mps_wait_command(struct mps_softc *sc, struct mps_command **cmp, int timeout, int sleep_flag) { int error, rc; struct timeval cur_time, start_time; struct mps_command *cm = *cmp; if (sc->mps_flags & MPS_FLAGS_DIAGRESET) return EBUSY; cm->cm_complete = NULL; cm->cm_flags |= MPS_CM_FLAGS_POLLED; error = mps_map_command(sc, cm); if ((error != 0) && (error != EINPROGRESS)) return (error); /* * Check for context and wait for 50 mSec at a time until time has * expired or the command has finished. If msleep can't be used, need * to poll. */ if (curthread->td_no_sleeping != 0) sleep_flag = NO_SLEEP; getmicrouptime(&start_time); if (mtx_owned(&sc->mps_mtx) && sleep_flag == CAN_SLEEP) { cm->cm_flags |= MPS_CM_FLAGS_WAKEUP; error = msleep(cm, &sc->mps_mtx, 0, "mpswait", timeout*hz); if (error == EWOULDBLOCK) { /* * Record the actual elapsed time in the case of a * timeout for the message below. */ getmicrouptime(&cur_time); timevalsub(&cur_time, &start_time); } } else { while ((cm->cm_flags & MPS_CM_FLAGS_COMPLETE) == 0) { mps_intr_locked(sc); if (sleep_flag == CAN_SLEEP) pause("mpswait", hz/20); else DELAY(50000); getmicrouptime(&cur_time); timevalsub(&cur_time, &start_time); if (cur_time.tv_sec > timeout) { error = EWOULDBLOCK; break; } } } if (error == EWOULDBLOCK) { mps_dprint(sc, MPS_FAULT, "Calling Reinit from %s, timeout=%d," " elapsed=%jd\n", __func__, timeout, (intmax_t)cur_time.tv_sec); rc = mps_reinit(sc); mps_dprint(sc, MPS_FAULT, "Reinit %s\n", (rc == 0) ? "success" : "failed"); if (sc->mps_flags & MPS_FLAGS_REALLOCATED) { /* * Tell the caller that we freed the command in a * reinit. */ *cmp = NULL; } error = ETIMEDOUT; } return (error); } /* * The MPT driver had a verbose interface for config pages. In this driver, * reduce it to much simpler terms, similar to the Linux driver. 
*/ int mps_read_config_page(struct mps_softc *sc, struct mps_config_params *params) { MPI2_CONFIG_REQUEST *req; struct mps_command *cm; int error; if (sc->mps_flags & MPS_FLAGS_BUSY) { return (EBUSY); } cm = mps_alloc_command(sc); if (cm == NULL) { return (EBUSY); } req = (MPI2_CONFIG_REQUEST *)cm->cm_req; req->Function = MPI2_FUNCTION_CONFIG; req->Action = params->action; req->SGLFlags = 0; req->ChainOffset = 0; req->PageAddress = params->page_address; if (params->hdr.Struct.PageType == MPI2_CONFIG_PAGETYPE_EXTENDED) { MPI2_CONFIG_EXTENDED_PAGE_HEADER *hdr; hdr = ¶ms->hdr.Ext; req->ExtPageType = hdr->ExtPageType; req->ExtPageLength = hdr->ExtPageLength; req->Header.PageType = MPI2_CONFIG_PAGETYPE_EXTENDED; req->Header.PageLength = 0; /* Must be set to zero */ req->Header.PageNumber = hdr->PageNumber; req->Header.PageVersion = hdr->PageVersion; } else { MPI2_CONFIG_PAGE_HEADER *hdr; hdr = ¶ms->hdr.Struct; req->Header.PageType = hdr->PageType; req->Header.PageNumber = hdr->PageNumber; req->Header.PageLength = hdr->PageLength; req->Header.PageVersion = hdr->PageVersion; } cm->cm_data = params->buffer; cm->cm_length = params->length; if (cm->cm_data != NULL) { cm->cm_sge = &req->PageBufferSGE; cm->cm_sglsize = sizeof(MPI2_SGE_IO_UNION); cm->cm_flags = MPS_CM_FLAGS_SGE_SIMPLE | MPS_CM_FLAGS_DATAIN; } else cm->cm_sge = NULL; cm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE; cm->cm_complete_data = params; if (params->callback != NULL) { cm->cm_complete = mps_config_complete; return (mps_map_command(sc, cm)); } else { error = mps_wait_command(sc, &cm, 0, CAN_SLEEP); if (error) { mps_dprint(sc, MPS_FAULT, "Error %d reading config page\n", error); if (cm != NULL) mps_free_command(sc, cm); return (error); } mps_config_complete(sc, cm); } return (0); } int mps_write_config_page(struct mps_softc *sc, struct mps_config_params *params) { return (EINVAL); } static void mps_config_complete(struct mps_softc *sc, struct mps_command *cm) { MPI2_CONFIG_REPLY *reply; struct mps_config_params *params; MPS_FUNCTRACE(sc); params = cm->cm_complete_data; if (cm->cm_data != NULL) { bus_dmamap_sync(sc->buffer_dmat, cm->cm_dmamap, BUS_DMASYNC_POSTREAD); bus_dmamap_unload(sc->buffer_dmat, cm->cm_dmamap); } /* * XXX KDM need to do more error recovery? This results in the * device in question not getting probed. 
*/ if ((cm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) { params->status = MPI2_IOCSTATUS_BUSY; goto done; } reply = (MPI2_CONFIG_REPLY *)cm->cm_reply; if (reply == NULL) { params->status = MPI2_IOCSTATUS_BUSY; goto done; } params->status = reply->IOCStatus; if (params->hdr.Struct.PageType == MPI2_CONFIG_PAGETYPE_EXTENDED) { params->hdr.Ext.ExtPageType = reply->ExtPageType; params->hdr.Ext.ExtPageLength = reply->ExtPageLength; params->hdr.Ext.PageType = reply->Header.PageType; params->hdr.Ext.PageNumber = reply->Header.PageNumber; params->hdr.Ext.PageVersion = reply->Header.PageVersion; } else { params->hdr.Struct.PageType = reply->Header.PageType; params->hdr.Struct.PageNumber = reply->Header.PageNumber; params->hdr.Struct.PageLength = reply->Header.PageLength; params->hdr.Struct.PageVersion = reply->Header.PageVersion; } done: mps_free_command(sc, cm); if (params->callback != NULL) params->callback(sc, params); return; } Index: head/sys/dev/mps/mps_mapping.c =================================================================== --- head/sys/dev/mps/mps_mapping.c (revision 327948) +++ head/sys/dev/mps/mps_mapping.c (revision 327949) @@ -1,2678 +1,2678 @@ /*- * SPDX-License-Identifier: BSD-2-Clause-FreeBSD * * Copyright (c) 2011-2015 LSI Corp. * Copyright (c) 2013-2015 Avago Technologies * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * Avago Technologies (LSI) MPT-Fusion Host Adapter FreeBSD */ #include __FBSDID("$FreeBSD$"); /* TODO Move headers to mpsvar */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include /** * _mapping_clear_map_entry - Clear a particular mapping entry. * @map_entry: map table entry * * Returns nothing. */ static inline void _mapping_clear_map_entry(struct dev_mapping_table *map_entry) { map_entry->physical_id = 0; map_entry->device_info = 0; map_entry->phy_bits = 0; map_entry->dpm_entry_num = MPS_DPM_BAD_IDX; map_entry->dev_handle = 0; map_entry->id = -1; map_entry->missing_count = 0; map_entry->init_complete = 0; map_entry->TLR_bits = (u8)MPI2_SCSIIO_CONTROL_NO_TLR; } /** * _mapping_clear_enc_entry - Clear a particular enclosure table entry. 
* @enc_entry: enclosure table entry * * Returns nothing. */ static inline void _mapping_clear_enc_entry(struct enc_mapping_table *enc_entry) { enc_entry->enclosure_id = 0; enc_entry->start_index = MPS_MAPTABLE_BAD_IDX; enc_entry->phy_bits = 0; enc_entry->dpm_entry_num = MPS_DPM_BAD_IDX; enc_entry->enc_handle = 0; enc_entry->num_slots = 0; enc_entry->start_slot = 0; enc_entry->missing_count = 0; enc_entry->removal_flag = 0; enc_entry->skip_search = 0; enc_entry->init_complete = 0; } /** * _mapping_commit_enc_entry - write a particular enc entry in DPM page0. * @sc: per adapter object * @enc_entry: enclosure table entry * * Returns 0 for success, non-zero for failure. */ static int _mapping_commit_enc_entry(struct mps_softc *sc, struct enc_mapping_table *et_entry) { Mpi2DriverMap0Entry_t *dpm_entry; struct dev_mapping_table *mt_entry; Mpi2ConfigReply_t mpi_reply; Mpi2DriverMappingPage0_t config_page; if (!sc->is_dpm_enable) return 0; memset(&config_page, 0, sizeof(Mpi2DriverMappingPage0_t)); memcpy(&config_page.Header, (u8 *) sc->dpm_pg0, sizeof(MPI2_CONFIG_EXTENDED_PAGE_HEADER)); dpm_entry = (Mpi2DriverMap0Entry_t *)((u8 *)sc->dpm_pg0 + sizeof(MPI2_CONFIG_EXTENDED_PAGE_HEADER)); dpm_entry += et_entry->dpm_entry_num; dpm_entry->PhysicalIdentifier.Low = ( 0xFFFFFFFF & et_entry->enclosure_id); dpm_entry->PhysicalIdentifier.High = ( et_entry->enclosure_id >> 32); mt_entry = &sc->mapping_table[et_entry->start_index]; dpm_entry->DeviceIndex = htole16(mt_entry->id); dpm_entry->MappingInformation = et_entry->num_slots; dpm_entry->MappingInformation <<= MPI2_DRVMAP0_MAPINFO_SLOT_SHIFT; dpm_entry->MappingInformation |= et_entry->missing_count; dpm_entry->MappingInformation = htole16(dpm_entry->MappingInformation); dpm_entry->PhysicalBitsMapping = htole32(et_entry->phy_bits); dpm_entry->Reserved1 = 0; mps_dprint(sc, MPS_MAPPING, "%s: Writing DPM entry %d for enclosure.\n", __func__, et_entry->dpm_entry_num); memcpy(&config_page.Entry, (u8 *)dpm_entry, sizeof(Mpi2DriverMap0Entry_t)); if (mps_config_set_dpm_pg0(sc, &mpi_reply, &config_page, et_entry->dpm_entry_num)) { mps_dprint(sc, MPS_ERROR | MPS_MAPPING, "%s: Write of DPM " "entry %d for enclosure failed.\n", __func__, et_entry->dpm_entry_num); dpm_entry->MappingInformation = le16toh(dpm_entry-> MappingInformation); dpm_entry->DeviceIndex = le16toh(dpm_entry->DeviceIndex); dpm_entry->PhysicalBitsMapping = le32toh(dpm_entry->PhysicalBitsMapping); return -1; } dpm_entry->MappingInformation = le16toh(dpm_entry-> MappingInformation); dpm_entry->DeviceIndex = le16toh(dpm_entry->DeviceIndex); dpm_entry->PhysicalBitsMapping = le32toh(dpm_entry->PhysicalBitsMapping); return 0; } /** * _mapping_commit_map_entry - write a particular map table entry in DPM page0. * @sc: per adapter object * @mt_entry: mapping table entry * * Returns 0 for success, non-zero for failure. */ static int _mapping_commit_map_entry(struct mps_softc *sc, struct dev_mapping_table *mt_entry) { Mpi2DriverMap0Entry_t *dpm_entry; Mpi2ConfigReply_t mpi_reply; Mpi2DriverMappingPage0_t config_page; if (!sc->is_dpm_enable) return 0; /* * It's possible that this Map Entry points to a BAD DPM index. This * can happen if the Map Entry is a for a missing device and the DPM * entry that was being used by this device is now being used by some * new device. So, check for a BAD DPM index and just return if so. */ if (mt_entry->dpm_entry_num == MPS_DPM_BAD_IDX) { mps_dprint(sc, MPS_MAPPING, "%s: DPM entry location for target " "%d is invalid. 
DPM will not be written.\n", __func__, mt_entry->id); return 0; } memset(&config_page, 0, sizeof(Mpi2DriverMappingPage0_t)); memcpy(&config_page.Header, (u8 *)sc->dpm_pg0, sizeof(MPI2_CONFIG_EXTENDED_PAGE_HEADER)); dpm_entry = (Mpi2DriverMap0Entry_t *)((u8 *) sc->dpm_pg0 + sizeof(MPI2_CONFIG_EXTENDED_PAGE_HEADER)); dpm_entry = dpm_entry + mt_entry->dpm_entry_num; dpm_entry->PhysicalIdentifier.Low = (0xFFFFFFFF & mt_entry->physical_id); dpm_entry->PhysicalIdentifier.High = (mt_entry->physical_id >> 32); dpm_entry->DeviceIndex = htole16(mt_entry->id); dpm_entry->MappingInformation = htole16(mt_entry->missing_count); dpm_entry->PhysicalBitsMapping = 0; dpm_entry->Reserved1 = 0; memcpy(&config_page.Entry, (u8 *)dpm_entry, sizeof(Mpi2DriverMap0Entry_t)); mps_dprint(sc, MPS_MAPPING, "%s: Writing DPM entry %d for target %d.\n", __func__, mt_entry->dpm_entry_num, mt_entry->id); if (mps_config_set_dpm_pg0(sc, &mpi_reply, &config_page, mt_entry->dpm_entry_num)) { mps_dprint(sc, MPS_ERROR | MPS_MAPPING, "%s: Write of DPM " "entry %d for target %d failed.\n", __func__, mt_entry->dpm_entry_num, mt_entry->id); dpm_entry->MappingInformation = le16toh(dpm_entry-> MappingInformation); dpm_entry->DeviceIndex = le16toh(dpm_entry->DeviceIndex); return -1; } dpm_entry->MappingInformation = le16toh(dpm_entry->MappingInformation); dpm_entry->DeviceIndex = le16toh(dpm_entry->DeviceIndex); return 0; } /** * _mapping_get_ir_maprange - get start and end index for IR map range. * @sc: per adapter object * @start_idx: place holder for start index * @end_idx: place holder for end index * * The IR volumes can be mapped either at start or end of the mapping table * this function gets the detail of where IR volume mapping starts and ends * in the device mapping table * * Returns nothing. */ static void _mapping_get_ir_maprange(struct mps_softc *sc, u32 *start_idx, u32 *end_idx) { u16 volume_mapping_flags; u16 ioc_pg8_flags = le16toh(sc->ioc_pg8.Flags); volume_mapping_flags = le16toh(sc->ioc_pg8.IRVolumeMappingFlags) & MPI2_IOCPAGE8_IRFLAGS_MASK_VOLUME_MAPPING_MODE; if (volume_mapping_flags == MPI2_IOCPAGE8_IRFLAGS_LOW_VOLUME_MAPPING) { *start_idx = 0; if (ioc_pg8_flags & MPI2_IOCPAGE8_FLAGS_RESERVED_TARGETID_0) *start_idx = 1; } else *start_idx = sc->max_devices - sc->max_volumes; *end_idx = *start_idx + sc->max_volumes - 1; } /** * _mapping_get_enc_idx_from_id - get enclosure index from enclosure ID * @sc: per adapter object * @enc_id: enclosure logical identifier * * Returns the index of enclosure entry on success or bad index. */ static u8 _mapping_get_enc_idx_from_id(struct mps_softc *sc, u64 enc_id, u64 phy_bits) { struct enc_mapping_table *et_entry; u8 enc_idx = 0; for (enc_idx = 0; enc_idx < sc->num_enc_table_entries; enc_idx++) { et_entry = &sc->enclosure_table[enc_idx]; if ((et_entry->enclosure_id == le64toh(enc_id)) && (!et_entry->phy_bits || (et_entry->phy_bits & le32toh(phy_bits)))) return enc_idx; } return MPS_ENCTABLE_BAD_IDX; } /** * _mapping_get_enc_idx_from_handle - get enclosure index from handle * @sc: per adapter object * @enc_id: enclosure handle * * Returns the index of enclosure entry on success or bad index. 
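/*
 * Illustrative aside, not part of the driver: as _mapping_get_ir_maprange()
 * above shows, IR volumes occupy a contiguous window either at the bottom
 * or at the top of the mapping table.  Standalone sketch of that range
 * calculation; the two booleans stand in for the IOC page 8 flag tests.
 */
#include <stdio.h>

static void
ir_maprange(unsigned max_devices, unsigned max_volumes,
    int low_volume_mapping, int reserve_target0,
    unsigned *start, unsigned *end)
{
	if (low_volume_mapping)
		*start = reserve_target0 ? 1 : 0;	/* volumes at the bottom */
	else
		*start = max_devices - max_volumes;	/* volumes at the top */
	*end = *start + max_volumes - 1;
}

int
main(void)
{
	unsigned s, e;

	ir_maprange(1024, 2, 1, 0, &s, &e);
	printf("low mapping:  volumes use map entries %u..%u\n", s, e);
	ir_maprange(1024, 2, 0, 0, &s, &e);
	printf("high mapping: volumes use map entries %u..%u\n", s, e);
	return (0);
}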
*/ static u8 _mapping_get_enc_idx_from_handle(struct mps_softc *sc, u16 handle) { struct enc_mapping_table *et_entry; u8 enc_idx = 0; for (enc_idx = 0; enc_idx < sc->num_enc_table_entries; enc_idx++) { et_entry = &sc->enclosure_table[enc_idx]; if (et_entry->missing_count) continue; if (et_entry->enc_handle == handle) return enc_idx; } return MPS_ENCTABLE_BAD_IDX; } /** * _mapping_get_high_missing_et_idx - get missing enclosure index * @sc: per adapter object * * Search through the enclosure table and identifies the enclosure entry * with high missing count and returns it's index * * Returns the index of enclosure entry on success or bad index. */ static u8 _mapping_get_high_missing_et_idx(struct mps_softc *sc) { struct enc_mapping_table *et_entry; u8 high_missing_count = 0; u8 enc_idx, high_idx = MPS_ENCTABLE_BAD_IDX; for (enc_idx = 0; enc_idx < sc->num_enc_table_entries; enc_idx++) { et_entry = &sc->enclosure_table[enc_idx]; if ((et_entry->missing_count > high_missing_count) && !et_entry->skip_search) { high_missing_count = et_entry->missing_count; high_idx = enc_idx; } } return high_idx; } /** * _mapping_get_high_missing_mt_idx - get missing map table index * @sc: per adapter object * * Search through the map table and identifies the device entry * with high missing count and returns it's index * * Returns the index of map table entry on success or bad index. */ static u32 _mapping_get_high_missing_mt_idx(struct mps_softc *sc) { u32 map_idx, high_idx = MPS_MAPTABLE_BAD_IDX; u8 high_missing_count = 0; u32 start_idx, end_idx, start_idx_ir, end_idx_ir; struct dev_mapping_table *mt_entry; u16 ioc_pg8_flags = le16toh(sc->ioc_pg8.Flags); start_idx = 0; start_idx_ir = 0; end_idx_ir = 0; end_idx = sc->max_devices; if (ioc_pg8_flags & MPI2_IOCPAGE8_FLAGS_RESERVED_TARGETID_0) start_idx = 1; if (sc->ir_firmware) { _mapping_get_ir_maprange(sc, &start_idx_ir, &end_idx_ir); if (start_idx == start_idx_ir) start_idx = end_idx_ir + 1; else end_idx = start_idx_ir; } mt_entry = &sc->mapping_table[start_idx]; for (map_idx = start_idx; map_idx < end_idx; map_idx++, mt_entry++) { if (mt_entry->missing_count > high_missing_count) { high_missing_count = mt_entry->missing_count; high_idx = map_idx; } } return high_idx; } /** * _mapping_get_ir_mt_idx_from_wwid - get map table index from volume WWID * @sc: per adapter object * @wwid: world wide unique ID of the volume * * Returns the index of map table entry on success or bad index. */ static u32 _mapping_get_ir_mt_idx_from_wwid(struct mps_softc *sc, u64 wwid) { u32 start_idx, end_idx, map_idx; struct dev_mapping_table *mt_entry; _mapping_get_ir_maprange(sc, &start_idx, &end_idx); mt_entry = &sc->mapping_table[start_idx]; for (map_idx = start_idx; map_idx <= end_idx; map_idx++, mt_entry++) if (mt_entry->physical_id == wwid) return map_idx; return MPS_MAPTABLE_BAD_IDX; } /** * _mapping_get_mt_idx_from_id - get map table index from a device ID * @sc: per adapter object * @dev_id: device identifer (SAS Address) * * Returns the index of map table entry on success or bad index. 
*/ static u32 _mapping_get_mt_idx_from_id(struct mps_softc *sc, u64 dev_id) { u32 map_idx; struct dev_mapping_table *mt_entry; for (map_idx = 0; map_idx < sc->max_devices; map_idx++) { mt_entry = &sc->mapping_table[map_idx]; if (mt_entry->physical_id == dev_id) return map_idx; } return MPS_MAPTABLE_BAD_IDX; } /** * _mapping_get_ir_mt_idx_from_handle - get map table index from volume handle * @sc: per adapter object * @wwid: volume device handle * * Returns the index of map table entry on success or bad index. */ static u32 _mapping_get_ir_mt_idx_from_handle(struct mps_softc *sc, u16 volHandle) { u32 start_idx, end_idx, map_idx; struct dev_mapping_table *mt_entry; _mapping_get_ir_maprange(sc, &start_idx, &end_idx); mt_entry = &sc->mapping_table[start_idx]; for (map_idx = start_idx; map_idx <= end_idx; map_idx++, mt_entry++) if (mt_entry->dev_handle == volHandle) return map_idx; return MPS_MAPTABLE_BAD_IDX; } /** * _mapping_get_mt_idx_from_handle - get map table index from handle * @sc: per adapter object * @dev_id: device handle * * Returns the index of map table entry on success or bad index. */ static u32 _mapping_get_mt_idx_from_handle(struct mps_softc *sc, u16 handle) { u32 map_idx; struct dev_mapping_table *mt_entry; for (map_idx = 0; map_idx < sc->max_devices; map_idx++) { mt_entry = &sc->mapping_table[map_idx]; if (mt_entry->dev_handle == handle) return map_idx; } return MPS_MAPTABLE_BAD_IDX; } /** * _mapping_get_free_ir_mt_idx - get first free index for a volume * @sc: per adapter object * * Search through mapping table for free index for a volume and if no free * index then looks for a volume with high mapping index * * Returns the index of map table entry on success or bad index. */ static u32 _mapping_get_free_ir_mt_idx(struct mps_softc *sc) { u8 high_missing_count = 0; u32 start_idx, end_idx, map_idx; u32 high_idx = MPS_MAPTABLE_BAD_IDX; struct dev_mapping_table *mt_entry; /* * The IN_USE flag should be clear if the entry is available to use. * This flag is cleared on initialization and and when a volume is * deleted. All other times this flag should be set. If, for some * reason, a free entry cannot be found, look for the entry with the * highest missing count just in case there is one. */ _mapping_get_ir_maprange(sc, &start_idx, &end_idx); mt_entry = &sc->mapping_table[start_idx]; for (map_idx = start_idx; map_idx <= end_idx; map_idx++, mt_entry++) { if (!(mt_entry->device_info & MPS_MAP_IN_USE)) return map_idx; if (mt_entry->missing_count > high_missing_count) { high_missing_count = mt_entry->missing_count; high_idx = map_idx; } } if (high_idx == MPS_MAPTABLE_BAD_IDX) { mps_dprint(sc, MPS_ERROR | MPS_MAPPING, "%s: Could not find a " "free entry in the mapping table for a Volume. The mapping " "table is probably corrupt.\n", __func__); } return high_idx; } /** * _mapping_get_free_mt_idx - get first free index for a device * @sc: per adapter object * @start_idx: offset in the table to start search * * Returns the index of map table entry on success or bad index. 
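 *
 * Illustrative sketch only (the local "vflags" is an assumption; the flag
 * names are the ones used elsewhere in this file): the exclusive end of
 * the search range shrinks by max_volumes when IR volumes are mapped at
 * the high end of the table, which is what the function below computes.
 *
 *	u32 max_idx = sc->max_devices;
 *	u16 vflags = le16toh(sc->ioc_pg8.IRVolumeMappingFlags) &
 *	    MPI2_IOCPAGE8_IRFLAGS_MASK_VOLUME_MAPPING_MODE;
 *
 *	if (sc->ir_firmware &&
 *	    vflags == MPI2_IOCPAGE8_IRFLAGS_HIGH_VOLUME_MAPPING)
 *		max_idx -= sc->max_volumes;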
*/ static u32 _mapping_get_free_mt_idx(struct mps_softc *sc, u32 start_idx) { u32 map_idx, max_idx = sc->max_devices; struct dev_mapping_table *mt_entry = &sc->mapping_table[start_idx]; u16 volume_mapping_flags; volume_mapping_flags = le16toh(sc->ioc_pg8.IRVolumeMappingFlags) & MPI2_IOCPAGE8_IRFLAGS_MASK_VOLUME_MAPPING_MODE; if (sc->ir_firmware && (volume_mapping_flags == MPI2_IOCPAGE8_IRFLAGS_HIGH_VOLUME_MAPPING)) max_idx -= sc->max_volumes; for (map_idx = start_idx; map_idx < max_idx; map_idx++, mt_entry++) if (!(mt_entry->device_info & (MPS_MAP_IN_USE | MPS_DEV_RESERVED))) return map_idx; return MPS_MAPTABLE_BAD_IDX; } /** * _mapping_get_dpm_idx_from_id - get DPM index from ID * @sc: per adapter object * @id: volume WWID or enclosure ID or device ID * * Returns the index of DPM entry on success or bad index. */ static u16 _mapping_get_dpm_idx_from_id(struct mps_softc *sc, u64 id, u32 phy_bits) { u16 entry_num; uint64_t PhysicalIdentifier; Mpi2DriverMap0Entry_t *dpm_entry; dpm_entry = (Mpi2DriverMap0Entry_t *)((u8 *)sc->dpm_pg0 + sizeof(MPI2_CONFIG_EXTENDED_PAGE_HEADER)); PhysicalIdentifier = dpm_entry->PhysicalIdentifier.High; PhysicalIdentifier = (PhysicalIdentifier << 32) | dpm_entry->PhysicalIdentifier.Low; for (entry_num = 0; entry_num < sc->max_dpm_entries; entry_num++, dpm_entry++) if ((id == PhysicalIdentifier) && (!phy_bits || !dpm_entry->PhysicalBitsMapping || (phy_bits & dpm_entry->PhysicalBitsMapping))) return entry_num; return MPS_DPM_BAD_IDX; } /** * _mapping_get_free_dpm_idx - get first available DPM index * @sc: per adapter object * * Returns the index of DPM entry on success or bad index. */ static u32 _mapping_get_free_dpm_idx(struct mps_softc *sc) { u16 entry_num; Mpi2DriverMap0Entry_t *dpm_entry; u16 current_entry = MPS_DPM_BAD_IDX, missing_cnt, high_missing_cnt = 0; u64 physical_id; struct dev_mapping_table *mt_entry; u32 map_idx; for (entry_num = 0; entry_num < sc->max_dpm_entries; entry_num++) { dpm_entry = (Mpi2DriverMap0Entry_t *) ((u8 *)sc->dpm_pg0 + sizeof(MPI2_CONFIG_EXTENDED_PAGE_HEADER)); dpm_entry += entry_num; missing_cnt = dpm_entry->MappingInformation & MPI2_DRVMAP0_MAPINFO_MISSING_MASK; /* * If entry is used and not missing, then this entry can't be * used. Look at next one. */ if (sc->dpm_entry_used[entry_num] && !missing_cnt) continue; /* * If this entry is not used at all, then the missing count * doesn't matter. Just use this one. Otherwise, keep looking * and make sure the entry with the highest missing count is * used. */ if (!sc->dpm_entry_used[entry_num]) { current_entry = entry_num; break; } if ((current_entry == MPS_DPM_BAD_IDX) || (missing_cnt > high_missing_cnt)) { current_entry = entry_num; high_missing_cnt = missing_cnt; } } /* * If an entry has been found to use and it's already marked as used * it means that some device was already using this entry but it's * missing, and that means that the connection between the missing * device's DPM entry and the mapping table needs to be cleared. To do * this, use the Physical ID of the old device still in the DPM entry * to find its mapping table entry, then mark its DPM entry as BAD. 
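 *
 * The DPM entry stores the 64-bit Physical ID as two 32-bit words. A
 * minimal helper sketch, assuming the name "dpm_physical_id" (not part of
 * the driver), showing the reassembly that is done inline just below:
 *
 *	static uint64_t
 *	dpm_physical_id(Mpi2DriverMap0Entry_t *e)
 *	{
 *		uint64_t id;
 *
 *		id = e->PhysicalIdentifier.High;
 *		id = (id << 32) | e->PhysicalIdentifier.Low;
 *		return (id);
 *	}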
*/ if ((current_entry != MPS_DPM_BAD_IDX) && sc->dpm_entry_used[current_entry]) { dpm_entry = (Mpi2DriverMap0Entry_t *) ((u8 *)sc->dpm_pg0 + sizeof(MPI2_CONFIG_EXTENDED_PAGE_HEADER)); dpm_entry += current_entry; physical_id = dpm_entry->PhysicalIdentifier.High; physical_id = (physical_id << 32) | dpm_entry->PhysicalIdentifier.Low; map_idx = _mapping_get_mt_idx_from_id(sc, physical_id); if (map_idx != MPS_MAPTABLE_BAD_IDX) { mt_entry = &sc->mapping_table[map_idx]; mt_entry->dpm_entry_num = MPS_DPM_BAD_IDX; } } return current_entry; } /** * _mapping_update_ir_missing_cnt - Updates missing count for a volume * @sc: per adapter object * @map_idx: map table index of the volume * @element: IR configuration change element * @wwid: IR volume ID. * * Updates the missing count in the map table and in the DPM entry for a volume * * Returns nothing. */ static void _mapping_update_ir_missing_cnt(struct mps_softc *sc, u32 map_idx, Mpi2EventIrConfigElement_t *element, u64 wwid) { struct dev_mapping_table *mt_entry; u8 missing_cnt, reason = element->ReasonCode, update_dpm = 1; u16 dpm_idx; Mpi2DriverMap0Entry_t *dpm_entry; /* * Depending on the reason code, update the missing count. Always set * the init_complete flag when here, so just do it first. That flag is * used for volumes to make sure that the DPM entry has been updated. * When a volume is deleted, clear the map entry's IN_USE flag so that * the entry can be used again if another volume is created. Also clear * its dev_handle entry so that other functions can't find this volume * by the handle, since it's not defined any longer. */ mt_entry = &sc->mapping_table[map_idx]; mt_entry->init_complete = 1; if ((reason == MPI2_EVENT_IR_CHANGE_RC_ADDED) || (reason == MPI2_EVENT_IR_CHANGE_RC_VOLUME_CREATED)) { mt_entry->missing_count = 0; } else if (reason == MPI2_EVENT_IR_CHANGE_RC_VOLUME_DELETED) { if (mt_entry->missing_count < MPS_MAX_MISSING_COUNT) mt_entry->missing_count++; mt_entry->device_info &= ~MPS_MAP_IN_USE; mt_entry->dev_handle = 0; } /* * If persistent mapping is enabled, update the DPM with the new missing * count for the volume. If the DPM index is bad, get a free one. If * it's bad for a volume that's being deleted do nothing because that * volume doesn't have a DPM entry. */ if (!sc->is_dpm_enable) return; dpm_idx = mt_entry->dpm_entry_num; if (dpm_idx == MPS_DPM_BAD_IDX) { if (reason == MPI2_EVENT_IR_CHANGE_RC_VOLUME_DELETED) { mps_dprint(sc, MPS_MAPPING, "%s: Volume being deleted " "is not in DPM so DPM missing count will not be " "updated.\n", __func__); return; } } if (dpm_idx == MPS_DPM_BAD_IDX) dpm_idx = _mapping_get_free_dpm_idx(sc); /* * Got the DPM entry for the volume or found a free DPM entry if this is * a new volume. Check if the current information is outdated. */ if (dpm_idx != MPS_DPM_BAD_IDX) { dpm_entry = (Mpi2DriverMap0Entry_t *)((u8 *)sc->dpm_pg0 + sizeof(MPI2_CONFIG_EXTENDED_PAGE_HEADER)); dpm_entry += dpm_idx; missing_cnt = dpm_entry->MappingInformation & MPI2_DRVMAP0_MAPINFO_MISSING_MASK; if ((mt_entry->physical_id == le64toh(((u64)dpm_entry->PhysicalIdentifier.High << 32) | (u64)dpm_entry->PhysicalIdentifier.Low)) && (missing_cnt == mt_entry->missing_count)) { mps_dprint(sc, MPS_MAPPING, "%s: DPM entry for volume " "with target ID %d does not require an update.\n", __func__, mt_entry->id); update_dpm = 0; } } /* * Update the volume's persistent info if it's new or the ID or missing * count has changed. If a good DPM index has not been found by now, * there is no space left in the DPM table. 
*/ if ((dpm_idx != MPS_DPM_BAD_IDX) && update_dpm) { mps_dprint(sc, MPS_MAPPING, "%s: Update DPM entry for volume " "with target ID %d.\n", __func__, mt_entry->id); mt_entry->dpm_entry_num = dpm_idx; dpm_entry = (Mpi2DriverMap0Entry_t *)((u8 *)sc->dpm_pg0 + sizeof(MPI2_CONFIG_EXTENDED_PAGE_HEADER)); dpm_entry += dpm_idx; dpm_entry->PhysicalIdentifier.Low = (0xFFFFFFFF & mt_entry->physical_id); dpm_entry->PhysicalIdentifier.High = (mt_entry->physical_id >> 32); dpm_entry->DeviceIndex = map_idx; dpm_entry->MappingInformation = mt_entry->missing_count; dpm_entry->PhysicalBitsMapping = 0; dpm_entry->Reserved1 = 0; sc->dpm_flush_entry[dpm_idx] = 1; sc->dpm_entry_used[dpm_idx] = 1; } else if (dpm_idx == MPS_DPM_BAD_IDX) { mps_dprint(sc, MPS_INFO | MPS_MAPPING, "%s: No space to add an " "entry in the DPM table for volume with target ID %d.\n", __func__, mt_entry->id); } } /** * _mapping_add_to_removal_table - add DPM index to the removal table * @sc: per adapter object * @dpm_idx: Index of DPM entry to remove * * Adds a DPM entry number to the removal table. * * Returns nothing. */ static void _mapping_add_to_removal_table(struct mps_softc *sc, u16 dpm_idx) { struct map_removal_table *remove_entry; u32 i; /* * This is only used to remove entries from the DPM in the controller. * If DPM is not enabled, just return. */ if (!sc->is_dpm_enable) return; /* * Find the first available removal_table entry and add the new entry * there. */ remove_entry = sc->removal_table; for (i = 0; i < sc->max_devices; i++, remove_entry++) { if (remove_entry->dpm_entry_num != MPS_DPM_BAD_IDX) continue; mps_dprint(sc, MPS_MAPPING, "%s: Adding DPM entry %d to table " "for removal.\n", __func__, dpm_idx); remove_entry->dpm_entry_num = dpm_idx; break; } } /** * _mapping_update_missing_count - Update missing count for a device * @sc: per adapter object * @topo_change: Topology change event entry * * Increment the missing count in the mapping table for a device that is not * responding. If Persitent Mapping is used, increment the DPM entry as well. * Currently, this function only increments the missing count if the device * goes missing, so after initialization has completed. This means that the * missing count can only go from 0 to 1 here. The missing count is incremented * during initialization as well, so that's where a target's missing count can * go past 1. * * Returns nothing. */ static void _mapping_update_missing_count(struct mps_softc *sc, struct _map_topology_change *topo_change) { u16 ioc_pg8_flags = le16toh(sc->ioc_pg8.Flags); u8 entry; struct _map_phy_change *phy_change; u32 map_idx; struct dev_mapping_table *mt_entry; Mpi2DriverMap0Entry_t *dpm_entry; for (entry = 0; entry < topo_change->num_entries; entry++) { phy_change = &topo_change->phy_details[entry]; if (!phy_change->dev_handle || (phy_change->reason != MPI2_EVENT_SAS_TOPO_RC_TARG_NOT_RESPONDING)) continue; map_idx = _mapping_get_mt_idx_from_handle(sc, phy_change-> dev_handle); phy_change->is_processed = 1; if (map_idx == MPS_MAPTABLE_BAD_IDX) { mps_dprint(sc, MPS_INFO | MPS_MAPPING, "%s: device is " "already removed from mapping table\n", __func__); continue; } mt_entry = &sc->mapping_table[map_idx]; if (mt_entry->missing_count < MPS_MAX_MISSING_COUNT) mt_entry->missing_count++; /* * When using Enc/Slot mapping, when a device is removed, it's * mapping table information should be cleared. Otherwise, the * target ID will be incorrect if this same device is re-added * to a different slot. 
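 *
 * The mapping-mode test below is repeated throughout this file. A
 * hypothetical helper sketch (the name "_mapping_is_enc_slot_mode" is an
 * assumption, not an existing function) that captures the same check:
 *
 *	static inline int
 *	_mapping_is_enc_slot_mode(struct mps_softc *sc)
 *	{
 *		u16 flags = le16toh(sc->ioc_pg8.Flags);
 *
 *		return ((flags & MPI2_IOCPAGE8_FLAGS_MASK_MAPPING_MODE) ==
 *		    MPI2_IOCPAGE8_FLAGS_ENCLOSURE_SLOT_MAPPING);
 *	}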
*/ if ((ioc_pg8_flags & MPI2_IOCPAGE8_FLAGS_MASK_MAPPING_MODE) == MPI2_IOCPAGE8_FLAGS_ENCLOSURE_SLOT_MAPPING) { _mapping_clear_map_entry(mt_entry); } /* * When using device mapping, update the missing count in the * DPM entry, but only if the missing count has changed. */ if (((ioc_pg8_flags & MPI2_IOCPAGE8_FLAGS_MASK_MAPPING_MODE) == MPI2_IOCPAGE8_FLAGS_DEVICE_PERSISTENCE_MAPPING) && sc->is_dpm_enable && mt_entry->dpm_entry_num != MPS_DPM_BAD_IDX) { dpm_entry = (Mpi2DriverMap0Entry_t *) ((u8 *)sc->dpm_pg0 + sizeof(MPI2_CONFIG_EXTENDED_PAGE_HEADER)); dpm_entry += mt_entry->dpm_entry_num; if (dpm_entry->MappingInformation != mt_entry->missing_count) { dpm_entry->MappingInformation = mt_entry->missing_count; sc->dpm_flush_entry[mt_entry->dpm_entry_num] = 1; } } } } /** * _mapping_find_enc_map_space -find map table entries for enclosure * @sc: per adapter object * @et_entry: enclosure entry * * Search through the mapping table defragment it and provide contiguous * space in map table for a particular enclosure entry * * Returns start index in map table or bad index. */ static u32 _mapping_find_enc_map_space(struct mps_softc *sc, struct enc_mapping_table *et_entry) { u16 vol_mapping_flags; u32 skip_count, end_of_table, map_idx, enc_idx; u16 num_found; u32 start_idx = MPS_MAPTABLE_BAD_IDX; struct dev_mapping_table *mt_entry; struct enc_mapping_table *enc_entry; unsigned char done_flag = 0, found_space; u16 max_num_phy_ids = le16toh(sc->ioc_pg8.MaxNumPhysicalMappedIDs); skip_count = sc->num_rsvd_entries; num_found = 0; vol_mapping_flags = le16toh(sc->ioc_pg8.IRVolumeMappingFlags) & MPI2_IOCPAGE8_IRFLAGS_MASK_VOLUME_MAPPING_MODE; /* * The end of the mapping table depends on where volumes are kept, if * IR is enabled. */ if (!sc->ir_firmware) end_of_table = sc->max_devices; else if (vol_mapping_flags == MPI2_IOCPAGE8_IRFLAGS_LOW_VOLUME_MAPPING) end_of_table = sc->max_devices; else end_of_table = sc->max_devices - sc->max_volumes; /* * The skip_count is the number of entries that are reserved at the * beginning of the mapping table. But, it does not include the number * of Physical IDs that are reserved for direct attached devices. Look * through the mapping table after these reserved entries to see if * the devices for this enclosure are already mapped. The PHY bit check * is used to make sure that at least one PHY bit is common between the * enclosure and the device that is already mapped. */ mps_dprint(sc, MPS_MAPPING, "%s: Looking for space in the mapping " "table for added enclosure.\n", __func__); for (map_idx = (max_num_phy_ids + skip_count); map_idx < end_of_table; map_idx++) { mt_entry = &sc->mapping_table[map_idx]; if ((et_entry->enclosure_id == mt_entry->physical_id) && (!mt_entry->phy_bits || (mt_entry->phy_bits & et_entry->phy_bits))) { num_found += 1; if (num_found == et_entry->num_slots) { start_idx = (map_idx - num_found) + 1; mps_dprint(sc, MPS_MAPPING, "%s: Found space " "in the mapping for enclosure at map index " "%d.\n", __func__, start_idx); return start_idx; } } else num_found = 0; } /* * If the enclosure's devices are not mapped already, look for * contiguous entries in the mapping table that are not reserved. If * enough entries are found, return the starting index for that space. 
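 *
 * A condensed, standalone sketch of the free-run scan performed below
 * (the helper name "find_free_run" and its parameters are assumptions made
 * for illustration): walk the table and return the first index that starts
 * a run of "want" consecutive non-reserved entries.
 *
 *	static u32
 *	find_free_run(struct dev_mapping_table *tbl, u32 lo, u32 hi, u16 want)
 *	{
 *		u32 idx;
 *		u16 found = 0;
 *
 *		for (idx = lo; idx < hi; idx++) {
 *			if (tbl[idx].device_info & MPS_DEV_RESERVED) {
 *				found = 0;
 *				continue;
 *			}
 *			if (++found == want)
 *				return (idx - found + 1);
 *		}
 *		return (MPS_MAPTABLE_BAD_IDX);
 *	}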
*/ num_found = 0; for (map_idx = (max_num_phy_ids + skip_count); map_idx < end_of_table; map_idx++) { mt_entry = &sc->mapping_table[map_idx]; if (!(mt_entry->device_info & MPS_DEV_RESERVED)) { num_found += 1; if (num_found == et_entry->num_slots) { start_idx = (map_idx - num_found) + 1; mps_dprint(sc, MPS_MAPPING, "%s: Found space " "in the mapping for enclosure at map index " "%d.\n", __func__, start_idx); return start_idx; } } else num_found = 0; } /* * If here, it means that not enough space in the mapping table was * found to support this enclosure, so go through the enclosure table to * see if any enclosure entries have a missing count. If so, get the * enclosure with the highest missing count and check it to see if there * is enough space for the new enclosure. */ while (!done_flag) { enc_idx = _mapping_get_high_missing_et_idx(sc); if (enc_idx == MPS_ENCTABLE_BAD_IDX) { mps_dprint(sc, MPS_MAPPING, "%s: Not enough space was " "found in the mapping for the added enclosure.\n", __func__); return MPS_MAPTABLE_BAD_IDX; } /* * Found a missing enclosure. Set the skip_search flag so this * enclosure is not checked again for a high missing count if * the loop continues. This way, all missing enclosures can * have their space added together to find enough space in the * mapping table for the added enclosure. The space must be * contiguous. */ mps_dprint(sc, MPS_MAPPING, "%s: Space from a missing " "enclosure was found.\n", __func__); enc_entry = &sc->enclosure_table[enc_idx]; enc_entry->skip_search = 1; /* * Unmark all of the missing enclosure's device's reserved * space. These will be remarked as reserved if this missing * enclosure's space is not used. */ mps_dprint(sc, MPS_MAPPING, "%s: Clear the reserved flag for " "all of the map entries for the enclosure.\n", __func__); mt_entry = &sc->mapping_table[enc_entry->start_index]; for (map_idx = enc_entry->start_index; map_idx < (enc_entry->start_index + enc_entry->num_slots); map_idx++, mt_entry++) mt_entry->device_info &= ~MPS_DEV_RESERVED; /* * Now that space has been unreserved, check again to see if * enough space is available for the new enclosure. */ mps_dprint(sc, MPS_MAPPING, "%s: Check if new mapping space is " "enough for the new enclosure.\n", __func__); found_space = 0; num_found = 0; for (map_idx = (max_num_phy_ids + skip_count); map_idx < end_of_table; map_idx++) { mt_entry = &sc->mapping_table[map_idx]; if (!(mt_entry->device_info & MPS_DEV_RESERVED)) { num_found += 1; if (num_found == et_entry->num_slots) { start_idx = (map_idx - num_found) + 1; found_space = 1; break; } } else num_found = 0; } if (!found_space) continue; /* * If enough space was found, all of the missing enclosures that * will be used for the new enclosure must be added to the * removal table. Then all mappings for the enclosure's devices * and for the enclosure itself need to be cleared. There may be * more than one enclosure to add to the removal table and * clear. 
*/ mps_dprint(sc, MPS_MAPPING, "%s: Found space in the mapping " "for enclosure at map index %d.\n", __func__, start_idx); for (map_idx = start_idx; map_idx < (start_idx + num_found); map_idx++) { enc_entry = sc->enclosure_table; for (enc_idx = 0; enc_idx < sc->num_enc_table_entries; enc_idx++, enc_entry++) { if (map_idx < enc_entry->start_index || map_idx > (enc_entry->start_index + enc_entry->num_slots)) continue; if (!enc_entry->removal_flag) { mps_dprint(sc, MPS_MAPPING, "%s: " "Enclosure %d will be removed from " "the mapping table.\n", __func__, enc_idx); enc_entry->removal_flag = 1; _mapping_add_to_removal_table(sc, enc_entry->dpm_entry_num); } mt_entry = &sc->mapping_table[map_idx]; _mapping_clear_map_entry(mt_entry); if (map_idx == (enc_entry->start_index + enc_entry->num_slots - 1)) _mapping_clear_enc_entry(et_entry); } } /* * During the search for space for this enclosure, some entries * in the mapping table may have been unreserved. Go back and * change all of these to reserved again. Only the enclosures * with the removal_flag set should be left as unreserved. The * skip_search flag needs to be cleared as well so that the * enclosure's space will be looked at the next time space is * needed. */ enc_entry = sc->enclosure_table; for (enc_idx = 0; enc_idx < sc->num_enc_table_entries; enc_idx++, enc_entry++) { if (!enc_entry->removal_flag) { mps_dprint(sc, MPS_MAPPING, "%s: Reset the " "reserved flag for all of the map entries " "for enclosure %d.\n", __func__, enc_idx); mt_entry = &sc->mapping_table[enc_entry-> start_index]; for (map_idx = enc_entry->start_index; map_idx < (enc_entry->start_index + enc_entry->num_slots); map_idx++, mt_entry++) mt_entry->device_info |= MPS_DEV_RESERVED; et_entry->skip_search = 0; } } done_flag = 1; } return start_idx; } /** * _mapping_get_dev_info -get information about newly added devices * @sc: per adapter object * @topo_change: Topology change event entry * * Search through the topology change event list and issues sas device pg0 * requests for the newly added device and reserved entries in tables * * Returns nothing */ static void _mapping_get_dev_info(struct mps_softc *sc, struct _map_topology_change *topo_change) { u16 ioc_pg8_flags = le16toh(sc->ioc_pg8.Flags); Mpi2ConfigReply_t mpi_reply; Mpi2SasDevicePage0_t sas_device_pg0; u8 entry, enc_idx, phy_idx; u32 map_idx, index, device_info; struct _map_phy_change *phy_change, *tmp_phy_change; uint64_t sas_address; struct enc_mapping_table *et_entry; struct dev_mapping_table *mt_entry; u8 add_code = MPI2_EVENT_SAS_TOPO_RC_TARG_ADDED; int rc = 1; for (entry = 0; entry < topo_change->num_entries; entry++) { phy_change = &topo_change->phy_details[entry]; if (phy_change->is_processed || !phy_change->dev_handle || phy_change->reason != MPI2_EVENT_SAS_TOPO_RC_TARG_ADDED) continue; if (mps_config_get_sas_device_pg0(sc, &mpi_reply, &sas_device_pg0, MPI2_SAS_DEVICE_PGAD_FORM_HANDLE, phy_change->dev_handle)) { phy_change->is_processed = 1; continue; } /* * Always get SATA Identify information because this is used * to determine if Start/Stop Unit should be sent to the drive * when the system is shutdown. 
*/ device_info = le32toh(sas_device_pg0.DeviceInfo); sas_address = le32toh(sas_device_pg0.SASAddress.High); sas_address = (sas_address << 32) | le32toh(sas_device_pg0.SASAddress.Low); if ((device_info & MPI2_SAS_DEVICE_INFO_END_DEVICE) && (device_info & MPI2_SAS_DEVICE_INFO_SATA_DEVICE)) { rc = mpssas_get_sas_address_for_sata_disk(sc, &sas_address, phy_change->dev_handle, device_info, &phy_change->is_SATA_SSD); if (rc) { mps_dprint(sc, MPS_ERROR, "%s: failed to get " "disk type (SSD or HDD) and SAS Address " "for SATA device with handle 0x%04x\n", __func__, phy_change->dev_handle); } } phy_change->physical_id = sas_address; phy_change->slot = le16toh(sas_device_pg0.Slot); phy_change->device_info = device_info; /* * When using Enc/Slot mapping, if this device is an enclosure * make sure that all of its slots can fit into the mapping * table. */ if ((ioc_pg8_flags & MPI2_IOCPAGE8_FLAGS_MASK_MAPPING_MODE) == MPI2_IOCPAGE8_FLAGS_ENCLOSURE_SLOT_MAPPING) { /* * The enclosure should already be in the enclosure * table due to the Enclosure Add event. If not, just * continue, nothing can be done. */ enc_idx = _mapping_get_enc_idx_from_handle(sc, topo_change->enc_handle); if (enc_idx == MPS_ENCTABLE_BAD_IDX) { phy_change->is_processed = 1; mps_dprint(sc, MPS_ERROR | MPS_MAPPING, "%s: " "failed to add the device with handle " "0x%04x because the enclosure is not in " "the mapping table\n", __func__, phy_change->dev_handle); continue; } if (!((phy_change->device_info & MPI2_SAS_DEVICE_INFO_END_DEVICE) && (phy_change->device_info & (MPI2_SAS_DEVICE_INFO_SSP_TARGET | MPI2_SAS_DEVICE_INFO_STP_TARGET | MPI2_SAS_DEVICE_INFO_SATA_DEVICE)))) { phy_change->is_processed = 1; continue; } et_entry = &sc->enclosure_table[enc_idx]; /* * If the enclosure already has a start_index, it's been * mapped, so go to the next Topo change. */ if (et_entry->start_index != MPS_MAPTABLE_BAD_IDX) continue; /* * If the Expander Handle is 0, the devices are direct * attached. In that case, the start_index must be just * after the reserved entries. Otherwise, find space in * the mapping table for the enclosure's devices. */ if (!topo_change->exp_handle) { map_idx = sc->num_rsvd_entries; et_entry->start_index = map_idx; } else { map_idx = _mapping_find_enc_map_space(sc, et_entry); et_entry->start_index = map_idx; /* * If space cannot be found to hold all of the * enclosure's devices in the mapping table, * there's no need to continue checking the * other devices in this event. Set all of the * phy_details for this event (if the change is * for an add) as already processed because none * of these devices can be added to the mapping * table. */ if (et_entry->start_index == MPS_MAPTABLE_BAD_IDX) { mps_dprint(sc, MPS_ERROR | MPS_MAPPING, "%s: failed to add the enclosure " "with ID 0x%016jx because there is " "no free space available in the " "mapping table for all of the " "enclosure's devices.\n", __func__, (uintmax_t)et_entry->enclosure_id); phy_change->is_processed = 1; for (phy_idx = 0; phy_idx < topo_change->num_entries; phy_idx++) { tmp_phy_change = &topo_change->phy_details [phy_idx]; if (tmp_phy_change->reason == add_code) tmp_phy_change-> is_processed = 1; } break; } } /* * Found space in the mapping table for this enclosure. * Initialize each mapping table entry for the * enclosure. 
*/ mps_dprint(sc, MPS_MAPPING, "%s: Initialize %d map " "entries for the enclosure, starting at map index " " %d.\n", __func__, et_entry->num_slots, map_idx); mt_entry = &sc->mapping_table[map_idx]; for (index = map_idx; index < (et_entry->num_slots + map_idx); index++, mt_entry++) { mt_entry->device_info = MPS_DEV_RESERVED; mt_entry->physical_id = et_entry->enclosure_id; mt_entry->phy_bits = et_entry->phy_bits; mt_entry->missing_count = 0; } } } } /** * _mapping_set_mid_to_eid -set map table data from enclosure table * @sc: per adapter object * @et_entry: enclosure entry * * Returns nothing */ static inline void _mapping_set_mid_to_eid(struct mps_softc *sc, struct enc_mapping_table *et_entry) { struct dev_mapping_table *mt_entry; u16 slots = et_entry->num_slots, map_idx; u32 start_idx = et_entry->start_index; if (start_idx != MPS_MAPTABLE_BAD_IDX) { mt_entry = &sc->mapping_table[start_idx]; for (map_idx = 0; map_idx < slots; map_idx++, mt_entry++) mt_entry->physical_id = et_entry->enclosure_id; } } /** * _mapping_clear_removed_entries - mark the entries to be cleared * @sc: per adapter object * * Search through the removal table and mark the entries which needs to be * flushed to DPM and also updates the map table and enclosure table by * clearing the corresponding entries. * * Returns nothing */ static void _mapping_clear_removed_entries(struct mps_softc *sc) { u32 remove_idx; struct map_removal_table *remove_entry; Mpi2DriverMap0Entry_t *dpm_entry; u8 done_flag = 0, num_entries, m, i; struct enc_mapping_table *et_entry, *from, *to; u16 ioc_pg8_flags = le16toh(sc->ioc_pg8.Flags); if (sc->is_dpm_enable) { remove_entry = sc->removal_table; for (remove_idx = 0; remove_idx < sc->max_devices; remove_idx++, remove_entry++) { if (remove_entry->dpm_entry_num != MPS_DPM_BAD_IDX) { dpm_entry = (Mpi2DriverMap0Entry_t *) ((u8 *) sc->dpm_pg0 + sizeof(MPI2_CONFIG_EXTENDED_PAGE_HEADER)); dpm_entry += remove_entry->dpm_entry_num; dpm_entry->PhysicalIdentifier.Low = 0; dpm_entry->PhysicalIdentifier.High = 0; dpm_entry->DeviceIndex = 0; dpm_entry->MappingInformation = 0; dpm_entry->PhysicalBitsMapping = 0; sc->dpm_flush_entry[remove_entry-> dpm_entry_num] = 1; sc->dpm_entry_used[remove_entry->dpm_entry_num] = 0; remove_entry->dpm_entry_num = MPS_DPM_BAD_IDX; } } } /* * When using Enc/Slot mapping, if a new enclosure was added and old * enclosure space was needed, the enclosure table may now have gaps * that need to be closed. All enclosure mappings need to be contiguous * so that space can be reused correctly if available. 
*/ if ((ioc_pg8_flags & MPI2_IOCPAGE8_FLAGS_MASK_MAPPING_MODE) == MPI2_IOCPAGE8_FLAGS_ENCLOSURE_SLOT_MAPPING) { num_entries = sc->num_enc_table_entries; while (!done_flag) { done_flag = 1; et_entry = sc->enclosure_table; for (i = 0; i < num_entries; i++, et_entry++) { if (!et_entry->enc_handle && et_entry-> init_complete) { done_flag = 0; if (i != (num_entries - 1)) { from = &sc->enclosure_table [i+1]; to = &sc->enclosure_table[i]; for (m = i; m < (num_entries - 1); m++, from++, to++) { _mapping_set_mid_to_eid (sc, to); *to = *from; } _mapping_clear_enc_entry(to); sc->num_enc_table_entries--; num_entries = sc->num_enc_table_entries; } else { _mapping_clear_enc_entry (et_entry); sc->num_enc_table_entries--; num_entries = sc->num_enc_table_entries; } } } } } } /** * _mapping_add_new_device -Add the new device into mapping table * @sc: per adapter object * @topo_change: Topology change event entry * * Search through the topology change event list and update map table, * enclosure table and DPM pages for the newly added devices. * * Returns nothing */ static void _mapping_add_new_device(struct mps_softc *sc, struct _map_topology_change *topo_change) { u8 enc_idx, missing_cnt, is_removed = 0; u16 dpm_idx; u32 search_idx, map_idx; u32 entry; struct dev_mapping_table *mt_entry; struct enc_mapping_table *et_entry; struct _map_phy_change *phy_change; u16 ioc_pg8_flags = le16toh(sc->ioc_pg8.Flags); Mpi2DriverMap0Entry_t *dpm_entry; uint64_t temp64_var; u8 map_shift = MPI2_DRVMAP0_MAPINFO_SLOT_SHIFT; u8 hdr_sz = sizeof(MPI2_CONFIG_EXTENDED_PAGE_HEADER); u16 max_num_phy_ids = le16toh(sc->ioc_pg8.MaxNumPhysicalMappedIDs); for (entry = 0; entry < topo_change->num_entries; entry++) { phy_change = &topo_change->phy_details[entry]; if (phy_change->is_processed) continue; if (phy_change->reason != MPI2_EVENT_SAS_TOPO_RC_TARG_ADDED || !phy_change->dev_handle) { phy_change->is_processed = 1; continue; } if ((ioc_pg8_flags & MPI2_IOCPAGE8_FLAGS_MASK_MAPPING_MODE) == MPI2_IOCPAGE8_FLAGS_ENCLOSURE_SLOT_MAPPING) { enc_idx = _mapping_get_enc_idx_from_handle (sc, topo_change->enc_handle); if (enc_idx == MPS_ENCTABLE_BAD_IDX) { phy_change->is_processed = 1; mps_dprint(sc, MPS_ERROR | MPS_MAPPING, "%s: " "failed to add the device with handle " "0x%04x because the enclosure is not in " "the mapping table\n", __func__, phy_change->dev_handle); continue; } /* * If the enclosure's start_index is BAD here, it means * that there is no room in the mapping table to cover * all of the devices that could be in the enclosure. * There's no reason to process any of the devices for * this enclosure since they can't be mapped. */ et_entry = &sc->enclosure_table[enc_idx]; if (et_entry->start_index == MPS_MAPTABLE_BAD_IDX) { phy_change->is_processed = 1; mps_dprint(sc, MPS_ERROR | MPS_MAPPING, "%s: " "failed to add the device with handle " "0x%04x because there is no free space " "available in the mapping table\n", __func__, phy_change->dev_handle); continue; } /* * Add this device to the mapping table at the correct * offset where space was found to map the enclosure. * Then setup the DPM entry information if being used. 
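 *
 * In Enc/Slot mode the map index, and therefore the target ID, is simply
 * the enclosure's start_index plus the zero-based slot offset:
 *
 *	map_idx = et_entry->start_index +
 *	    (phy_change->slot - et_entry->start_slot);
 *
 * For example, an enclosure mapped at start_index 10 whose first slot is
 * numbered 1 would map the device in slot 5 to index 10 + (5 - 1) = 14.
 * (The numbers here are illustrative, not taken from a real topology.)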
*/ map_idx = et_entry->start_index + phy_change->slot - et_entry->start_slot; mt_entry = &sc->mapping_table[map_idx]; mt_entry->physical_id = phy_change->physical_id; mt_entry->id = map_idx; mt_entry->dev_handle = phy_change->dev_handle; mt_entry->missing_count = 0; mt_entry->dpm_entry_num = et_entry->dpm_entry_num; mt_entry->device_info = phy_change->device_info | (MPS_DEV_RESERVED | MPS_MAP_IN_USE); if (sc->is_dpm_enable) { dpm_idx = et_entry->dpm_entry_num; if (dpm_idx == MPS_DPM_BAD_IDX) dpm_idx = _mapping_get_dpm_idx_from_id (sc, et_entry->enclosure_id, et_entry->phy_bits); if (dpm_idx == MPS_DPM_BAD_IDX) { dpm_idx = _mapping_get_free_dpm_idx(sc); if (dpm_idx != MPS_DPM_BAD_IDX) { dpm_entry = (Mpi2DriverMap0Entry_t *) ((u8 *) sc->dpm_pg0 + hdr_sz); dpm_entry += dpm_idx; dpm_entry-> PhysicalIdentifier.Low = (0xFFFFFFFF & et_entry->enclosure_id); dpm_entry-> PhysicalIdentifier.High = (et_entry->enclosure_id >> 32); dpm_entry->DeviceIndex = (U16)et_entry->start_index; dpm_entry->MappingInformation = et_entry->num_slots; dpm_entry->MappingInformation <<= map_shift; dpm_entry->PhysicalBitsMapping = et_entry->phy_bits; et_entry->dpm_entry_num = dpm_idx; sc->dpm_entry_used[dpm_idx] = 1; sc->dpm_flush_entry[dpm_idx] = 1; phy_change->is_processed = 1; } else { phy_change->is_processed = 1; mps_dprint(sc, MPS_ERROR | MPS_MAPPING, "%s: failed " "to add the device with " "handle 0x%04x to " "persistent table because " "there is no free space " "available\n", __func__, phy_change->dev_handle); } } else { et_entry->dpm_entry_num = dpm_idx; mt_entry->dpm_entry_num = dpm_idx; } } et_entry->init_complete = 1; } else if ((ioc_pg8_flags & MPI2_IOCPAGE8_FLAGS_MASK_MAPPING_MODE) == MPI2_IOCPAGE8_FLAGS_DEVICE_PERSISTENCE_MAPPING) { /* * Get the mapping table index for this device. If it's * not in the mapping table yet, find a free entry if * one is available. If there are no free entries, look * for the entry that has the highest missing count. If * none of that works to find an entry in the mapping * table, there is a problem. Log a message and just * continue on. */ map_idx = _mapping_get_mt_idx_from_id (sc, phy_change->physical_id); if (map_idx == MPS_MAPTABLE_BAD_IDX) { search_idx = sc->num_rsvd_entries; if (topo_change->exp_handle) search_idx += max_num_phy_ids; map_idx = _mapping_get_free_mt_idx(sc, search_idx); } /* * If an entry will be used that has a missing device, * clear its entry from the DPM in the controller. 
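 *
 * Reuse prefers the entry whose missing_count has climbed highest; the
 * count itself is always bumped with a saturating increment, as in this
 * minimal helper sketch (the name "missing_count_bump" is an assumption):
 *
 *	static inline void
 *	missing_count_bump(struct dev_mapping_table *mt_entry)
 *	{
 *		if (mt_entry->missing_count < MPS_MAX_MISSING_COUNT)
 *			mt_entry->missing_count++;
 *	}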
*/ if (map_idx == MPS_MAPTABLE_BAD_IDX) { map_idx = _mapping_get_high_missing_mt_idx(sc); if (map_idx != MPS_MAPTABLE_BAD_IDX) { mt_entry = &sc->mapping_table[map_idx]; _mapping_add_to_removal_table(sc, mt_entry->dpm_entry_num); is_removed = 1; mt_entry->init_complete = 0; } } if (map_idx != MPS_MAPTABLE_BAD_IDX) { mt_entry = &sc->mapping_table[map_idx]; mt_entry->physical_id = phy_change->physical_id; mt_entry->id = map_idx; mt_entry->dev_handle = phy_change->dev_handle; mt_entry->missing_count = 0; mt_entry->device_info = phy_change->device_info | (MPS_DEV_RESERVED | MPS_MAP_IN_USE); } else { phy_change->is_processed = 1; mps_dprint(sc, MPS_ERROR | MPS_MAPPING, "%s: " "failed to add the device with handle " "0x%04x because there is no free space " "available in the mapping table\n", __func__, phy_change->dev_handle); continue; } if (sc->is_dpm_enable) { if (mt_entry->dpm_entry_num != MPS_DPM_BAD_IDX) { dpm_idx = mt_entry->dpm_entry_num; dpm_entry = (Mpi2DriverMap0Entry_t *) ((u8 *)sc->dpm_pg0 + hdr_sz); dpm_entry += dpm_idx; missing_cnt = dpm_entry-> MappingInformation & MPI2_DRVMAP0_MAPINFO_MISSING_MASK; temp64_var = dpm_entry-> PhysicalIdentifier.High; temp64_var = (temp64_var << 32) | dpm_entry->PhysicalIdentifier.Low; /* * If the Mapping Table's info is not * the same as the DPM entry, clear the * init_complete flag so that it's * updated. */ if ((mt_entry->physical_id == temp64_var) && !missing_cnt) mt_entry->init_complete = 1; else mt_entry->init_complete = 0; } else { dpm_idx = _mapping_get_free_dpm_idx(sc); mt_entry->init_complete = 0; } if (dpm_idx != MPS_DPM_BAD_IDX && !mt_entry->init_complete) { mt_entry->dpm_entry_num = dpm_idx; dpm_entry = (Mpi2DriverMap0Entry_t *) ((u8 *)sc->dpm_pg0 + hdr_sz); dpm_entry += dpm_idx; dpm_entry->PhysicalIdentifier.Low = (0xFFFFFFFF & mt_entry->physical_id); dpm_entry->PhysicalIdentifier.High = (mt_entry->physical_id >> 32); dpm_entry->DeviceIndex = (U16) map_idx; dpm_entry->MappingInformation = 0; dpm_entry->PhysicalBitsMapping = 0; sc->dpm_entry_used[dpm_idx] = 1; sc->dpm_flush_entry[dpm_idx] = 1; phy_change->is_processed = 1; } else if (dpm_idx == MPS_DPM_BAD_IDX) { phy_change->is_processed = 1; mps_dprint(sc, MPS_ERROR | MPS_MAPPING, "%s: failed to add the device with " "handle 0x%04x to persistent table " "because there is no free space " "available\n", __func__, phy_change->dev_handle); } } mt_entry->init_complete = 1; } phy_change->is_processed = 1; } if (is_removed) _mapping_clear_removed_entries(sc); } /** * _mapping_flush_dpm_pages -Flush the DPM pages to NVRAM * @sc: per adapter object * * Returns nothing */ static void _mapping_flush_dpm_pages(struct mps_softc *sc) { Mpi2DriverMap0Entry_t *dpm_entry; Mpi2ConfigReply_t mpi_reply; Mpi2DriverMappingPage0_t config_page; u16 entry_num; for (entry_num = 0; entry_num < sc->max_dpm_entries; entry_num++) { if (!sc->dpm_flush_entry[entry_num]) continue; memset(&config_page, 0, sizeof(Mpi2DriverMappingPage0_t)); memcpy(&config_page.Header, (u8 *)sc->dpm_pg0, sizeof(MPI2_CONFIG_EXTENDED_PAGE_HEADER)); dpm_entry = (Mpi2DriverMap0Entry_t *) ((u8 *)sc->dpm_pg0 + sizeof(MPI2_CONFIG_EXTENDED_PAGE_HEADER)); dpm_entry += entry_num; dpm_entry->MappingInformation = htole16(dpm_entry-> MappingInformation); dpm_entry->DeviceIndex = htole16(dpm_entry->DeviceIndex); dpm_entry->PhysicalBitsMapping = htole32(dpm_entry-> PhysicalBitsMapping); memcpy(&config_page.Entry, (u8 *)dpm_entry, sizeof(Mpi2DriverMap0Entry_t)); /* TODO-How to handle failed writes? 
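 * Note that the cached copy of DPM page 0 is kept in host byte order; each
 * field is byte-swapped to little-endian only for the duration of the
 * write and swapped back afterwards, whether or not the write succeeded.
 * A condensed round-trip for one 16-bit field (the local "error" is an
 * assumption; the surrounding loop also swaps MappingInformation and
 * PhysicalBitsMapping the same way):
 *
 *	dpm_entry->DeviceIndex = htole16(dpm_entry->DeviceIndex);
 *	memcpy(&config_page.Entry, dpm_entry, sizeof(*dpm_entry));
 *	error = mps_config_set_dpm_pg0(sc, &mpi_reply, &config_page,
 *	    entry_num);
 *	dpm_entry->DeviceIndex = le16toh(dpm_entry->DeviceIndex);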
*/ mps_dprint(sc, MPS_MAPPING, "%s: Flushing DPM entry %d.\n", __func__, entry_num); if (mps_config_set_dpm_pg0(sc, &mpi_reply, &config_page, entry_num)) { mps_dprint(sc, MPS_ERROR | MPS_MAPPING, "%s: Flush of " "DPM entry %d for device failed\n", __func__, entry_num); } else sc->dpm_flush_entry[entry_num] = 0; dpm_entry->MappingInformation = le16toh(dpm_entry-> MappingInformation); dpm_entry->DeviceIndex = le16toh(dpm_entry->DeviceIndex); dpm_entry->PhysicalBitsMapping = le32toh(dpm_entry-> PhysicalBitsMapping); } } /** * _mapping_allocate_memory- allocates the memory required for mapping tables * @sc: per adapter object * * Allocates the memory for all the tables required for host mapping * * Return 0 on success or non-zero on failure. */ int mps_mapping_allocate_memory(struct mps_softc *sc) { uint32_t dpm_pg0_sz; - sc->mapping_table = malloc((sizeof(struct dev_mapping_table) * - sc->max_devices), M_MPT2, M_ZERO|M_NOWAIT); + sc->mapping_table = mallocarray(sc->max_devices, + sizeof(struct dev_mapping_table), M_MPT2, M_ZERO|M_NOWAIT); if (!sc->mapping_table) goto free_resources; - sc->removal_table = malloc((sizeof(struct map_removal_table) * - sc->max_devices), M_MPT2, M_ZERO|M_NOWAIT); + sc->removal_table = mallocarray(sc->max_devices, + sizeof(struct map_removal_table), M_MPT2, M_ZERO|M_NOWAIT); if (!sc->removal_table) goto free_resources; - sc->enclosure_table = malloc((sizeof(struct enc_mapping_table) * - sc->max_enclosures), M_MPT2, M_ZERO|M_NOWAIT); + sc->enclosure_table = mallocarray(sc->max_enclosures, + sizeof(struct enc_mapping_table), M_MPT2, M_ZERO|M_NOWAIT); if (!sc->enclosure_table) goto free_resources; - sc->dpm_entry_used = malloc((sizeof(u8) * sc->max_dpm_entries), + sc->dpm_entry_used = mallocarray(sc->max_dpm_entries, sizeof(u8), M_MPT2, M_ZERO|M_NOWAIT); if (!sc->dpm_entry_used) goto free_resources; - sc->dpm_flush_entry = malloc((sizeof(u8) * sc->max_dpm_entries), + sc->dpm_flush_entry = mallocarray(sc->max_dpm_entries, sizeof(u8), M_MPT2, M_ZERO|M_NOWAIT); if (!sc->dpm_flush_entry) goto free_resources; dpm_pg0_sz = sizeof(MPI2_CONFIG_EXTENDED_PAGE_HEADER) + (sc->max_dpm_entries * sizeof(MPI2_CONFIG_PAGE_DRIVER_MAP0_ENTRY)); sc->dpm_pg0 = malloc(dpm_pg0_sz, M_MPT2, M_ZERO|M_NOWAIT); if (!sc->dpm_pg0) { printf("%s: memory alloc failed for dpm page; disabling dpm\n", __func__); sc->is_dpm_enable = 0; } return 0; free_resources: free(sc->mapping_table, M_MPT2); free(sc->removal_table, M_MPT2); free(sc->enclosure_table, M_MPT2); free(sc->dpm_entry_used, M_MPT2); free(sc->dpm_flush_entry, M_MPT2); free(sc->dpm_pg0, M_MPT2); printf("%s: device initialization failed due to failure in mapping " "table memory allocation\n", __func__); return -1; } /** * mps_mapping_free_memory- frees the memory allocated for mapping tables * @sc: per adapter object * * Returns nothing. 
*/ void mps_mapping_free_memory(struct mps_softc *sc) { free(sc->mapping_table, M_MPT2); free(sc->removal_table, M_MPT2); free(sc->enclosure_table, M_MPT2); free(sc->dpm_entry_used, M_MPT2); free(sc->dpm_flush_entry, M_MPT2); free(sc->dpm_pg0, M_MPT2); } static void _mapping_process_dpm_pg0(struct mps_softc *sc) { u8 missing_cnt, enc_idx; u16 slot_id, entry_num, num_slots; u32 map_idx, dev_idx, start_idx, end_idx; struct dev_mapping_table *mt_entry; Mpi2DriverMap0Entry_t *dpm_entry; u16 ioc_pg8_flags = le16toh(sc->ioc_pg8.Flags); u16 max_num_phy_ids = le16toh(sc->ioc_pg8.MaxNumPhysicalMappedIDs); struct enc_mapping_table *et_entry; u64 physical_id; u32 phy_bits = 0; /* * start_idx and end_idx are only used for IR. */ if (sc->ir_firmware) _mapping_get_ir_maprange(sc, &start_idx, &end_idx); /* * Look through all of the DPM entries that were read from the * controller and copy them over to the driver's internal table if they * have a non-zero ID. At this point, any ID with a value of 0 would be * invalid, so don't copy it. */ mps_dprint(sc, MPS_MAPPING, "%s: Start copy of %d DPM entries into the " "mapping table.\n", __func__, sc->max_dpm_entries); dpm_entry = (Mpi2DriverMap0Entry_t *) ((uint8_t *) sc->dpm_pg0 + sizeof(MPI2_CONFIG_EXTENDED_PAGE_HEADER)); for (entry_num = 0; entry_num < sc->max_dpm_entries; entry_num++, dpm_entry++) { physical_id = dpm_entry->PhysicalIdentifier.High; physical_id = (physical_id << 32) | dpm_entry->PhysicalIdentifier.Low; if (!physical_id) { sc->dpm_entry_used[entry_num] = 0; continue; } sc->dpm_entry_used[entry_num] = 1; dpm_entry->MappingInformation = le16toh(dpm_entry-> MappingInformation); missing_cnt = dpm_entry->MappingInformation & MPI2_DRVMAP0_MAPINFO_MISSING_MASK; dev_idx = le16toh(dpm_entry->DeviceIndex); phy_bits = le32toh(dpm_entry->PhysicalBitsMapping); /* * Volumes are at special locations in the mapping table so * account for that. Volume mapping table entries do not depend * on the type of mapping, so continue the loop after adding * volumes to the mapping table. */ if (sc->ir_firmware && (dev_idx >= start_idx) && (dev_idx <= end_idx)) { mt_entry = &sc->mapping_table[dev_idx]; mt_entry->physical_id = dpm_entry->PhysicalIdentifier.High; mt_entry->physical_id = (mt_entry->physical_id << 32) | dpm_entry->PhysicalIdentifier.Low; mt_entry->id = dev_idx; mt_entry->missing_count = missing_cnt; mt_entry->dpm_entry_num = entry_num; mt_entry->device_info = MPS_DEV_RESERVED; continue; } if ((ioc_pg8_flags & MPI2_IOCPAGE8_FLAGS_MASK_MAPPING_MODE) == MPI2_IOCPAGE8_FLAGS_ENCLOSURE_SLOT_MAPPING) { /* * The dev_idx for an enclosure is the start index. If * the start index is within the controller's default * enclosure area, set the number of slots for this * enclosure to the max allowed. Otherwise, it should be * a normal enclosure and the number of slots is in the * DPM entry's Mapping Information. 
*/ if (dev_idx < (sc->num_rsvd_entries + max_num_phy_ids)) { slot_id = 0; if (ioc_pg8_flags & MPI2_IOCPAGE8_FLAGS_DA_START_SLOT_1) slot_id = 1; num_slots = max_num_phy_ids; } else { slot_id = 0; num_slots = dpm_entry->MappingInformation & MPI2_DRVMAP0_MAPINFO_SLOT_MASK; num_slots >>= MPI2_DRVMAP0_MAPINFO_SLOT_SHIFT; } enc_idx = sc->num_enc_table_entries; if (enc_idx >= sc->max_enclosures) { mps_dprint(sc, MPS_ERROR | MPS_MAPPING, "%s: " "Number of enclosure entries in DPM exceed " "the max allowed of %d.\n", __func__, sc->max_enclosures); break; } sc->num_enc_table_entries++; et_entry = &sc->enclosure_table[enc_idx]; physical_id = dpm_entry->PhysicalIdentifier.High; et_entry->enclosure_id = (physical_id << 32) | dpm_entry->PhysicalIdentifier.Low; et_entry->start_index = dev_idx; et_entry->dpm_entry_num = entry_num; et_entry->num_slots = num_slots; et_entry->start_slot = slot_id; et_entry->missing_count = missing_cnt; et_entry->phy_bits = phy_bits; /* * Initialize all entries for this enclosure in the * mapping table and mark them as reserved. The actual * devices have not been processed yet but when they are * they will use these entries. If an entry is found * that already has a valid DPM index, the mapping table * is corrupt. This can happen if the mapping type is * changed without clearing all of the DPM entries in * the controller. */ mt_entry = &sc->mapping_table[dev_idx]; for (map_idx = dev_idx; map_idx < (dev_idx + num_slots); map_idx++, mt_entry++) { if (mt_entry->dpm_entry_num != MPS_DPM_BAD_IDX) { mps_dprint(sc, MPS_ERROR | MPS_MAPPING, "%s: Conflict in mapping table for " " enclosure %d\n", __func__, enc_idx); break; } physical_id = dpm_entry->PhysicalIdentifier.High; mt_entry->physical_id = (physical_id << 32) | dpm_entry->PhysicalIdentifier.Low; mt_entry->phy_bits = phy_bits; mt_entry->id = dev_idx; mt_entry->dpm_entry_num = entry_num; mt_entry->missing_count = missing_cnt; mt_entry->device_info = MPS_DEV_RESERVED; } } else if ((ioc_pg8_flags & MPI2_IOCPAGE8_FLAGS_MASK_MAPPING_MODE) == MPI2_IOCPAGE8_FLAGS_DEVICE_PERSISTENCE_MAPPING) { /* * Device mapping, so simply copy the DPM entries to the * mapping table, but check for a corrupt mapping table * (as described above in Enc/Slot mapping). */ map_idx = dev_idx; mt_entry = &sc->mapping_table[map_idx]; if (mt_entry->dpm_entry_num != MPS_DPM_BAD_IDX) { mps_dprint(sc, MPS_ERROR | MPS_MAPPING, "%s: " "Conflict in mapping table for device %d\n", __func__, map_idx); break; } physical_id = dpm_entry->PhysicalIdentifier.High; mt_entry->physical_id = (physical_id << 32) | dpm_entry->PhysicalIdentifier.Low; mt_entry->phy_bits = phy_bits; mt_entry->id = dev_idx; mt_entry->missing_count = missing_cnt; mt_entry->dpm_entry_num = entry_num; mt_entry->device_info = MPS_DEV_RESERVED; } } /*close the loop for DPM table */ } /* * mps_mapping_check_devices - start of the day check for device availabilty * @sc: per adapter object * * Returns nothing. */ void mps_mapping_check_devices(void *data) { u32 i; struct dev_mapping_table *mt_entry; struct mps_softc *sc = (struct mps_softc *)data; u16 ioc_pg8_flags = le16toh(sc->ioc_pg8.Flags); struct enc_mapping_table *et_entry; u32 start_idx = 0, end_idx = 0; u8 stop_device_checks = 0; MPS_FUNCTRACE(sc); /* * Clear this flag so that this function is never called again except * within this function if the check needs to be done again. The * purpose is to check for missing devices that are currently in the * mapping table so do this only at driver init after discovery. 
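 *
 * Because this function is also the callout handler, it follows the usual
 * callout(9) pattern used below: bail out if the callout is pending again
 * or was stopped, deactivate it, and re-arm it when the check has to run
 * once more. Condensed sketch of that pattern:
 *
 *	if (callout_pending(&sc->device_check_callout) ||
 *	    !callout_active(&sc->device_check_callout))
 *		return;
 *	callout_deactivate(&sc->device_check_callout);
 *
 * and, when no device has been mapped yet:
 *
 *	callout_reset(&sc->device_check_callout,
 *	    MPS_MISSING_CHECK_DELAY * hz, mps_mapping_check_devices, sc);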
*/ sc->track_mapping_events = 0; /* * callout synchronization * This is used to prevent race conditions for the callout. */ mps_dprint(sc, MPS_MAPPING, "%s: Start check for missing devices.\n", __func__); mtx_assert(&sc->mps_mtx, MA_OWNED); if ((callout_pending(&sc->device_check_callout)) || (!callout_active(&sc->device_check_callout))) { mps_dprint(sc, MPS_MAPPING, "%s: Device Check Callout is " "already pending or not active.\n", __func__); return; } callout_deactivate(&sc->device_check_callout); /* * Use callout to check if any devices in the mapping table have been * processed yet. If ALL devices are marked as not init_complete, no * devices have been processed and mapped. Until devices are mapped * there's no reason to mark them as missing. Continue resetting this * callout until devices have been mapped. */ if ((ioc_pg8_flags & MPI2_IOCPAGE8_FLAGS_MASK_MAPPING_MODE) == MPI2_IOCPAGE8_FLAGS_ENCLOSURE_SLOT_MAPPING) { et_entry = sc->enclosure_table; for (i = 0; i < sc->num_enc_table_entries; i++, et_entry++) { if (et_entry->init_complete) { stop_device_checks = 1; break; } } } else if ((ioc_pg8_flags & MPI2_IOCPAGE8_FLAGS_MASK_MAPPING_MODE) == MPI2_IOCPAGE8_FLAGS_DEVICE_PERSISTENCE_MAPPING) { mt_entry = sc->mapping_table; for (i = 0; i < sc->max_devices; i++, mt_entry++) { if (mt_entry->init_complete) { stop_device_checks = 1; break; } } } /* * Setup another callout check after a delay. Keep doing this until * devices are mapped. */ if (!stop_device_checks) { mps_dprint(sc, MPS_MAPPING, "%s: No devices have been mapped. " "Reset callout to check again after a %d second delay.\n", __func__, MPS_MISSING_CHECK_DELAY); callout_reset(&sc->device_check_callout, MPS_MISSING_CHECK_DELAY * hz, mps_mapping_check_devices, sc); return; } mps_dprint(sc, MPS_MAPPING, "%s: Device check complete.\n", __func__); /* * Depending on the mapping type, check if devices have been processed * and update their missing counts if not processed. */ if ((ioc_pg8_flags & MPI2_IOCPAGE8_FLAGS_MASK_MAPPING_MODE) == MPI2_IOCPAGE8_FLAGS_ENCLOSURE_SLOT_MAPPING) { et_entry = sc->enclosure_table; for (i = 0; i < sc->num_enc_table_entries; i++, et_entry++) { if (!et_entry->init_complete) { if (et_entry->missing_count < MPS_MAX_MISSING_COUNT) { mps_dprint(sc, MPS_MAPPING, "%s: " "Enclosure %d is missing from the " "topology. Update its missing " "count.\n", __func__, i); et_entry->missing_count++; if (et_entry->dpm_entry_num != MPS_DPM_BAD_IDX) { _mapping_commit_enc_entry(sc, et_entry); } } et_entry->init_complete = 1; } } if (!sc->ir_firmware) return; _mapping_get_ir_maprange(sc, &start_idx, &end_idx); mt_entry = &sc->mapping_table[start_idx]; } else if ((ioc_pg8_flags & MPI2_IOCPAGE8_FLAGS_MASK_MAPPING_MODE) == MPI2_IOCPAGE8_FLAGS_DEVICE_PERSISTENCE_MAPPING) { start_idx = 0; end_idx = sc->max_devices - 1; mt_entry = sc->mapping_table; } /* * The start and end indices have been set above according to the * mapping type. Go through these mappings and update any entries that * do not have the init_complete flag set, which means they are missing. */ if (end_idx == 0) return; for (i = start_idx; i < (end_idx + 1); i++, mt_entry++) { if (mt_entry->device_info & MPS_DEV_RESERVED && !mt_entry->physical_id) mt_entry->init_complete = 1; else if (mt_entry->device_info & MPS_DEV_RESERVED) { if (!mt_entry->init_complete) { mps_dprint(sc, MPS_MAPPING, "%s: Device in " "mapping table at index %d is missing from " "topology. 
Update its missing count.\n", __func__, i); if (mt_entry->missing_count < MPS_MAX_MISSING_COUNT) { mt_entry->missing_count++; if (mt_entry->dpm_entry_num != MPS_DPM_BAD_IDX) { _mapping_commit_map_entry(sc, mt_entry); } } mt_entry->init_complete = 1; } } } } /** * mps_mapping_initialize - initialize mapping tables * @sc: per adapter object * * Read controller persitant mapping tables into internal data area. * * Return 0 for success or non-zero for failure. */ int mps_mapping_initialize(struct mps_softc *sc) { uint16_t volume_mapping_flags, dpm_pg0_sz; uint32_t i; Mpi2ConfigReply_t mpi_reply; int error; uint8_t retry_count; uint16_t ioc_pg8_flags = le16toh(sc->ioc_pg8.Flags); /* The additional 1 accounts for the virtual enclosure * created for the controller */ sc->max_enclosures = sc->facts->MaxEnclosures + 1; sc->max_expanders = sc->facts->MaxSasExpanders; sc->max_volumes = sc->facts->MaxVolumes; sc->max_devices = sc->facts->MaxTargets + sc->max_volumes; sc->pending_map_events = 0; sc->num_enc_table_entries = 0; sc->num_rsvd_entries = 0; sc->max_dpm_entries = sc->ioc_pg8.MaxPersistentEntries; sc->is_dpm_enable = (sc->max_dpm_entries) ? 1 : 0; sc->track_mapping_events = 0; mps_dprint(sc, MPS_MAPPING, "%s: Mapping table has a max of %d entries " "and DPM has a max of %d entries.\n", __func__, sc->max_devices, sc->max_dpm_entries); if (ioc_pg8_flags & MPI2_IOCPAGE8_FLAGS_DISABLE_PERSISTENT_MAPPING) sc->is_dpm_enable = 0; if (ioc_pg8_flags & MPI2_IOCPAGE8_FLAGS_RESERVED_TARGETID_0) sc->num_rsvd_entries = 1; volume_mapping_flags = sc->ioc_pg8.IRVolumeMappingFlags & MPI2_IOCPAGE8_IRFLAGS_MASK_VOLUME_MAPPING_MODE; if (sc->ir_firmware && (volume_mapping_flags == MPI2_IOCPAGE8_IRFLAGS_LOW_VOLUME_MAPPING)) sc->num_rsvd_entries += sc->max_volumes; error = mps_mapping_allocate_memory(sc); if (error) return (error); for (i = 0; i < sc->max_devices; i++) _mapping_clear_map_entry(sc->mapping_table + i); for (i = 0; i < sc->max_enclosures; i++) _mapping_clear_enc_entry(sc->enclosure_table + i); for (i = 0; i < sc->max_devices; i++) { sc->removal_table[i].dev_handle = 0; sc->removal_table[i].dpm_entry_num = MPS_DPM_BAD_IDX; } memset(sc->dpm_entry_used, 0, sc->max_dpm_entries); memset(sc->dpm_flush_entry, 0, sc->max_dpm_entries); if (sc->is_dpm_enable) { dpm_pg0_sz = sizeof(MPI2_CONFIG_EXTENDED_PAGE_HEADER) + (sc->max_dpm_entries * sizeof(MPI2_CONFIG_PAGE_DRIVER_MAP0_ENTRY)); retry_count = 0; retry_read_dpm: if (mps_config_get_dpm_pg0(sc, &mpi_reply, sc->dpm_pg0, dpm_pg0_sz)) { mps_dprint(sc, MPS_ERROR | MPS_MAPPING, "%s: DPM page " "read failed.\n", __func__); if (retry_count < 3) { retry_count++; goto retry_read_dpm; } sc->is_dpm_enable = 0; } } if (sc->is_dpm_enable) _mapping_process_dpm_pg0(sc); else { mps_dprint(sc, MPS_MAPPING, "%s: DPM processing is disabled. " "Device mappings will not persist across reboots or " "resets.\n", __func__); } sc->track_mapping_events = 1; return 0; } /** * mps_mapping_exit - clear mapping table and associated memory * @sc: per adapter object * * Returns nothing. */ void mps_mapping_exit(struct mps_softc *sc) { _mapping_flush_dpm_pages(sc); mps_mapping_free_memory(sc); } /** * mps_mapping_get_tid - return the target id for sas device and handle * @sc: per adapter object * @sas_address: sas address of the device * @handle: device handle * * Returns valid target ID on success or BAD_ID. 
*/ unsigned int mps_mapping_get_tid(struct mps_softc *sc, uint64_t sas_address, u16 handle) { u32 map_idx; struct dev_mapping_table *mt_entry; for (map_idx = 0; map_idx < sc->max_devices; map_idx++) { mt_entry = &sc->mapping_table[map_idx]; if (mt_entry->dev_handle == handle && mt_entry->physical_id == sas_address) return mt_entry->id; } return MPS_MAP_BAD_ID; } /** * mps_mapping_get_tid_from_handle - find a target id in mapping table using * only the dev handle. This is just a wrapper function for the local function * _mapping_get_mt_idx_from_handle. * @sc: per adapter object * @handle: device handle * * Returns valid target ID on success or BAD_ID. */ unsigned int mps_mapping_get_tid_from_handle(struct mps_softc *sc, u16 handle) { return (_mapping_get_mt_idx_from_handle(sc, handle)); } /** * mps_mapping_get_raid_tid - return the target id for raid device * @sc: per adapter object * @wwid: world wide identifier for raid volume * @volHandle: volume device handle * * Returns valid target ID on success or BAD_ID. */ unsigned int mps_mapping_get_raid_tid(struct mps_softc *sc, u64 wwid, u16 volHandle) { u32 start_idx, end_idx, map_idx; struct dev_mapping_table *mt_entry; _mapping_get_ir_maprange(sc, &start_idx, &end_idx); mt_entry = &sc->mapping_table[start_idx]; for (map_idx = start_idx; map_idx <= end_idx; map_idx++, mt_entry++) { if (mt_entry->dev_handle == volHandle && mt_entry->physical_id == wwid) return mt_entry->id; } return MPS_MAP_BAD_ID; } /** * mps_mapping_get_raid_tid_from_handle - find raid device in mapping table * using only the volume dev handle. This is just a wrapper function for the * local function _mapping_get_ir_mt_idx_from_handle. * @sc: per adapter object * @volHandle: volume device handle * * Returns valid target ID on success or BAD_ID. */ unsigned int mps_mapping_get_raid_tid_from_handle(struct mps_softc *sc, u16 volHandle) { return (_mapping_get_ir_mt_idx_from_handle(sc, volHandle)); } /** * mps_mapping_enclosure_dev_status_change_event - handle enclosure events * @sc: per adapter object * @event_data: event data payload * * Return nothing. */ void mps_mapping_enclosure_dev_status_change_event(struct mps_softc *sc, Mpi2EventDataSasEnclDevStatusChange_t *event_data) { u8 enc_idx, missing_count; struct enc_mapping_table *et_entry; Mpi2DriverMap0Entry_t *dpm_entry; u16 ioc_pg8_flags = le16toh(sc->ioc_pg8.Flags); u8 map_shift = MPI2_DRVMAP0_MAPINFO_SLOT_SHIFT; u8 update_phy_bits = 0; u32 saved_phy_bits; uint64_t temp64_var; if ((ioc_pg8_flags & MPI2_IOCPAGE8_FLAGS_MASK_MAPPING_MODE) != MPI2_IOCPAGE8_FLAGS_ENCLOSURE_SLOT_MAPPING) goto out; dpm_entry = (Mpi2DriverMap0Entry_t *)((u8 *)sc->dpm_pg0 + sizeof(MPI2_CONFIG_EXTENDED_PAGE_HEADER)); if (event_data->ReasonCode == MPI2_EVENT_SAS_ENCL_RC_ADDED) { if (!event_data->NumSlots) { mps_dprint(sc, MPS_ERROR | MPS_MAPPING, "%s: Enclosure " "with handle = 0x%x reported 0 slots.\n", __func__, le16toh(event_data->EnclosureHandle)); goto out; } temp64_var = event_data->EnclosureLogicalID.High; temp64_var = (temp64_var << 32) | event_data->EnclosureLogicalID.Low; enc_idx = _mapping_get_enc_idx_from_id(sc, temp64_var, event_data->PhyBits); /* * If the Added enclosure is already in the Enclosure Table, * make sure that all the the enclosure info is up to date. If * the enclosure was missing and has just been added back, or if * the enclosure's Phy Bits have changed, clear the missing * count and update the Phy Bits in the mapping table and in the * DPM, if it's being used. 
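 *
 * For reference, the 16-bit MappingInformation word of a DPM entry packs
 * the slot count into its upper bits and the missing count into its low
 * bits; the locals "info", "num_slots" and "missing_count" below are
 * illustrative only:
 *
 *	info = (num_slots << MPI2_DRVMAP0_MAPINFO_SLOT_SHIFT) | missing_count;
 *	missing_count = info & MPI2_DRVMAP0_MAPINFO_MISSING_MASK;
 *	num_slots = (info & MPI2_DRVMAP0_MAPINFO_SLOT_MASK) >>
 *	    MPI2_DRVMAP0_MAPINFO_SLOT_SHIFT;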
*/ if (enc_idx != MPS_ENCTABLE_BAD_IDX) { et_entry = &sc->enclosure_table[enc_idx]; if (et_entry->init_complete && !et_entry->missing_count) { mps_dprint(sc, MPS_MAPPING, "%s: Enclosure %d " "is already present with handle = 0x%x\n", __func__, enc_idx, et_entry->enc_handle); goto out; } et_entry->enc_handle = le16toh(event_data-> EnclosureHandle); et_entry->start_slot = le16toh(event_data->StartSlot); saved_phy_bits = et_entry->phy_bits; et_entry->phy_bits |= le32toh(event_data->PhyBits); if (saved_phy_bits != et_entry->phy_bits) update_phy_bits = 1; if (et_entry->missing_count || update_phy_bits) { et_entry->missing_count = 0; if (sc->is_dpm_enable && et_entry->dpm_entry_num != MPS_DPM_BAD_IDX) { dpm_entry += et_entry->dpm_entry_num; missing_count = (u8)(dpm_entry->MappingInformation & MPI2_DRVMAP0_MAPINFO_MISSING_MASK); if (missing_count || update_phy_bits) { dpm_entry->MappingInformation = et_entry->num_slots; dpm_entry->MappingInformation <<= map_shift; dpm_entry->PhysicalBitsMapping = et_entry->phy_bits; sc->dpm_flush_entry[et_entry-> dpm_entry_num] = 1; } } } } else { /* * This is a new enclosure that is being added. * Initialize the Enclosure Table entry. It will be * finalized when a device is added for the enclosure * and the enclosure has enough space in the Mapping * Table to map its devices. */ enc_idx = sc->num_enc_table_entries; if (enc_idx >= sc->max_enclosures) { mps_dprint(sc, MPS_ERROR | MPS_MAPPING, "%s: " "Enclosure cannot be added to mapping " "table because it's full.\n", __func__); goto out; } sc->num_enc_table_entries++; et_entry = &sc->enclosure_table[enc_idx]; et_entry->enc_handle = le16toh(event_data-> EnclosureHandle); et_entry->enclosure_id = le64toh(event_data-> EnclosureLogicalID.High); et_entry->enclosure_id = ((et_entry->enclosure_id << 32) | le64toh(event_data->EnclosureLogicalID.Low)); et_entry->start_index = MPS_MAPTABLE_BAD_IDX; et_entry->dpm_entry_num = MPS_DPM_BAD_IDX; et_entry->num_slots = le16toh(event_data->NumSlots); et_entry->start_slot = le16toh(event_data->StartSlot); et_entry->phy_bits = le32toh(event_data->PhyBits); } et_entry->init_complete = 1; } else if (event_data->ReasonCode == MPI2_EVENT_SAS_ENCL_RC_NOT_RESPONDING) { /* * An enclosure was removed. Update its missing count and then * update the DPM entry with the new missing count for the * enclosure. */ enc_idx = _mapping_get_enc_idx_from_handle(sc, le16toh(event_data->EnclosureHandle)); if (enc_idx == MPS_ENCTABLE_BAD_IDX) { mps_dprint(sc, MPS_ERROR | MPS_MAPPING, "%s: Cannot " "unmap enclosure %d because it has already been " "deleted.\n", __func__, enc_idx); goto out; } et_entry = &sc->enclosure_table[enc_idx]; if (et_entry->missing_count < MPS_MAX_MISSING_COUNT) et_entry->missing_count++; if (sc->is_dpm_enable && et_entry->dpm_entry_num != MPS_DPM_BAD_IDX) { dpm_entry += et_entry->dpm_entry_num; dpm_entry->MappingInformation = et_entry->num_slots; dpm_entry->MappingInformation <<= map_shift; dpm_entry->MappingInformation |= et_entry->missing_count; sc->dpm_flush_entry[et_entry->dpm_entry_num] = 1; } et_entry->init_complete = 1; } out: _mapping_flush_dpm_pages(sc); if (sc->pending_map_events) sc->pending_map_events--; } /** * mps_mapping_topology_change_event - handle topology change events * @sc: per adapter object * @event_data: event data payload * * Returns nothing. 
*/ void mps_mapping_topology_change_event(struct mps_softc *sc, Mpi2EventDataSasTopologyChangeList_t *event_data) { struct _map_topology_change topo_change; struct _map_phy_change *phy_change; Mpi2EventSasTopoPhyEntry_t *event_phy_change; u8 i, num_entries; topo_change.enc_handle = le16toh(event_data->EnclosureHandle); topo_change.exp_handle = le16toh(event_data->ExpanderDevHandle); num_entries = event_data->NumEntries; topo_change.num_entries = num_entries; topo_change.start_phy_num = event_data->StartPhyNum; topo_change.num_phys = event_data->NumPhys; topo_change.exp_status = event_data->ExpStatus; event_phy_change = event_data->PHY; topo_change.phy_details = NULL; if (!num_entries) goto out; - phy_change = malloc(sizeof(struct _map_phy_change) * num_entries, + phy_change = mallocarray(num_entries, sizeof(struct _map_phy_change), M_MPT2, M_NOWAIT|M_ZERO); topo_change.phy_details = phy_change; if (!phy_change) goto out; for (i = 0; i < num_entries; i++, event_phy_change++, phy_change++) { phy_change->dev_handle = le16toh(event_phy_change-> AttachedDevHandle); phy_change->reason = event_phy_change->PhyStatus & MPI2_EVENT_SAS_TOPO_RC_MASK; } _mapping_update_missing_count(sc, &topo_change); _mapping_get_dev_info(sc, &topo_change); _mapping_clear_removed_entries(sc); _mapping_add_new_device(sc, &topo_change); out: free(topo_change.phy_details, M_MPT2); _mapping_flush_dpm_pages(sc); if (sc->pending_map_events) sc->pending_map_events--; } /** * mps_mapping_ir_config_change_event - handle IR config change list events * @sc: per adapter object * @event_data: event data payload * * Returns nothing. */ void mps_mapping_ir_config_change_event(struct mps_softc *sc, Mpi2EventDataIrConfigChangeList_t *event_data) { Mpi2EventIrConfigElement_t *element; int i; u64 *wwid_table; u32 map_idx, flags; struct dev_mapping_table *mt_entry; u16 element_flags; - wwid_table = malloc(sizeof(u64) * event_data->NumElements, M_MPT2, + wwid_table = mallocarray(event_data->NumElements, sizeof(u64), M_MPT2, M_NOWAIT | M_ZERO); if (!wwid_table) goto out; element = (Mpi2EventIrConfigElement_t *)&event_data->ConfigElement[0]; flags = le32toh(event_data->Flags); /* * For volume changes, get the WWID for the volume and put it in a * table to be used in the processing of the IR change event. */ for (i = 0; i < event_data->NumElements; i++, element++) { element_flags = le16toh(element->ElementFlags); if ((element->ReasonCode != MPI2_EVENT_IR_CHANGE_RC_ADDED) && (element->ReasonCode != MPI2_EVENT_IR_CHANGE_RC_REMOVED) && (element->ReasonCode != MPI2_EVENT_IR_CHANGE_RC_NO_CHANGE) && (element->ReasonCode != MPI2_EVENT_IR_CHANGE_RC_VOLUME_CREATED)) continue; if ((element_flags & MPI2_EVENT_IR_CHANGE_EFLAGS_ELEMENT_TYPE_MASK) == MPI2_EVENT_IR_CHANGE_EFLAGS_VOLUME_ELEMENT) { mps_config_get_volume_wwid(sc, le16toh(element->VolDevHandle), &wwid_table[i]); } } /* * Check the ReasonCode for each element in the IR event and Add/Remove * Volumes or Physical Disks of Volumes to/from the mapping table. Use * the WWIDs gotten above in wwid_table. */ if (flags == MPI2_EVENT_IR_CHANGE_FLAGS_FOREIGN_CONFIG) goto out; else { element = (Mpi2EventIrConfigElement_t *)&event_data-> ConfigElement[0]; for (i = 0; i < event_data->NumElements; i++, element++) { if (element->ReasonCode == MPI2_EVENT_IR_CHANGE_RC_ADDED || element->ReasonCode == MPI2_EVENT_IR_CHANGE_RC_VOLUME_CREATED) { map_idx = _mapping_get_ir_mt_idx_from_wwid (sc, wwid_table[i]); if (map_idx != MPS_MAPTABLE_BAD_IDX) { /* * The volume is already in the mapping * table. 
Just update it's info. */ mt_entry = &sc->mapping_table[map_idx]; mt_entry->id = map_idx; mt_entry->dev_handle = le16toh (element->VolDevHandle); mt_entry->device_info = MPS_DEV_RESERVED | MPS_MAP_IN_USE; _mapping_update_ir_missing_cnt(sc, map_idx, element, wwid_table[i]); continue; } /* * Volume is not in mapping table yet. Find a * free entry in the mapping table at the * volume mapping locations. If no entries are * available, this is an error because it means * there are more volumes than can be mapped * and that should never happen for volumes. */ map_idx = _mapping_get_free_ir_mt_idx(sc); if (map_idx == MPS_MAPTABLE_BAD_IDX) { mps_dprint(sc, MPS_ERROR | MPS_MAPPING, "%s: failed to add the volume with " "handle 0x%04x because there is no " "free space available in the " "mapping table\n", __func__, le16toh(element->VolDevHandle)); continue; } mt_entry = &sc->mapping_table[map_idx]; mt_entry->physical_id = wwid_table[i]; mt_entry->id = map_idx; mt_entry->dev_handle = le16toh(element-> VolDevHandle); mt_entry->device_info = MPS_DEV_RESERVED | MPS_MAP_IN_USE; _mapping_update_ir_missing_cnt(sc, map_idx, element, wwid_table[i]); } else if (element->ReasonCode == MPI2_EVENT_IR_CHANGE_RC_REMOVED) { map_idx = _mapping_get_ir_mt_idx_from_wwid(sc, wwid_table[i]); if (map_idx == MPS_MAPTABLE_BAD_IDX) { mps_dprint(sc, MPS_MAPPING,"%s: Failed " "to remove a volume because it has " "already been removed.\n", __func__); continue; } _mapping_update_ir_missing_cnt(sc, map_idx, element, wwid_table[i]); } else if (element->ReasonCode == MPI2_EVENT_IR_CHANGE_RC_VOLUME_DELETED) { map_idx = _mapping_get_mt_idx_from_handle(sc, le16toh(element->VolDevHandle)); if (map_idx == MPS_MAPTABLE_BAD_IDX) { mps_dprint(sc, MPS_MAPPING,"%s: Failed " "to remove volume with handle " "0x%04x because it has already " "been removed.\n", __func__, le16toh(element->VolDevHandle)); continue; } mt_entry = &sc->mapping_table[map_idx]; _mapping_update_ir_missing_cnt(sc, map_idx, element, mt_entry->physical_id); } } } out: _mapping_flush_dpm_pages(sc); free(wwid_table, M_MPT2); if (sc->pending_map_events) sc->pending_map_events--; } int mps_mapping_dump(SYSCTL_HANDLER_ARGS) { struct mps_softc *sc; struct dev_mapping_table *mt_entry; struct sbuf sbuf; int i, error; sc = (struct mps_softc *)arg1; error = sysctl_wire_old_buffer(req, 0); if (error != 0) return (error); sbuf_new_for_sysctl(&sbuf, NULL, 128, req); sbuf_printf(&sbuf, "\nindex physical_id handle id\n"); for (i = 0; i < sc->max_devices; i++) { mt_entry = &sc->mapping_table[i]; if (mt_entry->physical_id == 0) continue; sbuf_printf(&sbuf, "%4d %jx %04x %hd\n", i, mt_entry->physical_id, mt_entry->dev_handle, mt_entry->id); } error = sbuf_finish(&sbuf); sbuf_delete(&sbuf); return (error); } int mps_mapping_encl_dump(SYSCTL_HANDLER_ARGS) { struct mps_softc *sc; struct enc_mapping_table *enc_entry; struct sbuf sbuf; int i, error; sc = (struct mps_softc *)arg1; error = sysctl_wire_old_buffer(req, 0); if (error != 0) return (error); sbuf_new_for_sysctl(&sbuf, NULL, 128, req); sbuf_printf(&sbuf, "\nindex enclosure_id handle map_index\n"); for (i = 0; i < sc->max_enclosures; i++) { enc_entry = &sc->enclosure_table[i]; if (enc_entry->enclosure_id == 0) continue; sbuf_printf(&sbuf, "%4d %jx %04x %d\n", i, enc_entry->enclosure_id, enc_entry->enc_handle, enc_entry->start_index); } error = sbuf_finish(&sbuf); sbuf_delete(&sbuf); return (error); } Index: head/sys/dev/mpt/mpt_cam.c =================================================================== --- head/sys/dev/mpt/mpt_cam.c (revision 
327948) +++ head/sys/dev/mpt/mpt_cam.c (revision 327949) @@ -1,5369 +1,5369 @@ /*- * FreeBSD/CAM specific routines for LSI '909 FC adapters. * FreeBSD Version. * * SPDX-License-Identifier: BSD-2-Clause-FreeBSD AND BSD-3-Clause * * Copyright (c) 2000, 2001 by Greg Ansley * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice immediately at the beginning of the file, without modification, * this list of conditions, and the following disclaimer. * 2. The name of the author may not be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ /*- * Copyright (c) 2002, 2006 by Matthew Jacob * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are * met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce at minimum a disclaimer * substantially similar to the "NO WARRANTY" disclaimer below * ("Disclaimer") and any redistribution must be conditioned upon including * a substantially similar Disclaimer requirement for further binary * redistribution. * 3. Neither the names of the above listed copyright holders nor the names * of any contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF THE COPYRIGHT * OWNER OR CONTRIBUTOR IS ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * Support from Chris Ellsworth in order to make SAS adapters work * is gratefully acknowledged. * * Support from LSI-Logic has also gone a great deal toward making this a * workable subsystem and is gratefully acknowledged. */ /*- * Copyright (c) 2004, Avid Technology, Inc. and its contributors. * Copyright (c) 2005, WHEEL Sp. z o.o. 
* Copyright (c) 2004, 2005 Justin T. Gibbs * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are * met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce at minimum a disclaimer * substantially similar to the "NO WARRANTY" disclaimer below * ("Disclaimer") and any redistribution must be conditioned upon including * a substantially similar Disclaimer requirement for further binary * redistribution. * 3. Neither the names of the above listed copyright holders nor the names * of any contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF THE COPYRIGHT * OWNER OR CONTRIBUTOR IS ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); #include #include #include #include "dev/mpt/mpilib/mpi_ioc.h" /* XXX Fix Event Handling!!! 
*/ #include "dev/mpt/mpilib/mpi_init.h" #include "dev/mpt/mpilib/mpi_targ.h" #include "dev/mpt/mpilib/mpi_fc.h" #include "dev/mpt/mpilib/mpi_sas.h" #include #include #include static void mpt_poll(struct cam_sim *); static timeout_t mpt_timeout; static void mpt_action(struct cam_sim *, union ccb *); static int mpt_get_spi_settings(struct mpt_softc *, struct ccb_trans_settings *); static void mpt_setwidth(struct mpt_softc *, int, int); static void mpt_setsync(struct mpt_softc *, int, int, int); static int mpt_update_spi_config(struct mpt_softc *, int); static mpt_reply_handler_t mpt_scsi_reply_handler; static mpt_reply_handler_t mpt_scsi_tmf_reply_handler; static mpt_reply_handler_t mpt_fc_els_reply_handler; static int mpt_scsi_reply_frame_handler(struct mpt_softc *, request_t *, MSG_DEFAULT_REPLY *); static int mpt_bus_reset(struct mpt_softc *, target_id_t, lun_id_t, int); static int mpt_fc_reset_link(struct mpt_softc *, int); static int mpt_spawn_recovery_thread(struct mpt_softc *mpt); static void mpt_terminate_recovery_thread(struct mpt_softc *mpt); static void mpt_recovery_thread(void *arg); static void mpt_recover_commands(struct mpt_softc *mpt); static int mpt_scsi_send_tmf(struct mpt_softc *, u_int, u_int, u_int, target_id_t, lun_id_t, u_int, int); static void mpt_fc_post_els(struct mpt_softc *mpt, request_t *, int); static void mpt_post_target_command(struct mpt_softc *, request_t *, int); static int mpt_add_els_buffers(struct mpt_softc *mpt); static int mpt_add_target_commands(struct mpt_softc *mpt); static int mpt_enable_lun(struct mpt_softc *, target_id_t, lun_id_t); static int mpt_disable_lun(struct mpt_softc *, target_id_t, lun_id_t); static void mpt_target_start_io(struct mpt_softc *, union ccb *); static cam_status mpt_abort_target_ccb(struct mpt_softc *, union ccb *); static int mpt_abort_target_cmd(struct mpt_softc *, request_t *); static void mpt_scsi_tgt_status(struct mpt_softc *, union ccb *, request_t *, uint8_t, uint8_t const *, u_int); static void mpt_scsi_tgt_tsk_mgmt(struct mpt_softc *, request_t *, mpt_task_mgmt_t, tgt_resource_t *, int); static void mpt_tgt_dump_tgt_state(struct mpt_softc *, request_t *); static void mpt_tgt_dump_req_state(struct mpt_softc *, request_t *); static mpt_reply_handler_t mpt_scsi_tgt_reply_handler; static mpt_reply_handler_t mpt_sata_pass_reply_handler; static uint32_t scsi_io_handler_id = MPT_HANDLER_ID_NONE; static uint32_t scsi_tmf_handler_id = MPT_HANDLER_ID_NONE; static uint32_t fc_els_handler_id = MPT_HANDLER_ID_NONE; static uint32_t sata_pass_handler_id = MPT_HANDLER_ID_NONE; static mpt_probe_handler_t mpt_cam_probe; static mpt_attach_handler_t mpt_cam_attach; static mpt_enable_handler_t mpt_cam_enable; static mpt_ready_handler_t mpt_cam_ready; static mpt_event_handler_t mpt_cam_event; static mpt_reset_handler_t mpt_cam_ioc_reset; static mpt_detach_handler_t mpt_cam_detach; static struct mpt_personality mpt_cam_personality = { .name = "mpt_cam", .probe = mpt_cam_probe, .attach = mpt_cam_attach, .enable = mpt_cam_enable, .ready = mpt_cam_ready, .event = mpt_cam_event, .reset = mpt_cam_ioc_reset, .detach = mpt_cam_detach, }; DECLARE_MPT_PERSONALITY(mpt_cam, SI_ORDER_SECOND); MODULE_DEPEND(mpt_cam, cam, 1, 1, 1); int mpt_enable_sata_wc = -1; TUNABLE_INT("hw.mpt.enable_sata_wc", &mpt_enable_sata_wc); static int mpt_cam_probe(struct mpt_softc *mpt) { int role; /* * Only attach to nodes that support the initiator or target role * (or want to) or have RAID physical devices that need CAM pass-thru * support. 
*/ if (mpt->do_cfg_role) { role = mpt->cfg_role; } else { role = mpt->role; } if ((role & (MPT_ROLE_TARGET|MPT_ROLE_INITIATOR)) != 0 || (mpt->ioc_page2 != NULL && mpt->ioc_page2->MaxPhysDisks != 0)) { return (0); } return (ENODEV); } static int mpt_cam_attach(struct mpt_softc *mpt) { struct cam_devq *devq; mpt_handler_t handler; int maxq; int error; MPT_LOCK(mpt); TAILQ_INIT(&mpt->request_timeout_list); maxq = (mpt->ioc_facts.GlobalCredits < MPT_MAX_REQUESTS(mpt))? mpt->ioc_facts.GlobalCredits : MPT_MAX_REQUESTS(mpt); handler.reply_handler = mpt_scsi_reply_handler; error = mpt_register_handler(mpt, MPT_HANDLER_REPLY, handler, &scsi_io_handler_id); if (error != 0) { MPT_UNLOCK(mpt); goto cleanup; } handler.reply_handler = mpt_scsi_tmf_reply_handler; error = mpt_register_handler(mpt, MPT_HANDLER_REPLY, handler, &scsi_tmf_handler_id); if (error != 0) { MPT_UNLOCK(mpt); goto cleanup; } /* * If we're fibre channel and could support target mode, we register * an ELS reply handler and give it resources. */ if (mpt->is_fc && (mpt->role & MPT_ROLE_TARGET) != 0) { handler.reply_handler = mpt_fc_els_reply_handler; error = mpt_register_handler(mpt, MPT_HANDLER_REPLY, handler, &fc_els_handler_id); if (error != 0) { MPT_UNLOCK(mpt); goto cleanup; } if (mpt_add_els_buffers(mpt) == FALSE) { error = ENOMEM; MPT_UNLOCK(mpt); goto cleanup; } maxq -= mpt->els_cmds_allocated; } /* * If we support target mode, we register a reply handler for it, * but don't add command resources until we actually enable target * mode. */ if (mpt->is_fc && (mpt->role & MPT_ROLE_TARGET) != 0) { handler.reply_handler = mpt_scsi_tgt_reply_handler; error = mpt_register_handler(mpt, MPT_HANDLER_REPLY, handler, &mpt->scsi_tgt_handler_id); if (error != 0) { MPT_UNLOCK(mpt); goto cleanup; } } if (mpt->is_sas) { handler.reply_handler = mpt_sata_pass_reply_handler; error = mpt_register_handler(mpt, MPT_HANDLER_REPLY, handler, &sata_pass_handler_id); if (error != 0) { MPT_UNLOCK(mpt); goto cleanup; } } /* * We keep one request reserved for timeout TMF requests. */ mpt->tmf_req = mpt_get_request(mpt, FALSE); if (mpt->tmf_req == NULL) { mpt_prt(mpt, "Unable to allocate dedicated TMF request!\n"); error = ENOMEM; MPT_UNLOCK(mpt); goto cleanup; } /* * Mark the request as free even though not on the free list. * There is only one TMF request allowed to be outstanding at * a time and the TMF routines perform their own allocation * tracking using the standard state flags. */ mpt->tmf_req->state = REQ_STATE_FREE; maxq--; /* * The rest of this is CAM foo, for which we need to drop our lock */ MPT_UNLOCK(mpt); if (mpt_spawn_recovery_thread(mpt) != 0) { mpt_prt(mpt, "Unable to spawn recovery thread!\n"); error = ENOMEM; goto cleanup; } /* * Create the device queue for our SIM(s). */ devq = cam_simq_alloc(maxq); if (devq == NULL) { mpt_prt(mpt, "Unable to allocate CAM SIMQ!\n"); error = ENOMEM; goto cleanup; } /* * Construct our SIM entry. */ mpt->sim = mpt_sim_alloc(mpt_action, mpt_poll, "mpt", mpt, 1, maxq, devq); if (mpt->sim == NULL) { mpt_prt(mpt, "Unable to allocate CAM SIM!\n"); cam_simq_free(devq); error = ENOMEM; goto cleanup; } /* * Register exactly this bus. 
*/ MPT_LOCK(mpt); if (xpt_bus_register(mpt->sim, mpt->dev, 0) != CAM_SUCCESS) { mpt_prt(mpt, "Bus registration Failed!\n"); error = ENOMEM; MPT_UNLOCK(mpt); goto cleanup; } if (xpt_create_path(&mpt->path, NULL, cam_sim_path(mpt->sim), CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) { mpt_prt(mpt, "Unable to allocate Path!\n"); error = ENOMEM; MPT_UNLOCK(mpt); goto cleanup; } MPT_UNLOCK(mpt); /* * Only register a second bus for RAID physical * devices if the controller supports RAID. */ if (mpt->ioc_page2 == NULL || mpt->ioc_page2->MaxPhysDisks == 0) { return (0); } /* * Create a "bus" to export all hidden disks to CAM. */ mpt->phydisk_sim = mpt_sim_alloc(mpt_action, mpt_poll, "mpt", mpt, 1, maxq, devq); if (mpt->phydisk_sim == NULL) { mpt_prt(mpt, "Unable to allocate Physical Disk CAM SIM!\n"); error = ENOMEM; goto cleanup; } /* * Register this bus. */ MPT_LOCK(mpt); if (xpt_bus_register(mpt->phydisk_sim, mpt->dev, 1) != CAM_SUCCESS) { mpt_prt(mpt, "Physical Disk Bus registration Failed!\n"); error = ENOMEM; MPT_UNLOCK(mpt); goto cleanup; } if (xpt_create_path(&mpt->phydisk_path, NULL, cam_sim_path(mpt->phydisk_sim), CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) { mpt_prt(mpt, "Unable to allocate Physical Disk Path!\n"); error = ENOMEM; MPT_UNLOCK(mpt); goto cleanup; } MPT_UNLOCK(mpt); mpt_lprt(mpt, MPT_PRT_DEBUG, "attached cam\n"); return (0); cleanup: mpt_cam_detach(mpt); return (error); } /* * Read FC configuration information */ static int mpt_read_config_info_fc(struct mpt_softc *mpt) { struct sysctl_ctx_list *ctx; struct sysctl_oid *tree; char *topology = NULL; int rv; rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_FC_PORT, 0, 0, &mpt->mpt_fcport_page0.Header, FALSE, 5000); if (rv) { return (-1); } mpt_lprt(mpt, MPT_PRT_DEBUG, "FC Port Page 0 Header: %x %x %x %x\n", mpt->mpt_fcport_page0.Header.PageVersion, mpt->mpt_fcport_page0.Header.PageLength, mpt->mpt_fcport_page0.Header.PageNumber, mpt->mpt_fcport_page0.Header.PageType); rv = mpt_read_cur_cfg_page(mpt, 0, &mpt->mpt_fcport_page0.Header, sizeof(mpt->mpt_fcport_page0), FALSE, 5000); if (rv) { mpt_prt(mpt, "failed to read FC Port Page 0\n"); return (-1); } mpt2host_config_page_fc_port_0(&mpt->mpt_fcport_page0); switch (mpt->mpt_fcport_page0.CurrentSpeed) { case MPI_FCPORTPAGE0_CURRENT_SPEED_1GBIT: mpt->mpt_fcport_speed = 1; break; case MPI_FCPORTPAGE0_CURRENT_SPEED_2GBIT: mpt->mpt_fcport_speed = 2; break; case MPI_FCPORTPAGE0_CURRENT_SPEED_10GBIT: mpt->mpt_fcport_speed = 10; break; case MPI_FCPORTPAGE0_CURRENT_SPEED_4GBIT: mpt->mpt_fcport_speed = 4; break; default: mpt->mpt_fcport_speed = 0; break; } switch (mpt->mpt_fcport_page0.Flags & MPI_FCPORTPAGE0_FLAGS_ATTACH_TYPE_MASK) { case MPI_FCPORTPAGE0_FLAGS_ATTACH_NO_INIT: mpt->mpt_fcport_speed = 0; topology = ""; break; case MPI_FCPORTPAGE0_FLAGS_ATTACH_POINT_TO_POINT: topology = "N-Port"; break; case MPI_FCPORTPAGE0_FLAGS_ATTACH_PRIVATE_LOOP: topology = "NL-Port"; break; case MPI_FCPORTPAGE0_FLAGS_ATTACH_FABRIC_DIRECT: topology = "F-Port"; break; case MPI_FCPORTPAGE0_FLAGS_ATTACH_PUBLIC_LOOP: topology = "FL-Port"; break; default: mpt->mpt_fcport_speed = 0; topology = "?"; break; } mpt->scinfo.fc.wwnn = ((uint64_t)mpt->mpt_fcport_page0.WWNN.High << 32) | mpt->mpt_fcport_page0.WWNN.Low; mpt->scinfo.fc.wwpn = ((uint64_t)mpt->mpt_fcport_page0.WWPN.High << 32) | mpt->mpt_fcport_page0.WWPN.Low; mpt->scinfo.fc.portid = mpt->mpt_fcport_page0.PortIdentifier; mpt_lprt(mpt, MPT_PRT_INFO, "FC Port Page 0: Topology <%s> WWNN 0x%16jx WWPN 0x%16jx " "Speed %u-Gbit\n", topology, 
(uintmax_t)mpt->scinfo.fc.wwnn, (uintmax_t)mpt->scinfo.fc.wwpn, mpt->mpt_fcport_speed); MPT_UNLOCK(mpt); ctx = device_get_sysctl_ctx(mpt->dev); tree = device_get_sysctl_tree(mpt->dev); SYSCTL_ADD_QUAD(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, "wwnn", CTLFLAG_RD, &mpt->scinfo.fc.wwnn, "World Wide Node Name"); SYSCTL_ADD_QUAD(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, "wwpn", CTLFLAG_RD, &mpt->scinfo.fc.wwpn, "World Wide Port Name"); MPT_LOCK(mpt); return (0); } /* * Set FC configuration information. */ static int mpt_set_initial_config_fc(struct mpt_softc *mpt) { CONFIG_PAGE_FC_PORT_1 fc; U32 fl; int r, doit = 0; int role; r = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_FC_PORT, 1, 0, &fc.Header, FALSE, 5000); if (r) { mpt_prt(mpt, "failed to read FC page 1 header\n"); return (mpt_fc_reset_link(mpt, 1)); } r = mpt_read_cfg_page(mpt, MPI_CONFIG_ACTION_PAGE_READ_NVRAM, 0, &fc.Header, sizeof (fc), FALSE, 5000); if (r) { mpt_prt(mpt, "failed to read FC page 1\n"); return (mpt_fc_reset_link(mpt, 1)); } mpt2host_config_page_fc_port_1(&fc); /* * Check our flags to make sure we support the role we want. */ doit = 0; role = 0; fl = fc.Flags; if (fl & MPI_FCPORTPAGE1_FLAGS_PROT_FCP_INIT) { role |= MPT_ROLE_INITIATOR; } if (fl & MPI_FCPORTPAGE1_FLAGS_PROT_FCP_TARG) { role |= MPT_ROLE_TARGET; } fl &= ~MPI_FCPORTPAGE1_FLAGS_PROT_MASK; if (mpt->do_cfg_role == 0) { role = mpt->cfg_role; } else { mpt->do_cfg_role = 0; } if (role != mpt->cfg_role) { if (mpt->cfg_role & MPT_ROLE_INITIATOR) { if ((role & MPT_ROLE_INITIATOR) == 0) { mpt_prt(mpt, "adding initiator role\n"); fl |= MPI_FCPORTPAGE1_FLAGS_PROT_FCP_INIT; doit++; } else { mpt_prt(mpt, "keeping initiator role\n"); } } else if (role & MPT_ROLE_INITIATOR) { mpt_prt(mpt, "removing initiator role\n"); doit++; } if (mpt->cfg_role & MPT_ROLE_TARGET) { if ((role & MPT_ROLE_TARGET) == 0) { mpt_prt(mpt, "adding target role\n"); fl |= MPI_FCPORTPAGE1_FLAGS_PROT_FCP_TARG; doit++; } else { mpt_prt(mpt, "keeping target role\n"); } } else if (role & MPT_ROLE_TARGET) { mpt_prt(mpt, "removing target role\n"); doit++; } mpt->role = mpt->cfg_role; } if (fl & MPI_FCPORTPAGE1_FLAGS_PROT_FCP_TARG) { if ((fl & MPI_FCPORTPAGE1_FLAGS_TARGET_MODE_OXID) == 0) { mpt_prt(mpt, "adding OXID option\n"); fl |= MPI_FCPORTPAGE1_FLAGS_TARGET_MODE_OXID; doit++; } } if (doit) { fc.Flags = fl; host2mpt_config_page_fc_port_1(&fc); r = mpt_write_cfg_page(mpt, MPI_CONFIG_ACTION_PAGE_WRITE_NVRAM, 0, &fc.Header, sizeof(fc), FALSE, 5000); if (r != 0) { mpt_prt(mpt, "failed to update NVRAM with changes\n"); return (0); } mpt_prt(mpt, "NOTE: NVRAM changes will not take " "effect until next reboot or IOC reset\n"); } return (0); } static int mptsas_sas_io_unit_pg0(struct mpt_softc *mpt, struct mptsas_portinfo *portinfo) { ConfigExtendedPageHeader_t hdr; struct mptsas_phyinfo *phyinfo; SasIOUnitPage0_t *buffer; int error, len, i; error = mpt_read_extcfg_header(mpt, MPI_SASIOUNITPAGE0_PAGEVERSION, 0, 0, MPI_CONFIG_EXTPAGETYPE_SAS_IO_UNIT, &hdr, 0, 10000); if (error) goto out; if (hdr.ExtPageLength == 0) { error = ENXIO; goto out; } len = hdr.ExtPageLength * 4; buffer = malloc(len, M_DEVBUF, M_NOWAIT|M_ZERO); if (buffer == NULL) { error = ENOMEM; goto out; } error = mpt_read_extcfg_page(mpt, MPI_CONFIG_ACTION_PAGE_READ_CURRENT, 0, &hdr, buffer, len, 0, 10000); if (error) { free(buffer, M_DEVBUF); goto out; } portinfo->num_phys = buffer->NumPhys; - portinfo->phy_info = malloc(sizeof(*portinfo->phy_info) * - portinfo->num_phys, M_DEVBUF, M_NOWAIT|M_ZERO); + portinfo->phy_info = mallocarray(portinfo->num_phys, 
+ sizeof(*portinfo->phy_info), M_DEVBUF, M_NOWAIT|M_ZERO); if (portinfo->phy_info == NULL) { free(buffer, M_DEVBUF); error = ENOMEM; goto out; } for (i = 0; i < portinfo->num_phys; i++) { phyinfo = &portinfo->phy_info[i]; phyinfo->phy_num = i; phyinfo->port_id = buffer->PhyData[i].Port; phyinfo->negotiated_link_rate = buffer->PhyData[i].NegotiatedLinkRate; phyinfo->handle = le16toh(buffer->PhyData[i].ControllerDevHandle); } free(buffer, M_DEVBUF); out: return (error); } static int mptsas_sas_phy_pg0(struct mpt_softc *mpt, struct mptsas_phyinfo *phy_info, uint32_t form, uint32_t form_specific) { ConfigExtendedPageHeader_t hdr; SasPhyPage0_t *buffer; int error; error = mpt_read_extcfg_header(mpt, MPI_SASPHY0_PAGEVERSION, 0, 0, MPI_CONFIG_EXTPAGETYPE_SAS_PHY, &hdr, 0, 10000); if (error) goto out; if (hdr.ExtPageLength == 0) { error = ENXIO; goto out; } buffer = malloc(sizeof(SasPhyPage0_t), M_DEVBUF, M_NOWAIT|M_ZERO); if (buffer == NULL) { error = ENOMEM; goto out; } error = mpt_read_extcfg_page(mpt, MPI_CONFIG_ACTION_PAGE_READ_CURRENT, form + form_specific, &hdr, buffer, sizeof(SasPhyPage0_t), 0, 10000); if (error) { free(buffer, M_DEVBUF); goto out; } phy_info->hw_link_rate = buffer->HwLinkRate; phy_info->programmed_link_rate = buffer->ProgrammedLinkRate; phy_info->identify.dev_handle = le16toh(buffer->OwnerDevHandle); phy_info->attached.dev_handle = le16toh(buffer->AttachedDevHandle); free(buffer, M_DEVBUF); out: return (error); } static int mptsas_sas_device_pg0(struct mpt_softc *mpt, struct mptsas_devinfo *device_info, uint32_t form, uint32_t form_specific) { ConfigExtendedPageHeader_t hdr; SasDevicePage0_t *buffer; uint64_t sas_address; int error = 0; bzero(device_info, sizeof(*device_info)); error = mpt_read_extcfg_header(mpt, MPI_SASDEVICE0_PAGEVERSION, 0, 0, MPI_CONFIG_EXTPAGETYPE_SAS_DEVICE, &hdr, 0, 10000); if (error) goto out; if (hdr.ExtPageLength == 0) { error = ENXIO; goto out; } buffer = malloc(sizeof(SasDevicePage0_t), M_DEVBUF, M_NOWAIT|M_ZERO); if (buffer == NULL) { error = ENOMEM; goto out; } error = mpt_read_extcfg_page(mpt, MPI_CONFIG_ACTION_PAGE_READ_CURRENT, form + form_specific, &hdr, buffer, sizeof(SasDevicePage0_t), 0, 10000); if (error) { free(buffer, M_DEVBUF); goto out; } device_info->dev_handle = le16toh(buffer->DevHandle); device_info->parent_dev_handle = le16toh(buffer->ParentDevHandle); device_info->enclosure_handle = le16toh(buffer->EnclosureHandle); device_info->slot = le16toh(buffer->Slot); device_info->phy_num = buffer->PhyNum; device_info->physical_port = buffer->PhysicalPort; device_info->target_id = buffer->TargetID; device_info->bus = buffer->Bus; bcopy(&buffer->SASAddress, &sas_address, sizeof(uint64_t)); device_info->sas_address = le64toh(sas_address); device_info->device_info = le32toh(buffer->DeviceInfo); free(buffer, M_DEVBUF); out: return (error); } /* * Read SAS configuration information. Nothing to do yet. 
*/ static int mpt_read_config_info_sas(struct mpt_softc *mpt) { struct mptsas_portinfo *portinfo; struct mptsas_phyinfo *phyinfo; int error, i; portinfo = malloc(sizeof(*portinfo), M_DEVBUF, M_NOWAIT|M_ZERO); if (portinfo == NULL) return (ENOMEM); error = mptsas_sas_io_unit_pg0(mpt, portinfo); if (error) { free(portinfo, M_DEVBUF); return (0); } for (i = 0; i < portinfo->num_phys; i++) { phyinfo = &portinfo->phy_info[i]; error = mptsas_sas_phy_pg0(mpt, phyinfo, (MPI_SAS_PHY_PGAD_FORM_PHY_NUMBER << MPI_SAS_PHY_PGAD_FORM_SHIFT), i); if (error) break; error = mptsas_sas_device_pg0(mpt, &phyinfo->identify, (MPI_SAS_DEVICE_PGAD_FORM_HANDLE << MPI_SAS_DEVICE_PGAD_FORM_SHIFT), phyinfo->handle); if (error) break; phyinfo->identify.phy_num = phyinfo->phy_num = i; if (phyinfo->attached.dev_handle) error = mptsas_sas_device_pg0(mpt, &phyinfo->attached, (MPI_SAS_DEVICE_PGAD_FORM_HANDLE << MPI_SAS_DEVICE_PGAD_FORM_SHIFT), phyinfo->attached.dev_handle); if (error) break; } mpt->sas_portinfo = portinfo; return (0); } static void mptsas_set_sata_wc(struct mpt_softc *mpt, struct mptsas_devinfo *devinfo, int enabled) { SataPassthroughRequest_t *pass; request_t *req; int error, status; req = mpt_get_request(mpt, 0); if (req == NULL) return; pass = req->req_vbuf; bzero(pass, sizeof(SataPassthroughRequest_t)); pass->Function = MPI_FUNCTION_SATA_PASSTHROUGH; pass->TargetID = devinfo->target_id; pass->Bus = devinfo->bus; pass->PassthroughFlags = 0; pass->ConnectionRate = MPI_SATA_PT_REQ_CONNECT_RATE_NEGOTIATED; pass->DataLength = 0; pass->MsgContext = htole32(req->index | sata_pass_handler_id); pass->CommandFIS[0] = 0x27; pass->CommandFIS[1] = 0x80; pass->CommandFIS[2] = 0xef; pass->CommandFIS[3] = (enabled) ? 0x02 : 0x82; pass->CommandFIS[7] = 0x40; pass->CommandFIS[15] = 0x08; mpt_check_doorbell(mpt); mpt_send_cmd(mpt, req); error = mpt_wait_req(mpt, req, REQ_STATE_DONE, REQ_STATE_DONE, 0, 10 * 1000); if (error) { mpt_free_request(mpt, req); printf("error %d sending passthrough\n", error); return; } status = le16toh(req->IOCStatus); if (status != MPI_IOCSTATUS_SUCCESS) { mpt_free_request(mpt, req); printf("IOCSTATUS %d\n", status); return; } mpt_free_request(mpt, req); } /* * Set SAS configuration information. Nothing to do yet. */ static int mpt_set_initial_config_sas(struct mpt_softc *mpt) { struct mptsas_phyinfo *phyinfo; int i; if ((mpt_enable_sata_wc != -1) && (mpt->sas_portinfo != NULL)) { for (i = 0; i < mpt->sas_portinfo->num_phys; i++) { phyinfo = &mpt->sas_portinfo->phy_info[i]; if (phyinfo->attached.dev_handle == 0) continue; if ((phyinfo->attached.device_info & MPI_SAS_DEVICE_INFO_SATA_DEVICE) == 0) continue; if (bootverbose) device_printf(mpt->dev, "%sabling SATA WC on phy %d\n", (mpt_enable_sata_wc) ? 
"En" : "Dis", i); mptsas_set_sata_wc(mpt, &phyinfo->attached, mpt_enable_sata_wc); } } return (0); } static int mpt_sata_pass_reply_handler(struct mpt_softc *mpt, request_t *req, uint32_t reply_desc, MSG_DEFAULT_REPLY *reply_frame) { if (req != NULL) { if (reply_frame != NULL) { req->IOCStatus = le16toh(reply_frame->IOCStatus); } req->state &= ~REQ_STATE_QUEUED; req->state |= REQ_STATE_DONE; TAILQ_REMOVE(&mpt->request_pending_list, req, links); if ((req->state & REQ_STATE_NEED_WAKEUP) != 0) { wakeup(req); } else if ((req->state & REQ_STATE_TIMEDOUT) != 0) { /* * Whew- we can free this request (late completion) */ mpt_free_request(mpt, req); } } return (TRUE); } /* * Read SCSI configuration information */ static int mpt_read_config_info_spi(struct mpt_softc *mpt) { int rv, i; rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_SCSI_PORT, 0, 0, &mpt->mpt_port_page0.Header, FALSE, 5000); if (rv) { return (-1); } mpt_lprt(mpt, MPT_PRT_DEBUG, "SPI Port Page 0 Header: %x %x %x %x\n", mpt->mpt_port_page0.Header.PageVersion, mpt->mpt_port_page0.Header.PageLength, mpt->mpt_port_page0.Header.PageNumber, mpt->mpt_port_page0.Header.PageType); rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_SCSI_PORT, 1, 0, &mpt->mpt_port_page1.Header, FALSE, 5000); if (rv) { return (-1); } mpt_lprt(mpt, MPT_PRT_DEBUG, "SPI Port Page 1 Header: %x %x %x %x\n", mpt->mpt_port_page1.Header.PageVersion, mpt->mpt_port_page1.Header.PageLength, mpt->mpt_port_page1.Header.PageNumber, mpt->mpt_port_page1.Header.PageType); rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_SCSI_PORT, 2, 0, &mpt->mpt_port_page2.Header, FALSE, 5000); if (rv) { return (-1); } mpt_lprt(mpt, MPT_PRT_DEBUG, "SPI Port Page 2 Header: %x %x %x %x\n", mpt->mpt_port_page2.Header.PageVersion, mpt->mpt_port_page2.Header.PageLength, mpt->mpt_port_page2.Header.PageNumber, mpt->mpt_port_page2.Header.PageType); for (i = 0; i < 16; i++) { rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_SCSI_DEVICE, 0, i, &mpt->mpt_dev_page0[i].Header, FALSE, 5000); if (rv) { return (-1); } mpt_lprt(mpt, MPT_PRT_DEBUG, "SPI Target %d Device Page 0 Header: %x %x %x %x\n", i, mpt->mpt_dev_page0[i].Header.PageVersion, mpt->mpt_dev_page0[i].Header.PageLength, mpt->mpt_dev_page0[i].Header.PageNumber, mpt->mpt_dev_page0[i].Header.PageType); rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_SCSI_DEVICE, 1, i, &mpt->mpt_dev_page1[i].Header, FALSE, 5000); if (rv) { return (-1); } mpt_lprt(mpt, MPT_PRT_DEBUG, "SPI Target %d Device Page 1 Header: %x %x %x %x\n", i, mpt->mpt_dev_page1[i].Header.PageVersion, mpt->mpt_dev_page1[i].Header.PageLength, mpt->mpt_dev_page1[i].Header.PageNumber, mpt->mpt_dev_page1[i].Header.PageType); } /* * At this point, we don't *have* to fail. As long as we have * valid config header information, we can (barely) lurch * along. 
*/ rv = mpt_read_cur_cfg_page(mpt, 0, &mpt->mpt_port_page0.Header, sizeof(mpt->mpt_port_page0), FALSE, 5000); if (rv) { mpt_prt(mpt, "failed to read SPI Port Page 0\n"); } else { mpt2host_config_page_scsi_port_0(&mpt->mpt_port_page0); mpt_lprt(mpt, MPT_PRT_NEGOTIATION, "SPI Port Page 0: Capabilities %x PhysicalInterface %x\n", mpt->mpt_port_page0.Capabilities, mpt->mpt_port_page0.PhysicalInterface); } rv = mpt_read_cur_cfg_page(mpt, 0, &mpt->mpt_port_page1.Header, sizeof(mpt->mpt_port_page1), FALSE, 5000); if (rv) { mpt_prt(mpt, "failed to read SPI Port Page 1\n"); } else { mpt2host_config_page_scsi_port_1(&mpt->mpt_port_page1); mpt_lprt(mpt, MPT_PRT_DEBUG, "SPI Port Page 1: Configuration %x OnBusTimerValue %x\n", mpt->mpt_port_page1.Configuration, mpt->mpt_port_page1.OnBusTimerValue); } rv = mpt_read_cur_cfg_page(mpt, 0, &mpt->mpt_port_page2.Header, sizeof(mpt->mpt_port_page2), FALSE, 5000); if (rv) { mpt_prt(mpt, "failed to read SPI Port Page 2\n"); } else { mpt_lprt(mpt, MPT_PRT_NEGOTIATION, "Port Page 2: Flags %x Settings %x\n", mpt->mpt_port_page2.PortFlags, mpt->mpt_port_page2.PortSettings); mpt2host_config_page_scsi_port_2(&mpt->mpt_port_page2); for (i = 0; i < 16; i++) { mpt_lprt(mpt, MPT_PRT_NEGOTIATION, " Port Page 2 Tgt %d: timo %x SF %x Flags %x\n", i, mpt->mpt_port_page2.DeviceSettings[i].Timeout, mpt->mpt_port_page2.DeviceSettings[i].SyncFactor, mpt->mpt_port_page2.DeviceSettings[i].DeviceFlags); } } for (i = 0; i < 16; i++) { rv = mpt_read_cur_cfg_page(mpt, i, &mpt->mpt_dev_page0[i].Header, sizeof(*mpt->mpt_dev_page0), FALSE, 5000); if (rv) { mpt_prt(mpt, "cannot read SPI Target %d Device Page 0\n", i); continue; } mpt2host_config_page_scsi_device_0(&mpt->mpt_dev_page0[i]); mpt_lprt(mpt, MPT_PRT_NEGOTIATION, "target %d page 0: Negotiated Params %x Information %x\n", i, mpt->mpt_dev_page0[i].NegotiatedParameters, mpt->mpt_dev_page0[i].Information); rv = mpt_read_cur_cfg_page(mpt, i, &mpt->mpt_dev_page1[i].Header, sizeof(*mpt->mpt_dev_page1), FALSE, 5000); if (rv) { mpt_prt(mpt, "cannot read SPI Target %d Device Page 1\n", i); continue; } mpt2host_config_page_scsi_device_1(&mpt->mpt_dev_page1[i]); mpt_lprt(mpt, MPT_PRT_NEGOTIATION, "target %d page 1: Requested Params %x Configuration %x\n", i, mpt->mpt_dev_page1[i].RequestedParameters, mpt->mpt_dev_page1[i].Configuration); } return (0); } /* * Validate SPI configuration information. * * In particular, validate SPI Port Page 1. */ static int mpt_set_initial_config_spi(struct mpt_softc *mpt) { int error, i, pp1val; mpt->mpt_disc_enable = 0xff; mpt->mpt_tag_enable = 0; pp1val = ((1 << mpt->mpt_ini_id) << MPI_SCSIPORTPAGE1_CFG_SHIFT_PORT_RESPONSE_ID) | mpt->mpt_ini_id; if (mpt->mpt_port_page1.Configuration != pp1val) { CONFIG_PAGE_SCSI_PORT_1 tmp; mpt_prt(mpt, "SPI Port Page 1 Config value bad (%x)- should " "be %x\n", mpt->mpt_port_page1.Configuration, pp1val); tmp = mpt->mpt_port_page1; tmp.Configuration = pp1val; host2mpt_config_page_scsi_port_1(&tmp); error = mpt_write_cur_cfg_page(mpt, 0, &tmp.Header, sizeof(tmp), FALSE, 5000); if (error) { return (-1); } error = mpt_read_cur_cfg_page(mpt, 0, &tmp.Header, sizeof(tmp), FALSE, 5000); if (error) { return (-1); } mpt2host_config_page_scsi_port_1(&tmp); if (tmp.Configuration != pp1val) { mpt_prt(mpt, "failed to reset SPI Port Page 1 Config value\n"); return (-1); } mpt->mpt_port_page1 = tmp; } /* * The purpose of this exercise is to get * all targets back to async/narrow. * * We skip this step if the BIOS has already negotiated * speeds with the targets. 
*/ i = mpt->mpt_port_page2.PortSettings & MPI_SCSIPORTPAGE2_PORT_MASK_NEGO_MASTER_SETTINGS; if (i == MPI_SCSIPORTPAGE2_PORT_ALL_MASTER_SETTINGS) { mpt_lprt(mpt, MPT_PRT_NEGOTIATION, "honoring BIOS transfer negotiations\n"); } else { for (i = 0; i < 16; i++) { mpt->mpt_dev_page1[i].RequestedParameters = 0; mpt->mpt_dev_page1[i].Configuration = 0; (void) mpt_update_spi_config(mpt, i); } } return (0); } static int mpt_cam_enable(struct mpt_softc *mpt) { int error; MPT_LOCK(mpt); error = EIO; if (mpt->is_fc) { if (mpt_read_config_info_fc(mpt)) { goto out; } if (mpt_set_initial_config_fc(mpt)) { goto out; } } else if (mpt->is_sas) { if (mpt_read_config_info_sas(mpt)) { goto out; } if (mpt_set_initial_config_sas(mpt)) { goto out; } } else if (mpt->is_spi) { if (mpt_read_config_info_spi(mpt)) { goto out; } if (mpt_set_initial_config_spi(mpt)) { goto out; } } error = 0; out: MPT_UNLOCK(mpt); return (error); } static void mpt_cam_ready(struct mpt_softc *mpt) { /* * If we're in target mode, hang out resources now * so we don't cause the world to hang talking to us. */ if (mpt->is_fc && (mpt->role & MPT_ROLE_TARGET)) { /* * Try to add some target command resources */ MPT_LOCK(mpt); if (mpt_add_target_commands(mpt) == FALSE) { mpt_prt(mpt, "failed to add target commands\n"); } MPT_UNLOCK(mpt); } mpt->ready = 1; } static void mpt_cam_detach(struct mpt_softc *mpt) { mpt_handler_t handler; MPT_LOCK(mpt); mpt->ready = 0; mpt_terminate_recovery_thread(mpt); handler.reply_handler = mpt_scsi_reply_handler; mpt_deregister_handler(mpt, MPT_HANDLER_REPLY, handler, scsi_io_handler_id); handler.reply_handler = mpt_scsi_tmf_reply_handler; mpt_deregister_handler(mpt, MPT_HANDLER_REPLY, handler, scsi_tmf_handler_id); handler.reply_handler = mpt_fc_els_reply_handler; mpt_deregister_handler(mpt, MPT_HANDLER_REPLY, handler, fc_els_handler_id); handler.reply_handler = mpt_scsi_tgt_reply_handler; mpt_deregister_handler(mpt, MPT_HANDLER_REPLY, handler, mpt->scsi_tgt_handler_id); handler.reply_handler = mpt_sata_pass_reply_handler; mpt_deregister_handler(mpt, MPT_HANDLER_REPLY, handler, sata_pass_handler_id); if (mpt->tmf_req != NULL) { mpt->tmf_req->state = REQ_STATE_ALLOCATED; mpt_free_request(mpt, mpt->tmf_req); mpt->tmf_req = NULL; } if (mpt->sas_portinfo != NULL) { free(mpt->sas_portinfo, M_DEVBUF); mpt->sas_portinfo = NULL; } if (mpt->sim != NULL) { xpt_free_path(mpt->path); xpt_bus_deregister(cam_sim_path(mpt->sim)); cam_sim_free(mpt->sim, TRUE); mpt->sim = NULL; } if (mpt->phydisk_sim != NULL) { xpt_free_path(mpt->phydisk_path); xpt_bus_deregister(cam_sim_path(mpt->phydisk_sim)); cam_sim_free(mpt->phydisk_sim, TRUE); mpt->phydisk_sim = NULL; } MPT_UNLOCK(mpt); } /* This routine is used after a system crash to dump core onto the swap device. */ static void mpt_poll(struct cam_sim *sim) { struct mpt_softc *mpt; mpt = (struct mpt_softc *)cam_sim_softc(sim); mpt_intr(mpt); } /* * Watchdog timeout routine for SCSI requests. */ static void mpt_timeout(void *arg) { union ccb *ccb; struct mpt_softc *mpt; request_t *req; ccb = (union ccb *)arg; mpt = ccb->ccb_h.ccb_mpt_ptr; MPT_LOCK_ASSERT(mpt); req = ccb->ccb_h.ccb_req_ptr; mpt_prt(mpt, "request %p:%u timed out for ccb %p (req->ccb %p)\n", req, req->serno, ccb, req->ccb); /* XXX: WHAT ARE WE TRYING TO DO HERE? 
*/ if ((req->state & REQ_STATE_QUEUED) == REQ_STATE_QUEUED) { TAILQ_REMOVE(&mpt->request_pending_list, req, links); TAILQ_INSERT_TAIL(&mpt->request_timeout_list, req, links); req->state |= REQ_STATE_TIMEDOUT; mpt_wakeup_recovery_thread(mpt); } } /* * Callback routine from bus_dmamap_load_ccb(9) or, in simple cases, called * directly. * * Takes a list of physical segments and builds the SGL for SCSI IO command * and forwards the command to the IOC after one last check that CAM has not * aborted the transaction. */ static void mpt_execute_req_a64(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error) { request_t *req, *trq; char *mpt_off; union ccb *ccb; struct mpt_softc *mpt; bus_addr_t chain_list_addr; int first_lim, seg, this_seg_lim; uint32_t addr, cur_off, flags, nxt_off, tf; void *sglp = NULL; MSG_REQUEST_HEADER *hdrp; SGE_SIMPLE64 *se; SGE_CHAIN64 *ce; int istgt = 0; req = (request_t *)arg; ccb = req->ccb; mpt = ccb->ccb_h.ccb_mpt_ptr; req = ccb->ccb_h.ccb_req_ptr; hdrp = req->req_vbuf; mpt_off = req->req_vbuf; if (error == 0 && ((uint32_t)nseg) >= mpt->max_seg_cnt) { error = EFBIG; } if (error == 0) { switch (hdrp->Function) { case MPI_FUNCTION_SCSI_IO_REQUEST: case MPI_FUNCTION_RAID_SCSI_IO_PASSTHROUGH: istgt = 0; sglp = &((PTR_MSG_SCSI_IO_REQUEST)hdrp)->SGL; break; case MPI_FUNCTION_TARGET_ASSIST: istgt = 1; sglp = &((PTR_MSG_TARGET_ASSIST_REQUEST)hdrp)->SGL; break; default: mpt_prt(mpt, "bad fct 0x%x in mpt_execute_req_a64\n", hdrp->Function); error = EINVAL; break; } } if (error == 0 && ((uint32_t)nseg) >= mpt->max_seg_cnt) { error = EFBIG; mpt_prt(mpt, "segment count %d too large (max %u)\n", nseg, mpt->max_seg_cnt); } bad: if (error != 0) { if (error != EFBIG && error != ENOMEM) { mpt_prt(mpt, "mpt_execute_req_a64: err %d\n", error); } if ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_INPROG) { cam_status status; mpt_freeze_ccb(ccb); if (error == EFBIG) { status = CAM_REQ_TOO_BIG; } else if (error == ENOMEM) { if (mpt->outofbeer == 0) { mpt->outofbeer = 1; xpt_freeze_simq(mpt->sim, 1); mpt_lprt(mpt, MPT_PRT_DEBUG, "FREEZEQ\n"); } status = CAM_REQUEUE_REQ; } else { status = CAM_REQ_CMP_ERR; } mpt_set_ccb_status(ccb, status); } if (hdrp->Function == MPI_FUNCTION_TARGET_ASSIST) { request_t *cmd_req = MPT_TAG_2_REQ(mpt, ccb->csio.tag_id); MPT_TGT_STATE(mpt, cmd_req)->state = TGT_STATE_IN_CAM; MPT_TGT_STATE(mpt, cmd_req)->ccb = NULL; MPT_TGT_STATE(mpt, cmd_req)->req = NULL; } ccb->ccb_h.status &= ~CAM_SIM_QUEUED; KASSERT(ccb->ccb_h.status, ("zero ccb sts at %d", __LINE__)); xpt_done(ccb); mpt_free_request(mpt, req); return; } /* * No data to transfer? * Just make a single simple SGL with zero length. 
*/ if (mpt->verbose >= MPT_PRT_DEBUG) { int tidx = ((char *)sglp) - mpt_off; memset(&mpt_off[tidx], 0xff, MPT_REQUEST_AREA - tidx); } if (nseg == 0) { SGE_SIMPLE32 *se1 = (SGE_SIMPLE32 *) sglp; MPI_pSGE_SET_FLAGS(se1, (MPI_SGE_FLAGS_LAST_ELEMENT | MPI_SGE_FLAGS_END_OF_BUFFER | MPI_SGE_FLAGS_SIMPLE_ELEMENT | MPI_SGE_FLAGS_END_OF_LIST)); se1->FlagsLength = htole32(se1->FlagsLength); goto out; } flags = MPI_SGE_FLAGS_SIMPLE_ELEMENT | MPI_SGE_FLAGS_64_BIT_ADDRESSING; if (istgt == 0) { if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_OUT) { flags |= MPI_SGE_FLAGS_HOST_TO_IOC; } } else { if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) { flags |= MPI_SGE_FLAGS_HOST_TO_IOC; } } if ((ccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE) { bus_dmasync_op_t op; if (istgt == 0) { if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) { op = BUS_DMASYNC_PREREAD; } else { op = BUS_DMASYNC_PREWRITE; } } else { if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) { op = BUS_DMASYNC_PREWRITE; } else { op = BUS_DMASYNC_PREREAD; } } bus_dmamap_sync(mpt->buffer_dmat, req->dmap, op); } /* * Okay, fill in what we can at the end of the command frame. * If we have up to MPT_NSGL_FIRST, we can fit them all into * the command frame. * * Otherwise, we fill up through MPT_NSGL_FIRST less one * SIMPLE64 pointers and start doing CHAIN64 entries after * that. */ if (nseg < MPT_NSGL_FIRST(mpt)) { first_lim = nseg; } else { /* * Leave room for CHAIN element */ first_lim = MPT_NSGL_FIRST(mpt) - 1; } se = (SGE_SIMPLE64 *) sglp; for (seg = 0; seg < first_lim; seg++, se++, dm_segs++) { tf = flags; memset(se, 0, sizeof (*se)); MPI_pSGE_SET_LENGTH(se, dm_segs->ds_len); se->Address.Low = htole32(dm_segs->ds_addr & 0xffffffff); if (sizeof(bus_addr_t) > 4) { addr = ((uint64_t)dm_segs->ds_addr) >> 32; /* SAS1078 36GB limitation WAR */ if (mpt->is_1078 && (((uint64_t)dm_segs->ds_addr + MPI_SGE_LENGTH(se->FlagsLength)) >> 32) == 9) { addr |= (1U << 31); tf |= MPI_SGE_FLAGS_LOCAL_ADDRESS; } se->Address.High = htole32(addr); } if (seg == first_lim - 1) { tf |= MPI_SGE_FLAGS_LAST_ELEMENT; } if (seg == nseg - 1) { tf |= MPI_SGE_FLAGS_END_OF_LIST | MPI_SGE_FLAGS_END_OF_BUFFER; } MPI_pSGE_SET_FLAGS(se, tf); se->FlagsLength = htole32(se->FlagsLength); } if (seg == nseg) { goto out; } /* * Tell the IOC where to find the first chain element. */ hdrp->ChainOffset = ((char *)se - (char *)hdrp) >> 2; nxt_off = MPT_RQSL(mpt); trq = req; /* * Make up the rest of the data segments out of a chain element * (contained in the current request frame) which points to * SIMPLE64 elements in the next request frame, possibly ending * with *another* chain element (if there's more). */ while (seg < nseg) { /* * Point to the chain descriptor. Note that the chain * descriptor is at the end of the *previous* list (whether * chain or simple). */ ce = (SGE_CHAIN64 *) se; /* * Before we change our current pointer, make sure we won't * overflow the request area with this frame. Note that we * test against 'greater than' here as it's okay in this case * to have next offset be just outside the request area. */ if ((nxt_off + MPT_RQSL(mpt)) > MPT_REQUEST_AREA) { nxt_off = MPT_REQUEST_AREA; goto next_chain; } /* * Set our SGE element pointer to the beginning of the chain * list and update our next chain list offset. */ se = (SGE_SIMPLE64 *) &mpt_off[nxt_off]; cur_off = nxt_off; nxt_off += MPT_RQSL(mpt); /* * Now initialize the chain descriptor. */ memset(ce, 0, sizeof (*ce)); /* * Get the physical address of the chain list. 
*/ chain_list_addr = trq->req_pbuf; chain_list_addr += cur_off; if (sizeof (bus_addr_t) > 4) { ce->Address.High = htole32(((uint64_t)chain_list_addr) >> 32); } ce->Address.Low = htole32(chain_list_addr & 0xffffffff); ce->Flags = MPI_SGE_FLAGS_CHAIN_ELEMENT | MPI_SGE_FLAGS_64_BIT_ADDRESSING; /* * If we have more than a frame's worth of segments left, * set up the chain list to have the last element be another * chain descriptor. */ if ((nseg - seg) > MPT_NSGL(mpt)) { this_seg_lim = seg + MPT_NSGL(mpt) - 1; /* * The length of the chain is the length in bytes of the * number of segments plus the next chain element. * * The next chain descriptor offset is the length, * in words, of the number of segments. */ ce->Length = (this_seg_lim - seg) * sizeof (SGE_SIMPLE64); ce->NextChainOffset = ce->Length >> 2; ce->Length += sizeof (SGE_CHAIN64); } else { this_seg_lim = nseg; ce->Length = (this_seg_lim - seg) * sizeof (SGE_SIMPLE64); } ce->Length = htole16(ce->Length); /* * Fill in the chain list SGE elements with our segment data. * * If we're the last element in this chain list, set the last * element flag. If we're the completely last element period, * set the end of list and end of buffer flags. */ while (seg < this_seg_lim) { tf = flags; memset(se, 0, sizeof (*se)); MPI_pSGE_SET_LENGTH(se, dm_segs->ds_len); se->Address.Low = htole32(dm_segs->ds_addr & 0xffffffff); if (sizeof (bus_addr_t) > 4) { addr = ((uint64_t)dm_segs->ds_addr) >> 32; /* SAS1078 36GB limitation WAR */ if (mpt->is_1078 && (((uint64_t)dm_segs->ds_addr + MPI_SGE_LENGTH(se->FlagsLength)) >> 32) == 9) { addr |= (1U << 31); tf |= MPI_SGE_FLAGS_LOCAL_ADDRESS; } se->Address.High = htole32(addr); } if (seg == this_seg_lim - 1) { tf |= MPI_SGE_FLAGS_LAST_ELEMENT; } if (seg == nseg - 1) { tf |= MPI_SGE_FLAGS_END_OF_LIST | MPI_SGE_FLAGS_END_OF_BUFFER; } MPI_pSGE_SET_FLAGS(se, tf); se->FlagsLength = htole32(se->FlagsLength); se++; seg++; dm_segs++; } next_chain: /* * If we have more segments to do and we've used up all of * the space in a request area, go allocate another one * and chain to that. */ if (seg < nseg && nxt_off >= MPT_REQUEST_AREA) { request_t *nrq; nrq = mpt_get_request(mpt, FALSE); if (nrq == NULL) { error = ENOMEM; goto bad; } /* * Append the new request area on the tail of our list. */ if ((trq = req->chain) == NULL) { req->chain = nrq; } else { while (trq->chain != NULL) { trq = trq->chain; } trq->chain = nrq; } trq = nrq; mpt_off = trq->req_vbuf; if (mpt->verbose >= MPT_PRT_DEBUG) { memset(mpt_off, 0xff, MPT_REQUEST_AREA); } nxt_off = 0; } } out: /* * Last time we need to check if this CCB needs to be aborted. 
*/ if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_INPROG) { if (hdrp->Function == MPI_FUNCTION_TARGET_ASSIST) { request_t *cmd_req = MPT_TAG_2_REQ(mpt, ccb->csio.tag_id); MPT_TGT_STATE(mpt, cmd_req)->state = TGT_STATE_IN_CAM; MPT_TGT_STATE(mpt, cmd_req)->ccb = NULL; MPT_TGT_STATE(mpt, cmd_req)->req = NULL; } mpt_prt(mpt, "mpt_execute_req_a64: I/O cancelled (status 0x%x)\n", ccb->ccb_h.status & CAM_STATUS_MASK); if (nseg) { bus_dmamap_unload(mpt->buffer_dmat, req->dmap); } ccb->ccb_h.status &= ~CAM_SIM_QUEUED; KASSERT(ccb->ccb_h.status, ("zero ccb sts at %d", __LINE__)); xpt_done(ccb); mpt_free_request(mpt, req); return; } ccb->ccb_h.status |= CAM_SIM_QUEUED; if (ccb->ccb_h.timeout != CAM_TIME_INFINITY) { mpt_req_timeout(req, SBT_1MS * ccb->ccb_h.timeout, mpt_timeout, ccb); } if (mpt->verbose > MPT_PRT_DEBUG) { int nc = 0; mpt_print_request(req->req_vbuf); for (trq = req->chain; trq; trq = trq->chain) { printf(" Additional Chain Area %d\n", nc++); mpt_dump_sgl(trq->req_vbuf, 0); } } if (hdrp->Function == MPI_FUNCTION_TARGET_ASSIST) { request_t *cmd_req = MPT_TAG_2_REQ(mpt, ccb->csio.tag_id); mpt_tgt_state_t *tgt = MPT_TGT_STATE(mpt, cmd_req); #ifdef WE_TRUST_AUTO_GOOD_STATUS if ((ccb->ccb_h.flags & CAM_SEND_STATUS) && csio->scsi_status == SCSI_STATUS_OK && tgt->resid == 0) { tgt->state = TGT_STATE_MOVING_DATA_AND_STATUS; } else { tgt->state = TGT_STATE_MOVING_DATA; } #else tgt->state = TGT_STATE_MOVING_DATA; #endif } mpt_send_cmd(mpt, req); } static void mpt_execute_req(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error) { request_t *req, *trq; char *mpt_off; union ccb *ccb; struct mpt_softc *mpt; int seg, first_lim; uint32_t flags, nxt_off; void *sglp = NULL; MSG_REQUEST_HEADER *hdrp; SGE_SIMPLE32 *se; SGE_CHAIN32 *ce; int istgt = 0; req = (request_t *)arg; ccb = req->ccb; mpt = ccb->ccb_h.ccb_mpt_ptr; req = ccb->ccb_h.ccb_req_ptr; hdrp = req->req_vbuf; mpt_off = req->req_vbuf; if (error == 0 && ((uint32_t)nseg) >= mpt->max_seg_cnt) { error = EFBIG; } if (error == 0) { switch (hdrp->Function) { case MPI_FUNCTION_SCSI_IO_REQUEST: case MPI_FUNCTION_RAID_SCSI_IO_PASSTHROUGH: sglp = &((PTR_MSG_SCSI_IO_REQUEST)hdrp)->SGL; break; case MPI_FUNCTION_TARGET_ASSIST: istgt = 1; sglp = &((PTR_MSG_TARGET_ASSIST_REQUEST)hdrp)->SGL; break; default: mpt_prt(mpt, "bad fct 0x%x in mpt_execute_req\n", hdrp->Function); error = EINVAL; break; } } if (error == 0 && ((uint32_t)nseg) >= mpt->max_seg_cnt) { error = EFBIG; mpt_prt(mpt, "segment count %d too large (max %u)\n", nseg, mpt->max_seg_cnt); } bad: if (error != 0) { if (error != EFBIG && error != ENOMEM) { mpt_prt(mpt, "mpt_execute_req: err %d\n", error); } if ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_INPROG) { cam_status status; mpt_freeze_ccb(ccb); if (error == EFBIG) { status = CAM_REQ_TOO_BIG; } else if (error == ENOMEM) { if (mpt->outofbeer == 0) { mpt->outofbeer = 1; xpt_freeze_simq(mpt->sim, 1); mpt_lprt(mpt, MPT_PRT_DEBUG, "FREEZEQ\n"); } status = CAM_REQUEUE_REQ; } else { status = CAM_REQ_CMP_ERR; } mpt_set_ccb_status(ccb, status); } if (hdrp->Function == MPI_FUNCTION_TARGET_ASSIST) { request_t *cmd_req = MPT_TAG_2_REQ(mpt, ccb->csio.tag_id); MPT_TGT_STATE(mpt, cmd_req)->state = TGT_STATE_IN_CAM; MPT_TGT_STATE(mpt, cmd_req)->ccb = NULL; MPT_TGT_STATE(mpt, cmd_req)->req = NULL; } ccb->ccb_h.status &= ~CAM_SIM_QUEUED; KASSERT(ccb->ccb_h.status, ("zero ccb sts at %d", __LINE__)); xpt_done(ccb); mpt_free_request(mpt, req); return; } /* * No data to transfer? * Just make a single simple SGL with zero length. 
*/ if (mpt->verbose >= MPT_PRT_DEBUG) { int tidx = ((char *)sglp) - mpt_off; memset(&mpt_off[tidx], 0xff, MPT_REQUEST_AREA - tidx); } if (nseg == 0) { SGE_SIMPLE32 *se1 = (SGE_SIMPLE32 *) sglp; MPI_pSGE_SET_FLAGS(se1, (MPI_SGE_FLAGS_LAST_ELEMENT | MPI_SGE_FLAGS_END_OF_BUFFER | MPI_SGE_FLAGS_SIMPLE_ELEMENT | MPI_SGE_FLAGS_END_OF_LIST)); se1->FlagsLength = htole32(se1->FlagsLength); goto out; } flags = MPI_SGE_FLAGS_SIMPLE_ELEMENT; if (istgt == 0) { if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_OUT) { flags |= MPI_SGE_FLAGS_HOST_TO_IOC; } } else { if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) { flags |= MPI_SGE_FLAGS_HOST_TO_IOC; } } if ((ccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE) { bus_dmasync_op_t op; if (istgt) { if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) { op = BUS_DMASYNC_PREREAD; } else { op = BUS_DMASYNC_PREWRITE; } } else { if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) { op = BUS_DMASYNC_PREWRITE; } else { op = BUS_DMASYNC_PREREAD; } } bus_dmamap_sync(mpt->buffer_dmat, req->dmap, op); } /* * Okay, fill in what we can at the end of the command frame. * If we have up to MPT_NSGL_FIRST, we can fit them all into * the command frame. * * Otherwise, we fill up through MPT_NSGL_FIRST less one * SIMPLE32 pointers and start doing CHAIN32 entries after * that. */ if (nseg < MPT_NSGL_FIRST(mpt)) { first_lim = nseg; } else { /* * Leave room for CHAIN element */ first_lim = MPT_NSGL_FIRST(mpt) - 1; } se = (SGE_SIMPLE32 *) sglp; for (seg = 0; seg < first_lim; seg++, se++, dm_segs++) { uint32_t tf; memset(se, 0,sizeof (*se)); se->Address = htole32(dm_segs->ds_addr); MPI_pSGE_SET_LENGTH(se, dm_segs->ds_len); tf = flags; if (seg == first_lim - 1) { tf |= MPI_SGE_FLAGS_LAST_ELEMENT; } if (seg == nseg - 1) { tf |= MPI_SGE_FLAGS_END_OF_LIST | MPI_SGE_FLAGS_END_OF_BUFFER; } MPI_pSGE_SET_FLAGS(se, tf); se->FlagsLength = htole32(se->FlagsLength); } if (seg == nseg) { goto out; } /* * Tell the IOC where to find the first chain element. */ hdrp->ChainOffset = ((char *)se - (char *)hdrp) >> 2; nxt_off = MPT_RQSL(mpt); trq = req; /* * Make up the rest of the data segments out of a chain element * (contained in the current request frame) which points to * SIMPLE32 elements in the next request frame, possibly ending * with *another* chain element (if there's more). */ while (seg < nseg) { int this_seg_lim; uint32_t tf, cur_off; bus_addr_t chain_list_addr; /* * Point to the chain descriptor. Note that the chain * descriptor is at the end of the *previous* list (whether * chain or simple). */ ce = (SGE_CHAIN32 *) se; /* * Before we change our current pointer, make sure we won't * overflow the request area with this frame. Note that we * test against 'greater than' here as it's okay in this case * to have next offset be just outside the request area. */ if ((nxt_off + MPT_RQSL(mpt)) > MPT_REQUEST_AREA) { nxt_off = MPT_REQUEST_AREA; goto next_chain; } /* * Set our SGE element pointer to the beginning of the chain * list and update our next chain list offset. */ se = (SGE_SIMPLE32 *) &mpt_off[nxt_off]; cur_off = nxt_off; nxt_off += MPT_RQSL(mpt); /* * Now initialize the chain descriptor. */ memset(ce, 0, sizeof (*ce)); /* * Get the physical address of the chain list. */ chain_list_addr = trq->req_pbuf; chain_list_addr += cur_off; ce->Address = htole32(chain_list_addr); ce->Flags = MPI_SGE_FLAGS_CHAIN_ELEMENT; /* * If we have more than a frame's worth of segments left, * set up the chain list to have the last element be another * chain descriptor. 
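	 * Each chain list holds at most MPT_NSGL(mpt) elements, so one
	 * slot is reserved for the next chain descriptor whenever more
	 * segments remain than will fit in this list.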
*/ if ((nseg - seg) > MPT_NSGL(mpt)) { this_seg_lim = seg + MPT_NSGL(mpt) - 1; /* * The length of the chain is the length in bytes of the * number of segments plus the next chain element. * * The next chain descriptor offset is the length, * in words, of the number of segments. */ ce->Length = (this_seg_lim - seg) * sizeof (SGE_SIMPLE32); ce->NextChainOffset = ce->Length >> 2; ce->Length += sizeof (SGE_CHAIN32); } else { this_seg_lim = nseg; ce->Length = (this_seg_lim - seg) * sizeof (SGE_SIMPLE32); } ce->Length = htole16(ce->Length); /* * Fill in the chain list SGE elements with our segment data. * * If we're the last element in this chain list, set the last * element flag. If we're the completely last element period, * set the end of list and end of buffer flags. */ while (seg < this_seg_lim) { memset(se, 0, sizeof (*se)); se->Address = htole32(dm_segs->ds_addr); MPI_pSGE_SET_LENGTH(se, dm_segs->ds_len); tf = flags; if (seg == this_seg_lim - 1) { tf |= MPI_SGE_FLAGS_LAST_ELEMENT; } if (seg == nseg - 1) { tf |= MPI_SGE_FLAGS_END_OF_LIST | MPI_SGE_FLAGS_END_OF_BUFFER; } MPI_pSGE_SET_FLAGS(se, tf); se->FlagsLength = htole32(se->FlagsLength); se++; seg++; dm_segs++; } next_chain: /* * If we have more segments to do and we've used up all of * the space in a request area, go allocate another one * and chain to that. */ if (seg < nseg && nxt_off >= MPT_REQUEST_AREA) { request_t *nrq; nrq = mpt_get_request(mpt, FALSE); if (nrq == NULL) { error = ENOMEM; goto bad; } /* * Append the new request area on the tail of our list. */ if ((trq = req->chain) == NULL) { req->chain = nrq; } else { while (trq->chain != NULL) { trq = trq->chain; } trq->chain = nrq; } trq = nrq; mpt_off = trq->req_vbuf; if (mpt->verbose >= MPT_PRT_DEBUG) { memset(mpt_off, 0xff, MPT_REQUEST_AREA); } nxt_off = 0; } } out: /* * Last time we need to check if this CCB needs to be aborted. 
*/ if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_INPROG) { if (hdrp->Function == MPI_FUNCTION_TARGET_ASSIST) { request_t *cmd_req = MPT_TAG_2_REQ(mpt, ccb->csio.tag_id); MPT_TGT_STATE(mpt, cmd_req)->state = TGT_STATE_IN_CAM; MPT_TGT_STATE(mpt, cmd_req)->ccb = NULL; MPT_TGT_STATE(mpt, cmd_req)->req = NULL; } mpt_prt(mpt, "mpt_execute_req: I/O cancelled (status 0x%x)\n", ccb->ccb_h.status & CAM_STATUS_MASK); if (nseg) { bus_dmamap_unload(mpt->buffer_dmat, req->dmap); } ccb->ccb_h.status &= ~CAM_SIM_QUEUED; KASSERT(ccb->ccb_h.status, ("zero ccb sts at %d", __LINE__)); xpt_done(ccb); mpt_free_request(mpt, req); return; } ccb->ccb_h.status |= CAM_SIM_QUEUED; if (ccb->ccb_h.timeout != CAM_TIME_INFINITY) { mpt_req_timeout(req, SBT_1MS * ccb->ccb_h.timeout, mpt_timeout, ccb); } if (mpt->verbose > MPT_PRT_DEBUG) { int nc = 0; mpt_print_request(req->req_vbuf); for (trq = req->chain; trq; trq = trq->chain) { printf(" Additional Chain Area %d\n", nc++); mpt_dump_sgl(trq->req_vbuf, 0); } } if (hdrp->Function == MPI_FUNCTION_TARGET_ASSIST) { request_t *cmd_req = MPT_TAG_2_REQ(mpt, ccb->csio.tag_id); mpt_tgt_state_t *tgt = MPT_TGT_STATE(mpt, cmd_req); #ifdef WE_TRUST_AUTO_GOOD_STATUS if ((ccb->ccb_h.flags & CAM_SEND_STATUS) && csio->scsi_status == SCSI_STATUS_OK && tgt->resid == 0) { tgt->state = TGT_STATE_MOVING_DATA_AND_STATUS; } else { tgt->state = TGT_STATE_MOVING_DATA; } #else tgt->state = TGT_STATE_MOVING_DATA; #endif } mpt_send_cmd(mpt, req); } static void mpt_start(struct cam_sim *sim, union ccb *ccb) { request_t *req; struct mpt_softc *mpt; MSG_SCSI_IO_REQUEST *mpt_req; struct ccb_scsiio *csio = &ccb->csio; struct ccb_hdr *ccbh = &ccb->ccb_h; bus_dmamap_callback_t *cb; target_id_t tgt; int raid_passthru; int error; /* Get the pointer for the physical addapter */ mpt = ccb->ccb_h.ccb_mpt_ptr; raid_passthru = (sim == mpt->phydisk_sim); if ((req = mpt_get_request(mpt, FALSE)) == NULL) { if (mpt->outofbeer == 0) { mpt->outofbeer = 1; xpt_freeze_simq(mpt->sim, 1); mpt_lprt(mpt, MPT_PRT_DEBUG, "FREEZEQ\n"); } ccb->ccb_h.status &= ~CAM_SIM_QUEUED; mpt_set_ccb_status(ccb, CAM_REQUEUE_REQ); xpt_done(ccb); return; } #ifdef INVARIANTS mpt_req_not_spcl(mpt, req, "mpt_start", __LINE__); #endif if (sizeof (bus_addr_t) > 4) { cb = mpt_execute_req_a64; } else { cb = mpt_execute_req; } /* * Link the ccb and the request structure so we can find * the other knowing either the request or the ccb */ req->ccb = ccb; ccb->ccb_h.ccb_req_ptr = req; /* Now we build the command for the IOC */ mpt_req = req->req_vbuf; memset(mpt_req, 0, sizeof (MSG_SCSI_IO_REQUEST)); mpt_req->Function = MPI_FUNCTION_SCSI_IO_REQUEST; if (raid_passthru) { mpt_req->Function = MPI_FUNCTION_RAID_SCSI_IO_PASSTHROUGH; if (mpt_map_physdisk(mpt, ccb, &tgt) != 0) { ccb->ccb_h.status &= ~CAM_SIM_QUEUED; mpt_set_ccb_status(ccb, CAM_DEV_NOT_THERE); xpt_done(ccb); return; } mpt_req->Bus = 0; /* we never set bus here */ } else { tgt = ccb->ccb_h.target_id; mpt_req->Bus = 0; /* XXX */ } mpt_req->SenseBufferLength = (csio->sense_len < MPT_SENSE_SIZE) ? csio->sense_len : MPT_SENSE_SIZE; /* * We use the message context to find the request structure when we * Get the command completion interrupt from the IOC. 
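	 * The context combines the request index with the SCSI I/O
	 * handler id, so the reply can be routed to the right handler
	 * and matched back to this request.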
*/ mpt_req->MsgContext = htole32(req->index | scsi_io_handler_id); /* Which physical device to do the I/O on */ mpt_req->TargetID = tgt; be64enc(mpt_req->LUN, CAM_EXTLUN_BYTE_SWIZZLE(ccb->ccb_h.target_lun)); /* Set the direction of the transfer */ if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) { mpt_req->Control = MPI_SCSIIO_CONTROL_READ; } else if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_OUT) { mpt_req->Control = MPI_SCSIIO_CONTROL_WRITE; } else { mpt_req->Control = MPI_SCSIIO_CONTROL_NODATATRANSFER; } if ((ccb->ccb_h.flags & CAM_TAG_ACTION_VALID) != 0) { switch(ccb->csio.tag_action) { case MSG_HEAD_OF_Q_TAG: mpt_req->Control |= MPI_SCSIIO_CONTROL_HEADOFQ; break; case MSG_ACA_TASK: mpt_req->Control |= MPI_SCSIIO_CONTROL_ACAQ; break; case MSG_ORDERED_Q_TAG: mpt_req->Control |= MPI_SCSIIO_CONTROL_ORDEREDQ; break; case MSG_SIMPLE_Q_TAG: default: mpt_req->Control |= MPI_SCSIIO_CONTROL_SIMPLEQ; break; } } else { if (mpt->is_fc || mpt->is_sas) { mpt_req->Control |= MPI_SCSIIO_CONTROL_SIMPLEQ; } else { /* XXX No such thing for a target doing packetized. */ mpt_req->Control |= MPI_SCSIIO_CONTROL_UNTAGGED; } } if (mpt->is_spi) { if (ccb->ccb_h.flags & CAM_DIS_DISCONNECT) { mpt_req->Control |= MPI_SCSIIO_CONTROL_NO_DISCONNECT; } } mpt_req->Control = htole32(mpt_req->Control); /* Copy the scsi command block into place */ if ((ccb->ccb_h.flags & CAM_CDB_POINTER) != 0) { bcopy(csio->cdb_io.cdb_ptr, mpt_req->CDB, csio->cdb_len); } else { bcopy(csio->cdb_io.cdb_bytes, mpt_req->CDB, csio->cdb_len); } mpt_req->CDBLength = csio->cdb_len; mpt_req->DataLength = htole32(csio->dxfer_len); mpt_req->SenseBufferLowAddr = htole32(req->sense_pbuf); /* * Do a *short* print here if we're set to MPT_PRT_DEBUG */ if (mpt->verbose == MPT_PRT_DEBUG) { U32 df; mpt_prt(mpt, "mpt_start: %s op 0x%x ", (mpt_req->Function == MPI_FUNCTION_SCSI_IO_REQUEST)? "SCSI_IO_REQUEST" : "SCSI_IO_PASSTHRU", mpt_req->CDB[0]); df = mpt_req->Control & MPI_SCSIIO_CONTROL_DATADIRECTION_MASK; if (df != MPI_SCSIIO_CONTROL_NODATATRANSFER) { mpt_prtc(mpt, "(%s %u byte%s ", (df == MPI_SCSIIO_CONTROL_READ)? "read" : "write", csio->dxfer_len, (csio->dxfer_len == 1)? ")" : "s)"); } mpt_prtc(mpt, "tgt %u lun %jx req %p:%u\n", tgt, (uintmax_t)ccb->ccb_h.target_lun, req, req->serno); } error = bus_dmamap_load_ccb(mpt->buffer_dmat, req->dmap, ccb, cb, req, 0); if (error == EINPROGRESS) { /* * So as to maintain ordering, freeze the controller queue * until our mapping is returned. */ xpt_freeze_simq(mpt->sim, 1); ccbh->status |= CAM_RELEASE_SIMQ; } } static int mpt_bus_reset(struct mpt_softc *mpt, target_id_t tgt, lun_id_t lun, int sleep_ok) { int error; uint16_t status; uint8_t response; error = mpt_scsi_send_tmf(mpt, (tgt != CAM_TARGET_WILDCARD || lun != CAM_LUN_WILDCARD) ? MPI_SCSITASKMGMT_TASKTYPE_TARGET_RESET : MPI_SCSITASKMGMT_TASKTYPE_RESET_BUS, mpt->is_fc ? MPI_SCSITASKMGMT_MSGFLAGS_LIP_RESET_OPTION : 0, 0, /* XXX How do I get the channel ID? */ tgt != CAM_TARGET_WILDCARD ? tgt : 0, lun != CAM_LUN_WILDCARD ? lun : 0, 0, sleep_ok); if (error != 0) { /* * mpt_scsi_send_tmf hard resets on failure, so no * need to do so here. */ mpt_prt(mpt, "mpt_bus_reset: mpt_scsi_send_tmf returned %d\n", error); return (EIO); } /* Wait for bus reset to be processed by the IOC. */ error = mpt_wait_req(mpt, mpt->tmf_req, REQ_STATE_DONE, REQ_STATE_DONE, sleep_ok, 5000); status = le16toh(mpt->tmf_req->IOCStatus); response = mpt->tmf_req->ResponseCode; mpt->tmf_req->state = REQ_STATE_FREE; if (error) { mpt_prt(mpt, "mpt_bus_reset: Reset timed-out. 
" "Resetting controller.\n"); mpt_reset(mpt, TRUE); return (ETIMEDOUT); } if ((status & MPI_IOCSTATUS_MASK) != MPI_IOCSTATUS_SUCCESS) { mpt_prt(mpt, "mpt_bus_reset: TMF IOC Status 0x%x. " "Resetting controller.\n", status); mpt_reset(mpt, TRUE); return (EIO); } if (response != MPI_SCSITASKMGMT_RSP_TM_SUCCEEDED && response != MPI_SCSITASKMGMT_RSP_TM_COMPLETE) { mpt_prt(mpt, "mpt_bus_reset: TMF Response 0x%x. " "Resetting controller.\n", response); mpt_reset(mpt, TRUE); return (EIO); } return (0); } static int mpt_fc_reset_link(struct mpt_softc *mpt, int dowait) { int r = 0; request_t *req; PTR_MSG_FC_PRIMITIVE_SEND_REQUEST fc; req = mpt_get_request(mpt, FALSE); if (req == NULL) { return (ENOMEM); } fc = req->req_vbuf; memset(fc, 0, sizeof(*fc)); fc->SendFlags = MPI_FC_PRIM_SEND_FLAGS_RESET_LINK; fc->Function = MPI_FUNCTION_FC_PRIMITIVE_SEND; fc->MsgContext = htole32(req->index | fc_els_handler_id); mpt_send_cmd(mpt, req); if (dowait) { r = mpt_wait_req(mpt, req, REQ_STATE_DONE, REQ_STATE_DONE, FALSE, 60 * 1000); if (r == 0) { mpt_free_request(mpt, req); } } return (r); } static int mpt_cam_event(struct mpt_softc *mpt, request_t *req, MSG_EVENT_NOTIFY_REPLY *msg) { uint32_t data0, data1; data0 = le32toh(msg->Data[0]); data1 = le32toh(msg->Data[1]); switch(msg->Event & 0xFF) { case MPI_EVENT_UNIT_ATTENTION: mpt_prt(mpt, "UNIT ATTENTION: Bus: 0x%02x TargetID: 0x%02x\n", (data0 >> 8) & 0xff, data0 & 0xff); break; case MPI_EVENT_IOC_BUS_RESET: /* We generated a bus reset */ mpt_prt(mpt, "IOC Generated Bus Reset Port: %d\n", (data0 >> 8) & 0xff); xpt_async(AC_BUS_RESET, mpt->path, NULL); break; case MPI_EVENT_EXT_BUS_RESET: /* Someone else generated a bus reset */ mpt_prt(mpt, "External Bus Reset Detected\n"); /* * These replies don't return EventData like the MPI * spec says they do */ xpt_async(AC_BUS_RESET, mpt->path, NULL); break; case MPI_EVENT_RESCAN: { union ccb *ccb; uint32_t pathid; /* * In general this means a device has been added to the loop. */ mpt_prt(mpt, "Rescan Port: %d\n", (data0 >> 8) & 0xff); if (mpt->ready == 0) { break; } if (mpt->phydisk_sim) { pathid = cam_sim_path(mpt->phydisk_sim); } else { pathid = cam_sim_path(mpt->sim); } /* * Allocate a CCB, create a wildcard path for this bus, * and schedule a rescan. */ ccb = xpt_alloc_ccb_nowait(); if (ccb == NULL) { mpt_prt(mpt, "unable to alloc CCB for rescan\n"); break; } if (xpt_create_path(&ccb->ccb_h.path, NULL, pathid, CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) { mpt_prt(mpt, "unable to create path for rescan\n"); xpt_free_ccb(ccb); break; } xpt_rescan(ccb); break; } case MPI_EVENT_LINK_STATUS_CHANGE: mpt_prt(mpt, "Port %d: LinkState: %s\n", (data1 >> 8) & 0xff, ((data0 & 0xff) == 0)? 
"Failed" : "Active"); break; case MPI_EVENT_LOOP_STATE_CHANGE: switch ((data0 >> 16) & 0xff) { case 0x01: mpt_prt(mpt, "Port 0x%x: FC LinkEvent: LIP(%02x,%02x) " "(Loop Initialization)\n", (data1 >> 8) & 0xff, (data0 >> 8) & 0xff, (data0 ) & 0xff); switch ((data0 >> 8) & 0xff) { case 0xF7: if ((data0 & 0xff) == 0xF7) { mpt_prt(mpt, "Device needs AL_PA\n"); } else { mpt_prt(mpt, "Device %02x doesn't like " "FC performance\n", data0 & 0xFF); } break; case 0xF8: if ((data0 & 0xff) == 0xF7) { mpt_prt(mpt, "Device had loop failure " "at its receiver prior to acquiring" " AL_PA\n"); } else { mpt_prt(mpt, "Device %02x detected loop" " failure at its receiver\n", data0 & 0xFF); } break; default: mpt_prt(mpt, "Device %02x requests that device " "%02x reset itself\n", data0 & 0xFF, (data0 >> 8) & 0xFF); break; } break; case 0x02: mpt_prt(mpt, "Port 0x%x: FC LinkEvent: " "LPE(%02x,%02x) (Loop Port Enable)\n", (data1 >> 8) & 0xff, /* Port */ (data0 >> 8) & 0xff, /* Character 3 */ (data0 ) & 0xff /* Character 4 */); break; case 0x03: mpt_prt(mpt, "Port 0x%x: FC LinkEvent: " "LPB(%02x,%02x) (Loop Port Bypass)\n", (data1 >> 8) & 0xff, /* Port */ (data0 >> 8) & 0xff, /* Character 3 */ (data0 ) & 0xff /* Character 4 */); break; default: mpt_prt(mpt, "Port 0x%x: FC LinkEvent: Unknown " "FC event (%02x %02x %02x)\n", (data1 >> 8) & 0xff, /* Port */ (data0 >> 16) & 0xff, /* Event */ (data0 >> 8) & 0xff, /* Character 3 */ (data0 ) & 0xff /* Character 4 */); } break; case MPI_EVENT_LOGOUT: mpt_prt(mpt, "FC Logout Port: %d N_PortID: %02x\n", (data1 >> 8) & 0xff, data0); break; case MPI_EVENT_QUEUE_FULL: { struct cam_sim *sim; struct cam_path *tmppath; struct ccb_relsim crs; PTR_EVENT_DATA_QUEUE_FULL pqf; lun_id_t lun_id; pqf = (PTR_EVENT_DATA_QUEUE_FULL)msg->Data; pqf->CurrentDepth = le16toh(pqf->CurrentDepth); if (bootverbose) { mpt_prt(mpt, "QUEUE FULL EVENT: Bus 0x%02x Target 0x%02x " "Depth %d\n", pqf->Bus, pqf->TargetID, pqf->CurrentDepth); } if (mpt->phydisk_sim && mpt_is_raid_member(mpt, pqf->TargetID) != 0) { sim = mpt->phydisk_sim; } else { sim = mpt->sim; } for (lun_id = 0; lun_id < MPT_MAX_LUNS; lun_id++) { if (xpt_create_path(&tmppath, NULL, cam_sim_path(sim), pqf->TargetID, lun_id) != CAM_REQ_CMP) { mpt_prt(mpt, "unable to create a path to send " "XPT_REL_SIMQ"); break; } xpt_setup_ccb(&crs.ccb_h, tmppath, 5); crs.ccb_h.func_code = XPT_REL_SIMQ; crs.ccb_h.flags = CAM_DEV_QFREEZE; crs.release_flags = RELSIM_ADJUST_OPENINGS; crs.openings = pqf->CurrentDepth - 1; xpt_action((union ccb *)&crs); if (crs.ccb_h.status != CAM_REQ_CMP) { mpt_prt(mpt, "XPT_REL_SIMQ failed\n"); } xpt_free_path(tmppath); } break; } case MPI_EVENT_IR_RESYNC_UPDATE: mpt_prt(mpt, "IR resync update %d completed\n", (data0 >> 16) & 0xff); break; case MPI_EVENT_SAS_DEVICE_STATUS_CHANGE: { union ccb *ccb; struct cam_sim *sim; struct cam_path *tmppath; PTR_EVENT_DATA_SAS_DEVICE_STATUS_CHANGE psdsc; psdsc = (PTR_EVENT_DATA_SAS_DEVICE_STATUS_CHANGE)msg->Data; if (mpt->phydisk_sim && mpt_is_raid_member(mpt, psdsc->TargetID) != 0) sim = mpt->phydisk_sim; else sim = mpt->sim; switch(psdsc->ReasonCode) { case MPI_EVENT_SAS_DEV_STAT_RC_ADDED: ccb = xpt_alloc_ccb_nowait(); if (ccb == NULL) { mpt_prt(mpt, "unable to alloc CCB for rescan\n"); break; } if (xpt_create_path(&ccb->ccb_h.path, NULL, cam_sim_path(sim), psdsc->TargetID, CAM_LUN_WILDCARD) != CAM_REQ_CMP) { mpt_prt(mpt, "unable to create path for rescan\n"); xpt_free_ccb(ccb); break; } xpt_rescan(ccb); break; case MPI_EVENT_SAS_DEV_STAT_RC_NOT_RESPONDING: if (xpt_create_path(&tmppath, 
NULL, cam_sim_path(sim), psdsc->TargetID, CAM_LUN_WILDCARD) != CAM_REQ_CMP) { mpt_prt(mpt, "unable to create path for async event"); break; } xpt_async(AC_LOST_DEVICE, tmppath, NULL); xpt_free_path(tmppath); break; case MPI_EVENT_SAS_DEV_STAT_RC_CMPL_INTERNAL_DEV_RESET: case MPI_EVENT_SAS_DEV_STAT_RC_CMPL_TASK_ABORT_INTERNAL: case MPI_EVENT_SAS_DEV_STAT_RC_INTERNAL_DEVICE_RESET: break; default: mpt_lprt(mpt, MPT_PRT_WARN, "SAS device status change: Bus: 0x%02x TargetID: " "0x%02x ReasonCode: 0x%02x\n", psdsc->Bus, psdsc->TargetID, psdsc->ReasonCode); break; } break; } case MPI_EVENT_SAS_DISCOVERY_ERROR: { PTR_EVENT_DATA_DISCOVERY_ERROR pde; pde = (PTR_EVENT_DATA_DISCOVERY_ERROR)msg->Data; pde->DiscoveryStatus = le32toh(pde->DiscoveryStatus); mpt_lprt(mpt, MPT_PRT_WARN, "SAS discovery error: Port: 0x%02x Status: 0x%08x\n", pde->Port, pde->DiscoveryStatus); break; } case MPI_EVENT_EVENT_CHANGE: case MPI_EVENT_INTEGRATED_RAID: case MPI_EVENT_IR2: case MPI_EVENT_LOG_ENTRY_ADDED: case MPI_EVENT_SAS_DISCOVERY: case MPI_EVENT_SAS_PHY_LINK_STATUS: case MPI_EVENT_SAS_SES: break; default: mpt_lprt(mpt, MPT_PRT_WARN, "mpt_cam_event: 0x%x\n", msg->Event & 0xFF); return (0); } return (1); } /* * Reply path for all SCSI I/O requests, called from our * interrupt handler by extracting our handler index from * the MsgContext field of the reply from the IOC. * * This routine is optimized for the common case of a * completion without error. All exception handling is * offloaded to non-inlined helper routines to minimize * cache footprint. */ static int mpt_scsi_reply_handler(struct mpt_softc *mpt, request_t *req, uint32_t reply_desc, MSG_DEFAULT_REPLY *reply_frame) { MSG_SCSI_IO_REQUEST *scsi_req; union ccb *ccb; if (req->state == REQ_STATE_FREE) { mpt_prt(mpt, "mpt_scsi_reply_handler: req already free\n"); return (TRUE); } scsi_req = (MSG_SCSI_IO_REQUEST *)req->req_vbuf; ccb = req->ccb; if (ccb == NULL) { mpt_prt(mpt, "mpt_scsi_reply_handler: req %p:%u with no ccb\n", req, req->serno); return (TRUE); } mpt_req_untimeout(req, mpt_timeout, ccb); ccb->ccb_h.status &= ~CAM_SIM_QUEUED; if ((ccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE) { bus_dmasync_op_t op; if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) op = BUS_DMASYNC_POSTREAD; else op = BUS_DMASYNC_POSTWRITE; bus_dmamap_sync(mpt->buffer_dmat, req->dmap, op); bus_dmamap_unload(mpt->buffer_dmat, req->dmap); } if (reply_frame == NULL) { /* * Context only reply, completion without error status. */ ccb->csio.resid = 0; mpt_set_ccb_status(ccb, CAM_REQ_CMP); ccb->csio.scsi_status = SCSI_STATUS_OK; } else { mpt_scsi_reply_frame_handler(mpt, req, reply_frame); } if (mpt->outofbeer) { ccb->ccb_h.status |= CAM_RELEASE_SIMQ; mpt->outofbeer = 0; mpt_lprt(mpt, MPT_PRT_DEBUG, "THAWQ\n"); } if (scsi_req->CDB[0] == INQUIRY && (scsi_req->CDB[1] & SI_EVPD) == 0) { struct scsi_inquiry_data *iq = (struct scsi_inquiry_data *)ccb->csio.data_ptr; if (scsi_req->Function == MPI_FUNCTION_RAID_SCSI_IO_PASSTHROUGH) { /* * Fake out the device type so that only the * pass-thru device will attach. 
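	 * Rewriting the peripheral device type to T_NODEVICE in the
	 * returned INQUIRY data keeps the normal peripheral drivers from
	 * attaching to disks that are only reachable through the RAID
	 * pass-thru SIM.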
*/ iq->device &= ~0x1F; iq->device |= T_NODEVICE; } } if (mpt->verbose == MPT_PRT_DEBUG) { mpt_prt(mpt, "mpt_scsi_reply_handler: %p:%u complete\n", req, req->serno); } KASSERT(ccb->ccb_h.status, ("zero ccb sts at %d", __LINE__)); xpt_done(ccb); if ((req->state & REQ_STATE_TIMEDOUT) == 0) { TAILQ_REMOVE(&mpt->request_pending_list, req, links); } else { mpt_prt(mpt, "completing timedout/aborted req %p:%u\n", req, req->serno); TAILQ_REMOVE(&mpt->request_timeout_list, req, links); } KASSERT((req->state & REQ_STATE_NEED_WAKEUP) == 0, ("CCB req needed wakeup")); #ifdef INVARIANTS mpt_req_not_spcl(mpt, req, "mpt_scsi_reply_handler", __LINE__); #endif mpt_free_request(mpt, req); return (TRUE); } static int mpt_scsi_tmf_reply_handler(struct mpt_softc *mpt, request_t *req, uint32_t reply_desc, MSG_DEFAULT_REPLY *reply_frame) { MSG_SCSI_TASK_MGMT_REPLY *tmf_reply; KASSERT(req == mpt->tmf_req, ("TMF Reply not using mpt->tmf_req")); #ifdef INVARIANTS mpt_req_not_spcl(mpt, req, "mpt_scsi_tmf_reply_handler", __LINE__); #endif tmf_reply = (MSG_SCSI_TASK_MGMT_REPLY *)reply_frame; /* Record IOC Status and Response Code of TMF for any waiters. */ req->IOCStatus = le16toh(tmf_reply->IOCStatus); req->ResponseCode = tmf_reply->ResponseCode; mpt_lprt(mpt, MPT_PRT_DEBUG, "TMF complete: req %p:%u status 0x%x\n", req, req->serno, le16toh(tmf_reply->IOCStatus)); TAILQ_REMOVE(&mpt->request_pending_list, req, links); if ((req->state & REQ_STATE_NEED_WAKEUP) != 0) { req->state |= REQ_STATE_DONE; wakeup(req); } else { mpt->tmf_req->state = REQ_STATE_FREE; } return (TRUE); } /* * XXX: Move to definitions file */ #define ELS 0x22 #define FC4LS 0x32 #define ABTS 0x81 #define BA_ACC 0x84 #define LS_RJT 0x01 #define LS_ACC 0x02 #define PLOGI 0x03 #define LOGO 0x05 #define SRR 0x14 #define PRLI 0x20 #define PRLO 0x21 #define ADISC 0x52 #define RSCN 0x61 static void mpt_fc_els_send_response(struct mpt_softc *mpt, request_t *req, PTR_MSG_LINK_SERVICE_BUFFER_POST_REPLY rp, U8 length) { uint32_t fl; MSG_LINK_SERVICE_RSP_REQUEST tmp; PTR_MSG_LINK_SERVICE_RSP_REQUEST rsp; /* * We are going to reuse the ELS request to send this response back. */ rsp = &tmp; memset(rsp, 0, sizeof(*rsp)); #ifdef USE_IMMEDIATE_LINK_DATA /* * Apparently the IMMEDIATE stuff doesn't seem to work. */ rsp->RspFlags = LINK_SERVICE_RSP_FLAGS_IMMEDIATE; #endif rsp->RspLength = length; rsp->Function = MPI_FUNCTION_FC_LINK_SRVC_RSP; rsp->MsgContext = htole32(req->index | fc_els_handler_id); /* * Copy over information from the original reply frame to * it's correct place in the response. */ memcpy((U8 *)rsp + 0x0c, (U8 *)rp + 0x1c, 24); /* * And now copy back the temporary area to the original frame. */ memcpy(req->req_vbuf, rsp, sizeof (MSG_LINK_SERVICE_RSP_REQUEST)); rsp = req->req_vbuf; #ifdef USE_IMMEDIATE_LINK_DATA memcpy((U8 *)&rsp->SGL, &((U8 *)req->req_vbuf)[MPT_RQSL(mpt)], length); #else { PTR_SGE_SIMPLE32 se = (PTR_SGE_SIMPLE32) &rsp->SGL; bus_addr_t paddr = req->req_pbuf; paddr += MPT_RQSL(mpt); fl = MPI_SGE_FLAGS_HOST_TO_IOC | MPI_SGE_FLAGS_SIMPLE_ELEMENT | MPI_SGE_FLAGS_LAST_ELEMENT | MPI_SGE_FLAGS_END_OF_LIST | MPI_SGE_FLAGS_END_OF_BUFFER; fl <<= MPI_SGE_FLAGS_SHIFT; fl |= (length); se->FlagsLength = htole32(fl); se->Address = htole32((uint32_t) paddr); } #endif /* * Send it on... 
*/ mpt_send_cmd(mpt, req); } static int mpt_fc_els_reply_handler(struct mpt_softc *mpt, request_t *req, uint32_t reply_desc, MSG_DEFAULT_REPLY *reply_frame) { PTR_MSG_LINK_SERVICE_BUFFER_POST_REPLY rp = (PTR_MSG_LINK_SERVICE_BUFFER_POST_REPLY) reply_frame; U8 rctl; U8 type; U8 cmd; U16 status = le16toh(reply_frame->IOCStatus); U32 *elsbuf; int ioindex; int do_refresh = TRUE; #ifdef INVARIANTS KASSERT(mpt_req_on_free_list(mpt, req) == 0, ("fc_els_reply_handler: req %p:%u for function %x on freelist!", req, req->serno, rp->Function)); if (rp->Function != MPI_FUNCTION_FC_PRIMITIVE_SEND) { mpt_req_spcl(mpt, req, "fc_els_reply_handler", __LINE__); } else { mpt_req_not_spcl(mpt, req, "fc_els_reply_handler", __LINE__); } #endif mpt_lprt(mpt, MPT_PRT_DEBUG, "FC_ELS Complete: req %p:%u, reply %p function %x\n", req, req->serno, reply_frame, reply_frame->Function); if (status != MPI_IOCSTATUS_SUCCESS) { mpt_prt(mpt, "ELS REPLY STATUS 0x%x for Function %x\n", status, reply_frame->Function); if (status == MPI_IOCSTATUS_INVALID_STATE) { /* * XXX: to get around shutdown issue */ mpt->disabled = 1; return (TRUE); } return (TRUE); } /* * If the function of a link service response, we recycle the * response to be a refresh for a new link service request. * * The request pointer is bogus in this case and we have to fetch * it based upon the TransactionContext. */ if (rp->Function == MPI_FUNCTION_FC_LINK_SRVC_RSP) { /* Freddie Uncle Charlie Katie */ /* We don't get the IOINDEX as part of the Link Svc Rsp */ for (ioindex = 0; ioindex < mpt->els_cmds_allocated; ioindex++) if (mpt->els_cmd_ptrs[ioindex] == req) { break; } KASSERT(ioindex < mpt->els_cmds_allocated, ("can't find my mommie!")); /* remove from active list as we're going to re-post it */ TAILQ_REMOVE(&mpt->request_pending_list, req, links); req->state &= ~REQ_STATE_QUEUED; req->state |= REQ_STATE_DONE; mpt_fc_post_els(mpt, req, ioindex); return (TRUE); } if (rp->Function == MPI_FUNCTION_FC_PRIMITIVE_SEND) { /* remove from active list as we're done */ TAILQ_REMOVE(&mpt->request_pending_list, req, links); req->state &= ~REQ_STATE_QUEUED; req->state |= REQ_STATE_DONE; if (req->state & REQ_STATE_TIMEDOUT) { mpt_lprt(mpt, MPT_PRT_DEBUG, "Sync Primitive Send Completed After Timeout\n"); mpt_free_request(mpt, req); } else if ((req->state & REQ_STATE_NEED_WAKEUP) == 0) { mpt_lprt(mpt, MPT_PRT_DEBUG, "Async Primitive Send Complete\n"); mpt_free_request(mpt, req); } else { mpt_lprt(mpt, MPT_PRT_DEBUG, "Sync Primitive Send Complete- Waking Waiter\n"); wakeup(req); } return (TRUE); } if (rp->Function != MPI_FUNCTION_FC_LINK_SRVC_BUF_POST) { mpt_prt(mpt, "unexpected ELS_REPLY: Function 0x%x Flags %x " "Length %d Message Flags %x\n", rp->Function, rp->Flags, rp->MsgLength, rp->MsgFlags); return (TRUE); } if (rp->MsgLength <= 5) { /* * This is just a ack of an original ELS buffer post */ mpt_lprt(mpt, MPT_PRT_DEBUG, "RECV'd ACK of FC_ELS buf post %p:%u\n", req, req->serno); return (TRUE); } rctl = (le32toh(rp->Rctl_Did) & MPI_FC_RCTL_MASK) >> MPI_FC_RCTL_SHIFT; type = (le32toh(rp->Type_Fctl) & MPI_FC_TYPE_MASK) >> MPI_FC_TYPE_SHIFT; elsbuf = &((U32 *)req->req_vbuf)[MPT_RQSL(mpt)/sizeof (U32)]; cmd = be32toh(elsbuf[0]) >> 24; if (rp->Flags & MPI_LS_BUF_POST_REPLY_FLAG_NO_RSP_NEEDED) { mpt_lprt(mpt, MPT_PRT_ALWAYS, "ELS_REPLY: response unneeded\n"); return (TRUE); } ioindex = le32toh(rp->TransactionContext); req = mpt->els_cmd_ptrs[ioindex]; if (rctl == ELS && type == 1) { switch (cmd) { case PRLI: /* * Send back a PRLI ACC */ mpt_prt(mpt, "PRLI from 0x%08x%08x\n", 
le32toh(rp->Wwn.PortNameHigh), le32toh(rp->Wwn.PortNameLow)); elsbuf[0] = htobe32(0x02100014); elsbuf[1] |= htobe32(0x00000100); elsbuf[4] = htobe32(0x00000002); if (mpt->role & MPT_ROLE_TARGET) elsbuf[4] |= htobe32(0x00000010); if (mpt->role & MPT_ROLE_INITIATOR) elsbuf[4] |= htobe32(0x00000020); /* remove from active list as we're done */ TAILQ_REMOVE(&mpt->request_pending_list, req, links); req->state &= ~REQ_STATE_QUEUED; req->state |= REQ_STATE_DONE; mpt_fc_els_send_response(mpt, req, rp, 20); do_refresh = FALSE; break; case PRLO: memset(elsbuf, 0, 5 * (sizeof (U32))); elsbuf[0] = htobe32(0x02100014); elsbuf[1] = htobe32(0x08000100); mpt_prt(mpt, "PRLO from 0x%08x%08x\n", le32toh(rp->Wwn.PortNameHigh), le32toh(rp->Wwn.PortNameLow)); /* remove from active list as we're done */ TAILQ_REMOVE(&mpt->request_pending_list, req, links); req->state &= ~REQ_STATE_QUEUED; req->state |= REQ_STATE_DONE; mpt_fc_els_send_response(mpt, req, rp, 20); do_refresh = FALSE; break; default: mpt_prt(mpt, "ELS TYPE 1 COMMAND: %x\n", cmd); break; } } else if (rctl == ABTS && type == 0) { uint16_t rx_id = le16toh(rp->Rxid); uint16_t ox_id = le16toh(rp->Oxid); mpt_tgt_state_t *tgt; request_t *tgt_req = NULL; union ccb *ccb; uint32_t ct_id; mpt_prt(mpt, "ELS: ABTS OX_ID 0x%x RX_ID 0x%x from 0x%08x%08x\n", ox_id, rx_id, le32toh(rp->Wwn.PortNameHigh), le32toh(rp->Wwn.PortNameLow)); if (rx_id >= mpt->mpt_max_tgtcmds) { mpt_prt(mpt, "Bad RX_ID 0x%x\n", rx_id); } else if (mpt->tgt_cmd_ptrs == NULL) { mpt_prt(mpt, "No TGT CMD PTRS\n"); } else { tgt_req = mpt->tgt_cmd_ptrs[rx_id]; } if (tgt_req == NULL) { mpt_prt(mpt, "no back pointer for RX_ID 0x%x\n", rx_id); goto skip; } tgt = MPT_TGT_STATE(mpt, tgt_req); /* Check to make sure we have the correct command. */ ct_id = GET_IO_INDEX(tgt->reply_desc); if (ct_id != rx_id) { mpt_lprt(mpt, MPT_PRT_ERROR, "ABORT Mismatch: " "RX_ID received=0x%x, in cmd=0x%x\n", rx_id, ct_id); goto skip; } if (tgt->itag != ox_id) { mpt_lprt(mpt, MPT_PRT_ERROR, "ABORT Mismatch: " "OX_ID received=0x%x, in cmd=0x%x\n", ox_id, tgt->itag); goto skip; } if ((ccb = tgt->ccb) != NULL) { mpt_prt(mpt, "CCB (%p): lun %jx flags %x status %x\n", ccb, (uintmax_t)ccb->ccb_h.target_lun, ccb->ccb_h.flags, ccb->ccb_h.status); } mpt_prt(mpt, "target state 0x%x resid %u xfrd %u rpwrd " "%x nxfers %x\n", tgt->state, tgt->resid, tgt->bytes_xfered, tgt->reply_desc, tgt->nxfers); if (mpt_abort_target_cmd(mpt, tgt_req)) mpt_prt(mpt, "unable to start TargetAbort\n"); skip: memset(elsbuf, 0, 5 * (sizeof (U32))); elsbuf[0] = htobe32(0); elsbuf[1] = htobe32((ox_id << 16) | rx_id); elsbuf[2] = htobe32(0x000ffff); /* * Dork with the reply frame so that the response to it * will be correct. */ rp->Rctl_Did += ((BA_ACC - ABTS) << MPI_FC_RCTL_SHIFT); /* remove from active list as we're done */ TAILQ_REMOVE(&mpt->request_pending_list, req, links); req->state &= ~REQ_STATE_QUEUED; req->state |= REQ_STATE_DONE; mpt_fc_els_send_response(mpt, req, rp, 12); do_refresh = FALSE; } else { mpt_prt(mpt, "ELS: RCTL %x TYPE %x CMD %x\n", rctl, type, cmd); } if (do_refresh == TRUE) { /* remove from active list as we're done */ TAILQ_REMOVE(&mpt->request_pending_list, req, links); req->state &= ~REQ_STATE_QUEUED; req->state |= REQ_STATE_DONE; mpt_fc_post_els(mpt, req, ioindex); } return (TRUE); } /* * Clean up all SCSI Initiator personality state in response * to a controller reset. */ static void mpt_cam_ioc_reset(struct mpt_softc *mpt, int type) { /* * The pending list is already run down by * the generic handler. 
Perform the same * operation on the timed out request list. */ mpt_complete_request_chain(mpt, &mpt->request_timeout_list, MPI_IOCSTATUS_INVALID_STATE); /* * XXX: We need to repost ELS and Target Command Buffers? */ /* * Inform the XPT that a bus reset has occurred. */ xpt_async(AC_BUS_RESET, mpt->path, NULL); } /* * Parse additional completion information in the reply * frame for SCSI I/O requests. */ static int mpt_scsi_reply_frame_handler(struct mpt_softc *mpt, request_t *req, MSG_DEFAULT_REPLY *reply_frame) { union ccb *ccb; MSG_SCSI_IO_REPLY *scsi_io_reply; u_int ioc_status; u_int sstate; MPT_DUMP_REPLY_FRAME(mpt, reply_frame); KASSERT(reply_frame->Function == MPI_FUNCTION_SCSI_IO_REQUEST || reply_frame->Function == MPI_FUNCTION_RAID_SCSI_IO_PASSTHROUGH, ("MPT SCSI I/O Handler called with incorrect reply type")); KASSERT((reply_frame->MsgFlags & MPI_MSGFLAGS_CONTINUATION_REPLY) == 0, ("MPT SCSI I/O Handler called with continuation reply")); scsi_io_reply = (MSG_SCSI_IO_REPLY *)reply_frame; ioc_status = le16toh(scsi_io_reply->IOCStatus); ioc_status &= MPI_IOCSTATUS_MASK; sstate = scsi_io_reply->SCSIState; ccb = req->ccb; ccb->csio.resid = ccb->csio.dxfer_len - le32toh(scsi_io_reply->TransferCount); if ((sstate & MPI_SCSI_STATE_AUTOSENSE_VALID) != 0 && (ccb->ccb_h.flags & (CAM_SENSE_PHYS | CAM_SENSE_PTR)) == 0) { uint32_t sense_returned; ccb->ccb_h.status |= CAM_AUTOSNS_VALID; sense_returned = le32toh(scsi_io_reply->SenseCount); if (sense_returned < ccb->csio.sense_len) ccb->csio.sense_resid = ccb->csio.sense_len - sense_returned; else ccb->csio.sense_resid = 0; bzero(&ccb->csio.sense_data, sizeof(ccb->csio.sense_data)); bcopy(req->sense_vbuf, &ccb->csio.sense_data, min(ccb->csio.sense_len, sense_returned)); } if ((sstate & MPI_SCSI_STATE_QUEUE_TAG_REJECTED) != 0) { /* * Tag messages rejected, but non-tagged retry * was successful. XXXX mpt_set_tags(mpt, devinfo, MPT_QUEUE_NONE); */ } switch(ioc_status) { case MPI_IOCSTATUS_SCSI_RESIDUAL_MISMATCH: /* * XXX * Linux driver indicates that a zero * transfer length with this error code * indicates a CRC error. * * No need to swap the bytes for checking * against zero. */ if (scsi_io_reply->TransferCount == 0) { mpt_set_ccb_status(ccb, CAM_UNCOR_PARITY); break; } /* FALLTHROUGH */ case MPI_IOCSTATUS_SCSI_DATA_UNDERRUN: case MPI_IOCSTATUS_SUCCESS: case MPI_IOCSTATUS_SCSI_RECOVERED_ERROR: if ((sstate & MPI_SCSI_STATE_NO_SCSI_STATUS) != 0) { /* * Status was never returned for this transaction. */ mpt_set_ccb_status(ccb, CAM_UNEXP_BUSFREE); } else if (scsi_io_reply->SCSIStatus != SCSI_STATUS_OK) { ccb->csio.scsi_status = scsi_io_reply->SCSIStatus; mpt_set_ccb_status(ccb, CAM_SCSI_STATUS_ERROR); if ((sstate & MPI_SCSI_STATE_AUTOSENSE_FAILED) != 0) mpt_set_ccb_status(ccb, CAM_AUTOSENSE_FAIL); } else if ((sstate & MPI_SCSI_STATE_RESPONSE_INFO_VALID) != 0) { /* XXX Handle SPI-Packet and FCP-2 response info. */ mpt_set_ccb_status(ccb, CAM_REQ_CMP_ERR); } else mpt_set_ccb_status(ccb, CAM_REQ_CMP); break; case MPI_IOCSTATUS_SCSI_DATA_OVERRUN: mpt_set_ccb_status(ccb, CAM_DATA_RUN_ERR); break; case MPI_IOCSTATUS_SCSI_IO_DATA_ERROR: mpt_set_ccb_status(ccb, CAM_UNCOR_PARITY); break; case MPI_IOCSTATUS_SCSI_DEVICE_NOT_THERE: /* * Since selection timeouts and "device really not * there" are grouped into this error code, report * selection timeout. Selection timeouts are * typically retried before giving up on the device * whereas "device not there" errors are considered * unretryable. 
*/ mpt_set_ccb_status(ccb, CAM_SEL_TIMEOUT); break; case MPI_IOCSTATUS_SCSI_PROTOCOL_ERROR: mpt_set_ccb_status(ccb, CAM_SEQUENCE_FAIL); break; case MPI_IOCSTATUS_SCSI_INVALID_BUS: mpt_set_ccb_status(ccb, CAM_PATH_INVALID); break; case MPI_IOCSTATUS_SCSI_INVALID_TARGETID: mpt_set_ccb_status(ccb, CAM_TID_INVALID); break; case MPI_IOCSTATUS_SCSI_TASK_MGMT_FAILED: ccb->ccb_h.status = CAM_UA_TERMIO; break; case MPI_IOCSTATUS_INVALID_STATE: /* * The IOC has been reset. Emulate a bus reset. */ /* FALLTHROUGH */ case MPI_IOCSTATUS_SCSI_EXT_TERMINATED: ccb->ccb_h.status = CAM_SCSI_BUS_RESET; break; case MPI_IOCSTATUS_SCSI_TASK_TERMINATED: case MPI_IOCSTATUS_SCSI_IOC_TERMINATED: /* * Don't clobber any timeout status that has * already been set for this transaction. We * want the SCSI layer to be able to differentiate * between the command we aborted due to timeout * and any innocent bystanders. */ if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_INPROG) break; mpt_set_ccb_status(ccb, CAM_REQ_TERMIO); break; case MPI_IOCSTATUS_INSUFFICIENT_RESOURCES: mpt_set_ccb_status(ccb, CAM_RESRC_UNAVAIL); break; case MPI_IOCSTATUS_BUSY: mpt_set_ccb_status(ccb, CAM_BUSY); break; case MPI_IOCSTATUS_INVALID_FUNCTION: case MPI_IOCSTATUS_INVALID_SGL: case MPI_IOCSTATUS_INTERNAL_ERROR: case MPI_IOCSTATUS_INVALID_FIELD: default: /* XXX * Some of the above may need to kick * of a recovery action!!!! */ ccb->ccb_h.status = CAM_UNREC_HBA_ERROR; break; } if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) { mpt_freeze_ccb(ccb); } return (TRUE); } static void mpt_action(struct cam_sim *sim, union ccb *ccb) { struct mpt_softc *mpt; struct ccb_trans_settings *cts; target_id_t tgt; lun_id_t lun; int raid_passthru; CAM_DEBUG(ccb->ccb_h.path, CAM_DEBUG_TRACE, ("mpt_action\n")); mpt = (struct mpt_softc *)cam_sim_softc(sim); raid_passthru = (sim == mpt->phydisk_sim); MPT_LOCK_ASSERT(mpt); tgt = ccb->ccb_h.target_id; lun = ccb->ccb_h.target_lun; if (raid_passthru && ccb->ccb_h.func_code != XPT_PATH_INQ && ccb->ccb_h.func_code != XPT_RESET_BUS && ccb->ccb_h.func_code != XPT_RESET_DEV) { if (mpt_map_physdisk(mpt, ccb, &tgt) != 0) { ccb->ccb_h.status &= ~CAM_SIM_QUEUED; mpt_set_ccb_status(ccb, CAM_DEV_NOT_THERE); xpt_done(ccb); return; } } ccb->ccb_h.ccb_mpt_ptr = mpt; switch (ccb->ccb_h.func_code) { case XPT_SCSI_IO: /* Execute the requested I/O operation */ /* * Do a couple of preliminary checks... 
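	 * Physical CDB pointers are not supported and the CDB must fit
	 * in the 16 byte field of the MPI SCSI I/O request; anything
	 * else is rejected as invalid before a request is allocated.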
*/ if ((ccb->ccb_h.flags & CAM_CDB_POINTER) != 0) { if ((ccb->ccb_h.flags & CAM_CDB_PHYS) != 0) { ccb->ccb_h.status &= ~CAM_SIM_QUEUED; mpt_set_ccb_status(ccb, CAM_REQ_INVALID); break; } } /* Max supported CDB length is 16 bytes */ /* XXX Unless we implement the new 32byte message type */ if (ccb->csio.cdb_len > sizeof (((PTR_MSG_SCSI_IO_REQUEST)0)->CDB)) { ccb->ccb_h.status &= ~CAM_SIM_QUEUED; mpt_set_ccb_status(ccb, CAM_REQ_INVALID); break; } #ifdef MPT_TEST_MULTIPATH if (mpt->failure_id == ccb->ccb_h.target_id) { ccb->ccb_h.status &= ~CAM_SIM_QUEUED; mpt_set_ccb_status(ccb, CAM_SEL_TIMEOUT); break; } #endif ccb->csio.scsi_status = SCSI_STATUS_OK; mpt_start(sim, ccb); return; case XPT_RESET_BUS: if (raid_passthru) { ccb->ccb_h.status &= ~CAM_SIM_QUEUED; mpt_set_ccb_status(ccb, CAM_REQ_CMP); break; } case XPT_RESET_DEV: if (ccb->ccb_h.func_code == XPT_RESET_BUS) { if (bootverbose) { xpt_print(ccb->ccb_h.path, "reset bus\n"); } } else { xpt_print(ccb->ccb_h.path, "reset device\n"); } (void) mpt_bus_reset(mpt, tgt, lun, FALSE); /* * mpt_bus_reset is always successful in that it * will fall back to a hard reset should a bus * reset attempt fail. */ ccb->ccb_h.status &= ~CAM_SIM_QUEUED; mpt_set_ccb_status(ccb, CAM_REQ_CMP); break; case XPT_ABORT: { union ccb *accb = ccb->cab.abort_ccb; switch (accb->ccb_h.func_code) { case XPT_ACCEPT_TARGET_IO: case XPT_IMMEDIATE_NOTIFY: ccb->ccb_h.status = mpt_abort_target_ccb(mpt, ccb); break; case XPT_CONT_TARGET_IO: mpt_prt(mpt, "cannot abort active CTIOs yet\n"); ccb->ccb_h.status = CAM_UA_ABORT; break; case XPT_SCSI_IO: ccb->ccb_h.status = CAM_UA_ABORT; break; default: ccb->ccb_h.status = CAM_REQ_INVALID; break; } break; } #define IS_CURRENT_SETTINGS(c) ((c)->type == CTS_TYPE_CURRENT_SETTINGS) #define DP_DISC_ENABLE 0x1 #define DP_DISC_DISABL 0x2 #define DP_DISC (DP_DISC_ENABLE|DP_DISC_DISABL) #define DP_TQING_ENABLE 0x4 #define DP_TQING_DISABL 0x8 #define DP_TQING (DP_TQING_ENABLE|DP_TQING_DISABL) #define DP_WIDE 0x10 #define DP_NARROW 0x20 #define DP_WIDTH (DP_WIDE|DP_NARROW) #define DP_SYNC 0x40 case XPT_SET_TRAN_SETTINGS: /* Nexus Settings */ { struct ccb_trans_settings_scsi *scsi; struct ccb_trans_settings_spi *spi; uint8_t dval; u_int period; u_int offset; int i, j; cts = &ccb->cts; if (mpt->is_fc || mpt->is_sas) { mpt_set_ccb_status(ccb, CAM_REQ_CMP); break; } scsi = &cts->proto_specific.scsi; spi = &cts->xport_specific.spi; /* * We can be called just to valid transport and proto versions */ if (scsi->valid == 0 && spi->valid == 0) { mpt_set_ccb_status(ccb, CAM_REQ_CMP); break; } /* * Skip attempting settings on RAID volume disks. * Other devices on the bus get the normal treatment. */ if (mpt->phydisk_sim && raid_passthru == 0 && mpt_is_raid_volume(mpt, tgt) != 0) { mpt_lprt(mpt, MPT_PRT_NEGOTIATION, "no transfer settings for RAID vols\n"); mpt_set_ccb_status(ccb, CAM_REQ_CMP); break; } i = mpt->mpt_port_page2.PortSettings & MPI_SCSIPORTPAGE2_PORT_MASK_NEGO_MASTER_SETTINGS; j = mpt->mpt_port_page2.PortFlags & MPI_SCSIPORTPAGE2_PORT_FLAGS_DV_MASK; if (i == MPI_SCSIPORTPAGE2_PORT_ALL_MASTER_SETTINGS && j == MPI_SCSIPORTPAGE2_PORT_FLAGS_OFF_DV) { mpt_lprt(mpt, MPT_PRT_ALWAYS, "honoring BIOS transfer negotiations\n"); mpt_set_ccb_status(ccb, CAM_REQ_CMP); break; } dval = 0; period = 0; offset = 0; if ((spi->valid & CTS_SPI_VALID_DISC) != 0) { dval |= ((spi->flags & CTS_SPI_FLAGS_DISC_ENB) != 0) ? DP_DISC_ENABLE : DP_DISC_DISABL; } if ((scsi->valid & CTS_SCSI_VALID_TQ) != 0) { dval |= ((scsi->flags & CTS_SCSI_FLAGS_TAG_ENB) != 0) ? 
DP_TQING_ENABLE : DP_TQING_DISABL; } if ((spi->valid & CTS_SPI_VALID_BUS_WIDTH) != 0) { dval |= (spi->bus_width == MSG_EXT_WDTR_BUS_16_BIT) ? DP_WIDE : DP_NARROW; } if (spi->valid & CTS_SPI_VALID_SYNC_OFFSET) { dval |= DP_SYNC; offset = spi->sync_offset; } else { PTR_CONFIG_PAGE_SCSI_DEVICE_1 ptr = &mpt->mpt_dev_page1[tgt]; offset = ptr->RequestedParameters; offset &= MPI_SCSIDEVPAGE1_RP_MAX_SYNC_OFFSET_MASK; offset >>= MPI_SCSIDEVPAGE1_RP_SHIFT_MAX_SYNC_OFFSET; } if (spi->valid & CTS_SPI_VALID_SYNC_RATE) { dval |= DP_SYNC; period = spi->sync_period; } else { PTR_CONFIG_PAGE_SCSI_DEVICE_1 ptr = &mpt->mpt_dev_page1[tgt]; period = ptr->RequestedParameters; period &= MPI_SCSIDEVPAGE1_RP_MIN_SYNC_PERIOD_MASK; period >>= MPI_SCSIDEVPAGE1_RP_SHIFT_MIN_SYNC_PERIOD; } if (dval & DP_DISC_ENABLE) { mpt->mpt_disc_enable |= (1 << tgt); } else if (dval & DP_DISC_DISABL) { mpt->mpt_disc_enable &= ~(1 << tgt); } if (dval & DP_TQING_ENABLE) { mpt->mpt_tag_enable |= (1 << tgt); } else if (dval & DP_TQING_DISABL) { mpt->mpt_tag_enable &= ~(1 << tgt); } if (dval & DP_WIDTH) { mpt_setwidth(mpt, tgt, 1); } if (dval & DP_SYNC) { mpt_setsync(mpt, tgt, period, offset); } if (dval == 0) { mpt_set_ccb_status(ccb, CAM_REQ_CMP); break; } mpt_lprt(mpt, MPT_PRT_NEGOTIATION, "set [%d]: 0x%x period 0x%x offset %d\n", tgt, dval, period, offset); if (mpt_update_spi_config(mpt, tgt)) { mpt_set_ccb_status(ccb, CAM_REQ_CMP_ERR); } else { mpt_set_ccb_status(ccb, CAM_REQ_CMP); } break; } case XPT_GET_TRAN_SETTINGS: { struct ccb_trans_settings_scsi *scsi; cts = &ccb->cts; cts->protocol = PROTO_SCSI; if (mpt->is_fc) { struct ccb_trans_settings_fc *fc = &cts->xport_specific.fc; cts->protocol_version = SCSI_REV_SPC; cts->transport = XPORT_FC; cts->transport_version = 0; if (mpt->mpt_fcport_speed != 0) { fc->valid = CTS_FC_VALID_SPEED; fc->bitrate = 100000 * mpt->mpt_fcport_speed; } } else if (mpt->is_sas) { struct ccb_trans_settings_sas *sas = &cts->xport_specific.sas; cts->protocol_version = SCSI_REV_SPC2; cts->transport = XPORT_SAS; cts->transport_version = 0; sas->valid = CTS_SAS_VALID_SPEED; sas->bitrate = 300000; } else { cts->protocol_version = SCSI_REV_2; cts->transport = XPORT_SPI; cts->transport_version = 2; if (mpt_get_spi_settings(mpt, cts) != 0) { mpt_set_ccb_status(ccb, CAM_REQ_CMP_ERR); break; } } scsi = &cts->proto_specific.scsi; scsi->valid = CTS_SCSI_VALID_TQ; scsi->flags = CTS_SCSI_FLAGS_TAG_ENB; mpt_set_ccb_status(ccb, CAM_REQ_CMP); break; } case XPT_CALC_GEOMETRY: { struct ccb_calc_geometry *ccg; ccg = &ccb->ccg; if (ccg->block_size == 0) { ccb->ccb_h.status &= ~CAM_SIM_QUEUED; mpt_set_ccb_status(ccb, CAM_REQ_INVALID); break; } cam_calc_geometry(ccg, /* extended */ 1); KASSERT(ccb->ccb_h.status, ("zero ccb sts at %d", __LINE__)); break; } case XPT_GET_SIM_KNOB: { struct ccb_sim_knob *kp = &ccb->knob; if (mpt->is_fc) { kp->xport_specific.fc.wwnn = mpt->scinfo.fc.wwnn; kp->xport_specific.fc.wwpn = mpt->scinfo.fc.wwpn; switch (mpt->role) { case MPT_ROLE_NONE: kp->xport_specific.fc.role = KNOB_ROLE_NONE; break; case MPT_ROLE_INITIATOR: kp->xport_specific.fc.role = KNOB_ROLE_INITIATOR; break; case MPT_ROLE_TARGET: kp->xport_specific.fc.role = KNOB_ROLE_TARGET; break; case MPT_ROLE_BOTH: kp->xport_specific.fc.role = KNOB_ROLE_BOTH; break; } kp->xport_specific.fc.valid = KNOB_VALID_ADDRESS | KNOB_VALID_ROLE; ccb->ccb_h.status = CAM_REQ_CMP; } else { ccb->ccb_h.status = CAM_REQ_INVALID; } xpt_done(ccb); break; } case XPT_PATH_INQ: /* Path routing inquiry */ { struct ccb_pathinq *cpi = &ccb->cpi; cpi->version_num = 1; 
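		/*
		 * Generic limits for this SIM; the transport specific
		 * fields are filled in below.
		 */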
		cpi->target_sprt = 0;
		cpi->hba_eng_cnt = 0;
		cpi->max_target = mpt->port_facts[0].MaxDevices - 1;
		cpi->maxio = (mpt->max_cam_seg_cnt - 1) * PAGE_SIZE;
		/*
		 * FC cards report MAX_DEVICES of 512, but
		 * the MSG_SCSI_IO_REQUEST target id field
		 * is only 8 bits. Until we fix the driver
		 * to support 'channels' for bus overflow,
		 * just limit it.
		 */
		if (cpi->max_target > 255) {
			cpi->max_target = 255;
		}
		/*
		 * VMware ESX reports > 16 devices and then dies when we probe.
		 */
		if (mpt->is_spi && cpi->max_target > 15) {
			cpi->max_target = 15;
		}
		if (mpt->is_spi)
			cpi->max_lun = 7;
		else
			cpi->max_lun = MPT_MAX_LUNS;
		cpi->initiator_id = mpt->mpt_ini_id;
		cpi->bus_id = cam_sim_bus(sim);
		/*
		 * The base speed is the speed of the underlying connection.
		 */
		cpi->protocol = PROTO_SCSI;
		if (mpt->is_fc) {
			cpi->hba_misc = PIM_NOBUSRESET | PIM_UNMAPPED | PIM_EXTLUNS;
			cpi->base_transfer_speed = 100000;
			cpi->hba_inquiry = PI_TAG_ABLE;
			cpi->transport = XPORT_FC;
			cpi->transport_version = 0;
			cpi->protocol_version = SCSI_REV_SPC;
			cpi->xport_specific.fc.wwnn = mpt->scinfo.fc.wwnn;
			cpi->xport_specific.fc.wwpn = mpt->scinfo.fc.wwpn;
			cpi->xport_specific.fc.port = mpt->scinfo.fc.portid;
			cpi->xport_specific.fc.bitrate = 100000 * mpt->mpt_fcport_speed;
		} else if (mpt->is_sas) {
			cpi->hba_misc = PIM_NOBUSRESET | PIM_UNMAPPED | PIM_EXTLUNS;
			cpi->base_transfer_speed = 300000;
			cpi->hba_inquiry = PI_TAG_ABLE;
			cpi->transport = XPORT_SAS;
			cpi->transport_version = 0;
			cpi->protocol_version = SCSI_REV_SPC2;
		} else {
			cpi->hba_misc = PIM_SEQSCAN | PIM_UNMAPPED | PIM_EXTLUNS;
			cpi->base_transfer_speed = 3300;
			cpi->hba_inquiry = PI_SDTR_ABLE|PI_TAG_ABLE|PI_WIDE_16;
			cpi->transport = XPORT_SPI;
			cpi->transport_version = 2;
			cpi->protocol_version = SCSI_REV_2;
		}
		/*
		 * We give our fake RAID passthru bus a width that is MaxVolumes
		 * wide and restrict it to one lun.
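		 * The initiator id is placed one past the last target so
		 * that it never collides with a physical disk.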
*/ if (raid_passthru) { cpi->max_target = mpt->ioc_page2->MaxPhysDisks - 1; cpi->initiator_id = cpi->max_target + 1; cpi->max_lun = 0; } if ((mpt->role & MPT_ROLE_INITIATOR) == 0) { cpi->hba_misc |= PIM_NOINITIATOR; } if (mpt->is_fc && (mpt->role & MPT_ROLE_TARGET)) { cpi->target_sprt = PIT_PROCESSOR | PIT_DISCONNECT | PIT_TERM_IO; } else { cpi->target_sprt = 0; } strlcpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN); strlcpy(cpi->hba_vid, "LSI", HBA_IDLEN); strlcpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN); cpi->unit_number = cam_sim_unit(sim); cpi->ccb_h.status = CAM_REQ_CMP; break; } case XPT_EN_LUN: /* Enable LUN as a target */ { int result; if (ccb->cel.enable) result = mpt_enable_lun(mpt, ccb->ccb_h.target_id, ccb->ccb_h.target_lun); else result = mpt_disable_lun(mpt, ccb->ccb_h.target_id, ccb->ccb_h.target_lun); if (result == 0) { mpt_set_ccb_status(ccb, CAM_REQ_CMP); } else { mpt_set_ccb_status(ccb, CAM_REQ_CMP_ERR); } break; } case XPT_IMMEDIATE_NOTIFY: /* Add Immediate Notify Resource */ case XPT_ACCEPT_TARGET_IO: /* Add Accept Target IO Resource */ { tgt_resource_t *trtp; lun_id_t lun = ccb->ccb_h.target_lun; ccb->ccb_h.sim_priv.entries[0].field = 0; ccb->ccb_h.sim_priv.entries[1].ptr = mpt; if (lun == CAM_LUN_WILDCARD) { if (ccb->ccb_h.target_id != CAM_TARGET_WILDCARD) { mpt_set_ccb_status(ccb, CAM_REQ_INVALID); break; } trtp = &mpt->trt_wildcard; } else if (lun >= MPT_MAX_LUNS) { mpt_set_ccb_status(ccb, CAM_REQ_INVALID); break; } else { trtp = &mpt->trt[lun]; } if (ccb->ccb_h.func_code == XPT_ACCEPT_TARGET_IO) { mpt_lprt(mpt, MPT_PRT_DEBUG1, "Put FREE ATIO %p lun %jx\n", ccb, (uintmax_t)lun); STAILQ_INSERT_TAIL(&trtp->atios, &ccb->ccb_h, sim_links.stqe); } else { mpt_lprt(mpt, MPT_PRT_DEBUG1, "Put FREE INOT lun %jx\n", (uintmax_t)lun); STAILQ_INSERT_TAIL(&trtp->inots, &ccb->ccb_h, sim_links.stqe); } mpt_set_ccb_status(ccb, CAM_REQ_INPROG); return; } case XPT_NOTIFY_ACKNOWLEDGE: /* Task management request done. */ { request_t *req = MPT_TAG_2_REQ(mpt, ccb->cna2.tag_id); mpt_lprt(mpt, MPT_PRT_DEBUG, "Got Notify ACK\n"); mpt_scsi_tgt_status(mpt, NULL, req, 0, NULL, 0); mpt_set_ccb_status(ccb, CAM_REQ_CMP); break; } case XPT_CONT_TARGET_IO: mpt_target_start_io(mpt, ccb); return; default: ccb->ccb_h.status = CAM_REQ_INVALID; break; } xpt_done(ccb); } static int mpt_get_spi_settings(struct mpt_softc *mpt, struct ccb_trans_settings *cts) { struct ccb_trans_settings_scsi *scsi = &cts->proto_specific.scsi; struct ccb_trans_settings_spi *spi = &cts->xport_specific.spi; target_id_t tgt; uint32_t dval, pval, oval; int rv; if (IS_CURRENT_SETTINGS(cts) == 0) { tgt = cts->ccb_h.target_id; } else if (xpt_path_sim(cts->ccb_h.path) == mpt->phydisk_sim) { if (mpt_map_physdisk(mpt, (union ccb *)cts, &tgt)) { return (-1); } } else { tgt = cts->ccb_h.target_id; } /* * We aren't looking at Port Page 2 BIOS settings here- * sometimes these have been known to be bogus XXX. * * For user settings, we pick the max from port page 0 * * For current settings we read the current settings out from * device page 0 for that target. 
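	 * Reading device page 0 from the IOC each time keeps the reported
	 * negotiation state in step with what the firmware is actually
	 * using rather than a stale cached copy.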
*/ if (IS_CURRENT_SETTINGS(cts)) { CONFIG_PAGE_SCSI_DEVICE_0 tmp; dval = 0; tmp = mpt->mpt_dev_page0[tgt]; rv = mpt_read_cur_cfg_page(mpt, tgt, &tmp.Header, sizeof(tmp), FALSE, 5000); if (rv) { mpt_prt(mpt, "can't get tgt %d config page 0\n", tgt); return (rv); } mpt2host_config_page_scsi_device_0(&tmp); mpt_lprt(mpt, MPT_PRT_DEBUG, "mpt_get_spi_settings[%d]: current NP %x Info %x\n", tgt, tmp.NegotiatedParameters, tmp.Information); dval |= (tmp.NegotiatedParameters & MPI_SCSIDEVPAGE0_NP_WIDE) ? DP_WIDE : DP_NARROW; dval |= (mpt->mpt_disc_enable & (1 << tgt)) ? DP_DISC_ENABLE : DP_DISC_DISABL; dval |= (mpt->mpt_tag_enable & (1 << tgt)) ? DP_TQING_ENABLE : DP_TQING_DISABL; oval = tmp.NegotiatedParameters; oval &= MPI_SCSIDEVPAGE0_NP_NEG_SYNC_OFFSET_MASK; oval >>= MPI_SCSIDEVPAGE0_NP_SHIFT_SYNC_OFFSET; pval = tmp.NegotiatedParameters; pval &= MPI_SCSIDEVPAGE0_NP_NEG_SYNC_PERIOD_MASK; pval >>= MPI_SCSIDEVPAGE0_NP_SHIFT_SYNC_PERIOD; mpt->mpt_dev_page0[tgt] = tmp; } else { dval = DP_WIDE|DP_DISC_ENABLE|DP_TQING_ENABLE|DP_SYNC; oval = mpt->mpt_port_page0.Capabilities; oval = MPI_SCSIPORTPAGE0_CAP_GET_MAX_SYNC_OFFSET(oval); pval = mpt->mpt_port_page0.Capabilities; pval = MPI_SCSIPORTPAGE0_CAP_GET_MIN_SYNC_PERIOD(pval); } spi->valid = 0; scsi->valid = 0; spi->flags = 0; scsi->flags = 0; spi->sync_offset = oval; spi->sync_period = pval; spi->valid |= CTS_SPI_VALID_SYNC_OFFSET; spi->valid |= CTS_SPI_VALID_SYNC_RATE; spi->valid |= CTS_SPI_VALID_BUS_WIDTH; if (dval & DP_WIDE) { spi->bus_width = MSG_EXT_WDTR_BUS_16_BIT; } else { spi->bus_width = MSG_EXT_WDTR_BUS_8_BIT; } if (cts->ccb_h.target_lun != CAM_LUN_WILDCARD) { scsi->valid = CTS_SCSI_VALID_TQ; if (dval & DP_TQING_ENABLE) { scsi->flags |= CTS_SCSI_FLAGS_TAG_ENB; } spi->valid |= CTS_SPI_VALID_DISC; if (dval & DP_DISC_ENABLE) { spi->flags |= CTS_SPI_FLAGS_DISC_ENB; } } mpt_lprt(mpt, MPT_PRT_NEGOTIATION, "mpt_get_spi_settings[%d]: %s flags 0x%x per 0x%x off=%d\n", tgt, IS_CURRENT_SETTINGS(cts) ? 
"ACTIVE" : "NVRAM ", dval, pval, oval); return (0); } static void mpt_setwidth(struct mpt_softc *mpt, int tgt, int onoff) { PTR_CONFIG_PAGE_SCSI_DEVICE_1 ptr; ptr = &mpt->mpt_dev_page1[tgt]; if (onoff) { ptr->RequestedParameters |= MPI_SCSIDEVPAGE1_RP_WIDE; } else { ptr->RequestedParameters &= ~MPI_SCSIDEVPAGE1_RP_WIDE; } } static void mpt_setsync(struct mpt_softc *mpt, int tgt, int period, int offset) { PTR_CONFIG_PAGE_SCSI_DEVICE_1 ptr; ptr = &mpt->mpt_dev_page1[tgt]; ptr->RequestedParameters &= ~MPI_SCSIDEVPAGE1_RP_MIN_SYNC_PERIOD_MASK; ptr->RequestedParameters &= ~MPI_SCSIDEVPAGE1_RP_MAX_SYNC_OFFSET_MASK; ptr->RequestedParameters &= ~MPI_SCSIDEVPAGE1_RP_DT; ptr->RequestedParameters &= ~MPI_SCSIDEVPAGE1_RP_QAS; ptr->RequestedParameters &= ~MPI_SCSIDEVPAGE1_RP_IU; if (period == 0) { return; } ptr->RequestedParameters |= period << MPI_SCSIDEVPAGE1_RP_SHIFT_MIN_SYNC_PERIOD; ptr->RequestedParameters |= offset << MPI_SCSIDEVPAGE1_RP_SHIFT_MAX_SYNC_OFFSET; if (period < 0xa) { ptr->RequestedParameters |= MPI_SCSIDEVPAGE1_RP_DT; } if (period < 0x9) { ptr->RequestedParameters |= MPI_SCSIDEVPAGE1_RP_QAS; ptr->RequestedParameters |= MPI_SCSIDEVPAGE1_RP_IU; } } static int mpt_update_spi_config(struct mpt_softc *mpt, int tgt) { CONFIG_PAGE_SCSI_DEVICE_1 tmp; int rv; mpt_lprt(mpt, MPT_PRT_NEGOTIATION, "mpt_update_spi_config[%d].page1: Requested Params 0x%08x\n", tgt, mpt->mpt_dev_page1[tgt].RequestedParameters); tmp = mpt->mpt_dev_page1[tgt]; host2mpt_config_page_scsi_device_1(&tmp); rv = mpt_write_cur_cfg_page(mpt, tgt, &tmp.Header, sizeof(tmp), FALSE, 5000); if (rv) { mpt_prt(mpt, "mpt_update_spi_config: write cur page failed\n"); return (-1); } return (0); } /****************************** Timeout Recovery ******************************/ static int mpt_spawn_recovery_thread(struct mpt_softc *mpt) { int error; error = kproc_create(mpt_recovery_thread, mpt, &mpt->recovery_thread, /*flags*/0, /*altstack*/0, "mpt_recovery%d", mpt->unit); return (error); } static void mpt_terminate_recovery_thread(struct mpt_softc *mpt) { if (mpt->recovery_thread == NULL) { return; } mpt->shutdwn_recovery = 1; wakeup(mpt); /* * Sleep on a slightly different location * for this interlock just for added safety. */ mpt_sleep(mpt, &mpt->recovery_thread, PUSER, "thtrm", 0); } static void mpt_recovery_thread(void *arg) { struct mpt_softc *mpt; mpt = (struct mpt_softc *)arg; MPT_LOCK(mpt); for (;;) { if (TAILQ_EMPTY(&mpt->request_timeout_list) != 0) { if (mpt->shutdwn_recovery == 0) { mpt_sleep(mpt, mpt, PUSER, "idle", 0); } } if (mpt->shutdwn_recovery != 0) { break; } mpt_recover_commands(mpt); } mpt->recovery_thread = NULL; wakeup(&mpt->recovery_thread); MPT_UNLOCK(mpt); kproc_exit(0); } static int mpt_scsi_send_tmf(struct mpt_softc *mpt, u_int type, u_int flags, u_int channel, target_id_t target, lun_id_t lun, u_int abort_ctx, int sleep_ok) { MSG_SCSI_TASK_MGMT *tmf_req; int error; /* * Wait for any current TMF request to complete. * We're only allowed to issue one TMF at a time. 
*/ error = mpt_wait_req(mpt, mpt->tmf_req, REQ_STATE_FREE, REQ_STATE_FREE, sleep_ok, MPT_TMF_MAX_TIMEOUT); if (error != 0) { mpt_reset(mpt, TRUE); return (ETIMEDOUT); } mpt_assign_serno(mpt, mpt->tmf_req); mpt->tmf_req->state = REQ_STATE_ALLOCATED|REQ_STATE_QUEUED; tmf_req = (MSG_SCSI_TASK_MGMT *)mpt->tmf_req->req_vbuf; memset(tmf_req, 0, sizeof(*tmf_req)); tmf_req->TargetID = target; tmf_req->Bus = channel; tmf_req->Function = MPI_FUNCTION_SCSI_TASK_MGMT; tmf_req->TaskType = type; tmf_req->MsgFlags = flags; tmf_req->MsgContext = htole32(mpt->tmf_req->index | scsi_tmf_handler_id); be64enc(tmf_req->LUN, CAM_EXTLUN_BYTE_SWIZZLE(lun)); tmf_req->TaskMsgContext = abort_ctx; mpt_lprt(mpt, MPT_PRT_DEBUG, "Issuing TMF %p:%u with MsgContext of 0x%x\n", mpt->tmf_req, mpt->tmf_req->serno, tmf_req->MsgContext); if (mpt->verbose > MPT_PRT_DEBUG) { mpt_print_request(tmf_req); } KASSERT(mpt_req_on_pending_list(mpt, mpt->tmf_req) == 0, ("mpt_scsi_send_tmf: tmf_req already on pending list")); TAILQ_INSERT_HEAD(&mpt->request_pending_list, mpt->tmf_req, links); error = mpt_send_handshake_cmd(mpt, sizeof(*tmf_req), tmf_req); if (error != MPT_OK) { TAILQ_REMOVE(&mpt->request_pending_list, mpt->tmf_req, links); mpt->tmf_req->state = REQ_STATE_FREE; mpt_reset(mpt, TRUE); } return (error); } /* * When a command times out, it is placed on the requeust_timeout_list * and we wake our recovery thread. The MPT-Fusion architecture supports * only a single TMF operation at a time, so we serially abort/bdr, etc, * the timedout transactions. The next TMF is issued either by the * completion handler of the current TMF waking our recovery thread, * or the TMF timeout handler causing a hard reset sequence. */ static void mpt_recover_commands(struct mpt_softc *mpt) { request_t *req; union ccb *ccb; int error; if (TAILQ_EMPTY(&mpt->request_timeout_list) != 0) { /* * No work to do- leave. */ mpt_prt(mpt, "mpt_recover_commands: no requests.\n"); return; } /* * Flush any commands whose completion coincides with their timeout. */ mpt_intr(mpt); if (TAILQ_EMPTY(&mpt->request_timeout_list) != 0) { /* * The timedout commands have already * completed. This typically means * that either the timeout value was on * the hairy edge of what the device * requires or - more likely - interrupts * are not happening. */ mpt_prt(mpt, "Timedout requests already complete. " "Interrupts may not be functioning.\n"); mpt_enable_ints(mpt); return; } /* * We have no visibility into the current state of the * controller, so attempt to abort the commands in the * order they timed-out. For initiator commands, we * depend on the reply handler pulling requests off * the timeout list. */ while ((req = TAILQ_FIRST(&mpt->request_timeout_list)) != NULL) { uint16_t status; uint8_t response; MSG_REQUEST_HEADER *hdrp = req->req_vbuf; mpt_prt(mpt, "attempting to abort req %p:%u function %x\n", req, req->serno, hdrp->Function); ccb = req->ccb; if (ccb == NULL) { mpt_prt(mpt, "null ccb in timed out request. " "Resetting Controller.\n"); mpt_reset(mpt, TRUE); continue; } mpt_set_ccb_status(ccb, CAM_CMD_TIMEOUT); /* * Check to see if this is not an initiator command and * deal with it differently if it is. */ switch (hdrp->Function) { case MPI_FUNCTION_SCSI_IO_REQUEST: case MPI_FUNCTION_RAID_SCSI_IO_PASSTHROUGH: break; default: /* * XXX: FIX ME: need to abort target assists... 
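	 * For now, non-initiator requests are simply moved back onto the
	 * pending list rather than being aborted.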
*/ mpt_prt(mpt, "just putting it back on the pend q\n"); TAILQ_REMOVE(&mpt->request_timeout_list, req, links); TAILQ_INSERT_HEAD(&mpt->request_pending_list, req, links); continue; } error = mpt_scsi_send_tmf(mpt, MPI_SCSITASKMGMT_TASKTYPE_ABORT_TASK, 0, 0, ccb->ccb_h.target_id, ccb->ccb_h.target_lun, htole32(req->index | scsi_io_handler_id), TRUE); if (error != 0) { /* * mpt_scsi_send_tmf hard resets on failure, so no * need to do so here. Our queue should be emptied * by the hard reset. */ continue; } error = mpt_wait_req(mpt, mpt->tmf_req, REQ_STATE_DONE, REQ_STATE_DONE, TRUE, 500); status = le16toh(mpt->tmf_req->IOCStatus); response = mpt->tmf_req->ResponseCode; mpt->tmf_req->state = REQ_STATE_FREE; if (error != 0) { /* * If we've errored out,, reset the controller. */ mpt_prt(mpt, "mpt_recover_commands: abort timed-out. " "Resetting controller\n"); mpt_reset(mpt, TRUE); continue; } if ((status & MPI_IOCSTATUS_MASK) != MPI_IOCSTATUS_SUCCESS) { mpt_prt(mpt, "mpt_recover_commands: IOC Status 0x%x. " "Resetting controller.\n", status); mpt_reset(mpt, TRUE); continue; } if (response != MPI_SCSITASKMGMT_RSP_TM_SUCCEEDED && response != MPI_SCSITASKMGMT_RSP_TM_COMPLETE) { mpt_prt(mpt, "mpt_recover_commands: TMF Response 0x%x. " "Resetting controller.\n", response); mpt_reset(mpt, TRUE); continue; } mpt_prt(mpt, "abort of req %p:%u completed\n", req, req->serno); } } /************************ Target Mode Support ****************************/ static void mpt_fc_post_els(struct mpt_softc *mpt, request_t *req, int ioindex) { MSG_LINK_SERVICE_BUFFER_POST_REQUEST *fc; PTR_SGE_TRANSACTION32 tep; PTR_SGE_SIMPLE32 se; bus_addr_t paddr; uint32_t fl; paddr = req->req_pbuf; paddr += MPT_RQSL(mpt); fc = req->req_vbuf; memset(fc, 0, MPT_REQUEST_AREA); fc->BufferCount = 1; fc->Function = MPI_FUNCTION_FC_LINK_SRVC_BUF_POST; fc->MsgContext = htole32(req->index | fc_els_handler_id); /* * Okay, set up ELS buffer pointers. ELS buffer pointers * consist of a TE SGL element (with details length of zero) * followed by a SIMPLE SGL element which holds the address * of the buffer. 
*/ tep = (PTR_SGE_TRANSACTION32) &fc->SGL; tep->ContextSize = 4; tep->Flags = 0; tep->TransactionContext[0] = htole32(ioindex); se = (PTR_SGE_SIMPLE32) &tep->TransactionDetails[0]; fl = MPI_SGE_FLAGS_HOST_TO_IOC | MPI_SGE_FLAGS_SIMPLE_ELEMENT | MPI_SGE_FLAGS_LAST_ELEMENT | MPI_SGE_FLAGS_END_OF_LIST | MPI_SGE_FLAGS_END_OF_BUFFER; fl <<= MPI_SGE_FLAGS_SHIFT; fl |= (MPT_NRFM(mpt) - MPT_RQSL(mpt)); se->FlagsLength = htole32(fl); se->Address = htole32((uint32_t) paddr); mpt_lprt(mpt, MPT_PRT_DEBUG, "add ELS index %d ioindex %d for %p:%u\n", req->index, ioindex, req, req->serno); KASSERT(((req->state & REQ_STATE_LOCKED) != 0), ("mpt_fc_post_els: request not locked")); mpt_send_cmd(mpt, req); } static void mpt_post_target_command(struct mpt_softc *mpt, request_t *req, int ioindex) { PTR_MSG_TARGET_CMD_BUFFER_POST_REQUEST fc; PTR_CMD_BUFFER_DESCRIPTOR cb; bus_addr_t paddr; paddr = req->req_pbuf; paddr += MPT_RQSL(mpt); memset(req->req_vbuf, 0, MPT_REQUEST_AREA); MPT_TGT_STATE(mpt, req)->state = TGT_STATE_LOADING; fc = req->req_vbuf; fc->BufferCount = 1; fc->Function = MPI_FUNCTION_TARGET_CMD_BUFFER_POST; fc->BufferLength = MIN(MPT_REQUEST_AREA - MPT_RQSL(mpt), UINT8_MAX); fc->MsgContext = htole32(req->index | mpt->scsi_tgt_handler_id); cb = &fc->Buffer[0]; cb->IoIndex = htole16(ioindex); cb->u.PhysicalAddress32 = htole32((U32) paddr); mpt_check_doorbell(mpt); mpt_send_cmd(mpt, req); } static int mpt_add_els_buffers(struct mpt_softc *mpt) { int i; if (mpt->is_fc == 0) { return (TRUE); } if (mpt->els_cmds_allocated) { return (TRUE); } mpt->els_cmd_ptrs = malloc(MPT_MAX_ELS * sizeof (request_t *), M_DEVBUF, M_NOWAIT | M_ZERO); if (mpt->els_cmd_ptrs == NULL) { return (FALSE); } /* * Feed the chip some ELS buffer resources */ for (i = 0; i < MPT_MAX_ELS; i++) { request_t *req = mpt_get_request(mpt, FALSE); if (req == NULL) { break; } req->state |= REQ_STATE_LOCKED; mpt->els_cmd_ptrs[i] = req; mpt_fc_post_els(mpt, req, i); } if (i == 0) { mpt_prt(mpt, "unable to add ELS buffer resources\n"); free(mpt->els_cmd_ptrs, M_DEVBUF); mpt->els_cmd_ptrs = NULL; return (FALSE); } if (i != MPT_MAX_ELS) { mpt_lprt(mpt, MPT_PRT_INFO, "only added %d of %d ELS buffers\n", i, MPT_MAX_ELS); } mpt->els_cmds_allocated = i; return(TRUE); } static int mpt_add_target_commands(struct mpt_softc *mpt) { int i, max; if (mpt->tgt_cmd_ptrs) { return (TRUE); } max = MPT_MAX_REQUESTS(mpt) >> 1; if (max > mpt->mpt_max_tgtcmds) { max = mpt->mpt_max_tgtcmds; } mpt->tgt_cmd_ptrs = - malloc(max * sizeof (request_t *), M_DEVBUF, M_NOWAIT | M_ZERO); + mallocarray(max, sizeof(request_t *), M_DEVBUF, M_NOWAIT | M_ZERO); if (mpt->tgt_cmd_ptrs == NULL) { mpt_prt(mpt, "mpt_add_target_commands: could not allocate cmd ptrs\n"); return (FALSE); } for (i = 0; i < max; i++) { request_t *req; req = mpt_get_request(mpt, FALSE); if (req == NULL) { break; } req->state |= REQ_STATE_LOCKED; mpt->tgt_cmd_ptrs[i] = req; mpt_post_target_command(mpt, req, i); } if (i == 0) { mpt_lprt(mpt, MPT_PRT_ERROR, "could not add any target bufs\n"); free(mpt->tgt_cmd_ptrs, M_DEVBUF); mpt->tgt_cmd_ptrs = NULL; return (FALSE); } mpt->tgt_cmds_allocated = i; if (i < max) { mpt_lprt(mpt, MPT_PRT_INFO, "added %d of %d target bufs\n", i, max); } return (i); } static int mpt_enable_lun(struct mpt_softc *mpt, target_id_t tgt, lun_id_t lun) { if (tgt == CAM_TARGET_WILDCARD && lun == CAM_LUN_WILDCARD) { mpt->twildcard = 1; } else if (lun >= MPT_MAX_LUNS) { return (EINVAL); } else if (tgt != CAM_TARGET_WILDCARD && tgt != 0) { return (EINVAL); } if (mpt->tenabled == 0) { if 
(mpt->is_fc) { (void) mpt_fc_reset_link(mpt, 0); } mpt->tenabled = 1; } if (lun == CAM_LUN_WILDCARD) { mpt->trt_wildcard.enabled = 1; } else { mpt->trt[lun].enabled = 1; } return (0); } static int mpt_disable_lun(struct mpt_softc *mpt, target_id_t tgt, lun_id_t lun) { int i; if (tgt == CAM_TARGET_WILDCARD && lun == CAM_LUN_WILDCARD) { mpt->twildcard = 0; } else if (lun >= MPT_MAX_LUNS) { return (EINVAL); } else if (tgt != CAM_TARGET_WILDCARD && tgt != 0) { return (EINVAL); } if (lun == CAM_LUN_WILDCARD) { mpt->trt_wildcard.enabled = 0; } else { mpt->trt[lun].enabled = 0; } for (i = 0; i < MPT_MAX_LUNS; i++) { if (mpt->trt[i].enabled) { break; } } if (i == MPT_MAX_LUNS && mpt->twildcard == 0) { if (mpt->is_fc) { (void) mpt_fc_reset_link(mpt, 0); } mpt->tenabled = 0; } return (0); } /* * Called with MPT lock held */ static void mpt_target_start_io(struct mpt_softc *mpt, union ccb *ccb) { struct ccb_scsiio *csio = &ccb->csio; request_t *cmd_req = MPT_TAG_2_REQ(mpt, csio->tag_id); mpt_tgt_state_t *tgt = MPT_TGT_STATE(mpt, cmd_req); switch (tgt->state) { case TGT_STATE_IN_CAM: break; case TGT_STATE_MOVING_DATA: mpt_set_ccb_status(ccb, CAM_REQUEUE_REQ); xpt_freeze_simq(mpt->sim, 1); ccb->ccb_h.status &= ~CAM_SIM_QUEUED; tgt->ccb->ccb_h.status |= CAM_RELEASE_SIMQ; xpt_done(ccb); return; default: mpt_prt(mpt, "ccb %p flags 0x%x tag 0x%08x had bad request " "starting I/O\n", ccb, csio->ccb_h.flags, csio->tag_id); mpt_tgt_dump_req_state(mpt, cmd_req); mpt_set_ccb_status(ccb, CAM_REQ_CMP_ERR); xpt_done(ccb); return; } if (csio->dxfer_len) { bus_dmamap_callback_t *cb; PTR_MSG_TARGET_ASSIST_REQUEST ta; request_t *req; int error; KASSERT((ccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE, ("dxfer_len %u but direction is NONE", csio->dxfer_len)); if ((req = mpt_get_request(mpt, FALSE)) == NULL) { if (mpt->outofbeer == 0) { mpt->outofbeer = 1; xpt_freeze_simq(mpt->sim, 1); mpt_lprt(mpt, MPT_PRT_DEBUG, "FREEZEQ\n"); } ccb->ccb_h.status &= ~CAM_SIM_QUEUED; mpt_set_ccb_status(ccb, CAM_REQUEUE_REQ); xpt_done(ccb); return; } ccb->ccb_h.status = CAM_SIM_QUEUED | CAM_REQ_INPROG; if (sizeof (bus_addr_t) > 4) { cb = mpt_execute_req_a64; } else { cb = mpt_execute_req; } req->ccb = ccb; ccb->ccb_h.ccb_req_ptr = req; /* * Record the currently active ccb and the * request for it in our target state area. */ tgt->ccb = ccb; tgt->req = req; memset(req->req_vbuf, 0, MPT_RQSL(mpt)); ta = req->req_vbuf; if (mpt->is_sas) { PTR_MPI_TARGET_SSP_CMD_BUFFER ssp = cmd_req->req_vbuf; ta->QueueTag = ssp->InitiatorTag; } else if (mpt->is_spi) { PTR_MPI_TARGET_SCSI_SPI_CMD_BUFFER sp = cmd_req->req_vbuf; ta->QueueTag = sp->Tag; } ta->Function = MPI_FUNCTION_TARGET_ASSIST; ta->MsgContext = htole32(req->index | mpt->scsi_tgt_handler_id); ta->ReplyWord = htole32(tgt->reply_desc); be64enc(ta->LUN, CAM_EXTLUN_BYTE_SWIZZLE(csio->ccb_h.target_lun)); ta->RelativeOffset = tgt->bytes_xfered; ta->DataLength = ccb->csio.dxfer_len; if (ta->DataLength > tgt->resid) { ta->DataLength = tgt->resid; } /* * XXX Should be done after data transfer completes? 
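 * (the resid/bytes_xfered bookkeeping below is done optimistically at
 * setup time, before the TARGET_ASSIST has actually moved any data)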
*/ csio->resid = csio->dxfer_len - ta->DataLength; tgt->resid -= csio->dxfer_len; tgt->bytes_xfered += csio->dxfer_len; if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) { ta->TargetAssistFlags |= TARGET_ASSIST_FLAGS_DATA_DIRECTION; } #ifdef WE_TRUST_AUTO_GOOD_STATUS if ((ccb->ccb_h.flags & CAM_SEND_STATUS) && csio->scsi_status == SCSI_STATUS_OK && tgt->resid == 0) { ta->TargetAssistFlags |= TARGET_ASSIST_FLAGS_AUTO_STATUS; } #endif tgt->state = TGT_STATE_SETTING_UP_FOR_DATA; mpt_lprt(mpt, MPT_PRT_DEBUG, "DATA_CCB %p tag %x %u bytes %u resid flg %x req %p:%u " "nxtstate=%d\n", csio, csio->tag_id, csio->dxfer_len, tgt->resid, ccb->ccb_h.flags, req, req->serno, tgt->state); error = bus_dmamap_load_ccb(mpt->buffer_dmat, req->dmap, ccb, cb, req, 0); if (error == EINPROGRESS) { xpt_freeze_simq(mpt->sim, 1); ccb->ccb_h.status |= CAM_RELEASE_SIMQ; } } else { /* * XXX: I don't know why this seems to happen, but * XXX: completing the CCB seems to make things happy. * XXX: This seems to happen if the initiator requests * XXX: enough data that we have to do multiple CTIOs. */ if ((ccb->ccb_h.flags & CAM_SEND_STATUS) == 0) { mpt_lprt(mpt, MPT_PRT_DEBUG, "Meaningless STATUS CCB (%p): flags %x status %x " "resid %d bytes_xfered %u\n", ccb, ccb->ccb_h.flags, ccb->ccb_h.status, tgt->resid, tgt->bytes_xfered); mpt_set_ccb_status(ccb, CAM_REQ_CMP); ccb->ccb_h.status &= ~CAM_SIM_QUEUED; xpt_done(ccb); return; } mpt_scsi_tgt_status(mpt, ccb, cmd_req, csio->scsi_status, (void *)&csio->sense_data, (ccb->ccb_h.flags & CAM_SEND_SENSE) ? csio->sense_len : 0); } } static void mpt_scsi_tgt_local(struct mpt_softc *mpt, request_t *cmd_req, lun_id_t lun, int send, uint8_t *data, size_t length) { mpt_tgt_state_t *tgt; PTR_MSG_TARGET_ASSIST_REQUEST ta; SGE_SIMPLE32 *se; uint32_t flags; uint8_t *dptr; bus_addr_t pptr; request_t *req; /* * We enter with resid set to the data load for the command. 
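 * This local-reply path copies the payload into the back half of the
 * request area (req_vbuf/req_pbuf + MPT_RQSL) and describes it with a
 * single SGE_SIMPLE32; note that only the first MPT_RQSL(mpt) bytes of
 * the supplied data are actually copied.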
*/ tgt = MPT_TGT_STATE(mpt, cmd_req); if (length == 0 || tgt->resid == 0) { tgt->resid = 0; mpt_scsi_tgt_status(mpt, NULL, cmd_req, 0, NULL, 0); return; } if ((req = mpt_get_request(mpt, FALSE)) == NULL) { mpt_prt(mpt, "out of resources- dropping local response\n"); return; } tgt->is_local = 1; memset(req->req_vbuf, 0, MPT_RQSL(mpt)); ta = req->req_vbuf; if (mpt->is_sas) { PTR_MPI_TARGET_SSP_CMD_BUFFER ssp = cmd_req->req_vbuf; ta->QueueTag = ssp->InitiatorTag; } else if (mpt->is_spi) { PTR_MPI_TARGET_SCSI_SPI_CMD_BUFFER sp = cmd_req->req_vbuf; ta->QueueTag = sp->Tag; } ta->Function = MPI_FUNCTION_TARGET_ASSIST; ta->MsgContext = htole32(req->index | mpt->scsi_tgt_handler_id); ta->ReplyWord = htole32(tgt->reply_desc); be64enc(ta->LUN, CAM_EXTLUN_BYTE_SWIZZLE(lun)); ta->RelativeOffset = 0; ta->DataLength = length; dptr = req->req_vbuf; dptr += MPT_RQSL(mpt); pptr = req->req_pbuf; pptr += MPT_RQSL(mpt); memcpy(dptr, data, min(length, MPT_RQSL(mpt))); se = (SGE_SIMPLE32 *) &ta->SGL[0]; memset(se, 0,sizeof (*se)); flags = MPI_SGE_FLAGS_SIMPLE_ELEMENT; if (send) { ta->TargetAssistFlags |= TARGET_ASSIST_FLAGS_DATA_DIRECTION; flags |= MPI_SGE_FLAGS_HOST_TO_IOC; } se->Address = pptr; MPI_pSGE_SET_LENGTH(se, length); flags |= MPI_SGE_FLAGS_LAST_ELEMENT; flags |= MPI_SGE_FLAGS_END_OF_LIST | MPI_SGE_FLAGS_END_OF_BUFFER; MPI_pSGE_SET_FLAGS(se, flags); tgt->ccb = NULL; tgt->req = req; tgt->resid -= length; tgt->bytes_xfered = length; #ifdef WE_TRUST_AUTO_GOOD_STATUS tgt->state = TGT_STATE_MOVING_DATA_AND_STATUS; #else tgt->state = TGT_STATE_MOVING_DATA; #endif mpt_send_cmd(mpt, req); } /* * Abort queued up CCBs */ static cam_status mpt_abort_target_ccb(struct mpt_softc *mpt, union ccb *ccb) { struct mpt_hdr_stailq *lp; struct ccb_hdr *srch; union ccb *accb = ccb->cab.abort_ccb; tgt_resource_t *trtp; mpt_tgt_state_t *tgt; request_t *req; uint32_t tag; mpt_lprt(mpt, MPT_PRT_DEBUG, "aborting ccb %p\n", accb); if (ccb->ccb_h.target_lun == CAM_LUN_WILDCARD) trtp = &mpt->trt_wildcard; else trtp = &mpt->trt[ccb->ccb_h.target_lun]; if (accb->ccb_h.func_code == XPT_ACCEPT_TARGET_IO) { lp = &trtp->atios; tag = accb->atio.tag_id; } else { lp = &trtp->inots; tag = accb->cin1.tag_id; } /* Search the CCB among queued. */ STAILQ_FOREACH(srch, lp, sim_links.stqe) { if (srch != &accb->ccb_h) continue; STAILQ_REMOVE(lp, srch, ccb_hdr, sim_links.stqe); accb->ccb_h.status = CAM_REQ_ABORTED; xpt_done(accb); return (CAM_REQ_CMP); } /* Search the CCB among running. */ req = MPT_TAG_2_REQ(mpt, tag); tgt = MPT_TGT_STATE(mpt, req); if (tgt->tag_id == tag) { mpt_abort_target_cmd(mpt, req); return (CAM_REQ_CMP); } return (CAM_UA_ABORT); } /* * Ask the MPT to abort the current target command */ static int mpt_abort_target_cmd(struct mpt_softc *mpt, request_t *cmd_req) { int error; request_t *req; PTR_MSG_TARGET_MODE_ABORT abtp; req = mpt_get_request(mpt, FALSE); if (req == NULL) { return (-1); } abtp = req->req_vbuf; memset(abtp, 0, sizeof (*abtp)); abtp->MsgContext = htole32(req->index | mpt->scsi_tgt_handler_id); abtp->AbortType = TARGET_MODE_ABORT_TYPE_EXACT_IO; abtp->Function = MPI_FUNCTION_TARGET_MODE_ABORT; abtp->ReplyWord = htole32(MPT_TGT_STATE(mpt, cmd_req)->reply_desc); error = 0; if (mpt->is_fc || mpt->is_sas) { mpt_send_cmd(mpt, req); } else { error = mpt_send_handshake_cmd(mpt, sizeof(*req), req); } return (error); } /* * WE_TRUST_AUTO_GOOD_STATUS- I've found that setting * TARGET_STATUS_SEND_FLAGS_AUTO_GOOD_STATUS leads the * FC929 to set bogus FC_RSP fields (nonzero residuals * but w/o RESID fields set). 
This causes QLogic initiators * to think maybe that a frame was lost. * * WE_CAN_USE_AUTO_REPOST- we can't use AUTO_REPOST because * we use allocated requests to do TARGET_ASSIST and we * need to know when to release them. */ static void mpt_scsi_tgt_status(struct mpt_softc *mpt, union ccb *ccb, request_t *cmd_req, uint8_t status, uint8_t const *sense_data, u_int sense_len) { uint8_t *cmd_vbuf; mpt_tgt_state_t *tgt; PTR_MSG_TARGET_STATUS_SEND_REQUEST tp; request_t *req; bus_addr_t paddr; int resplen = 0; uint32_t fl; cmd_vbuf = cmd_req->req_vbuf; cmd_vbuf += MPT_RQSL(mpt); tgt = MPT_TGT_STATE(mpt, cmd_req); if ((req = mpt_get_request(mpt, FALSE)) == NULL) { if (mpt->outofbeer == 0) { mpt->outofbeer = 1; xpt_freeze_simq(mpt->sim, 1); mpt_lprt(mpt, MPT_PRT_DEBUG, "FREEZEQ\n"); } if (ccb) { ccb->ccb_h.status &= ~CAM_SIM_QUEUED; mpt_set_ccb_status(ccb, CAM_REQUEUE_REQ); xpt_done(ccb); } else { mpt_prt(mpt, "could not allocate status request- dropping\n"); } return; } req->ccb = ccb; if (ccb) { ccb->ccb_h.ccb_mpt_ptr = mpt; ccb->ccb_h.ccb_req_ptr = req; } /* * Record the currently active ccb, if any, and the * request for it in our target state area. */ tgt->ccb = ccb; tgt->req = req; tgt->state = TGT_STATE_SENDING_STATUS; tp = req->req_vbuf; paddr = req->req_pbuf; paddr += MPT_RQSL(mpt); memset(tp, 0, sizeof (*tp)); tp->StatusCode = status; tp->Function = MPI_FUNCTION_TARGET_STATUS_SEND; if (mpt->is_fc) { PTR_MPI_TARGET_FCP_CMD_BUFFER fc = (PTR_MPI_TARGET_FCP_CMD_BUFFER) cmd_vbuf; uint8_t *sts_vbuf; uint32_t *rsp; sts_vbuf = req->req_vbuf; sts_vbuf += MPT_RQSL(mpt); rsp = (uint32_t *) sts_vbuf; memcpy(tp->LUN, fc->FcpLun, sizeof (tp->LUN)); /* * The MPI_TARGET_FCP_RSP_BUFFER define is unfortunate. * It has to be big-endian in memory and is organized * in 32 bit words, which are much easier to deal with * as words which are swizzled as needed. * * All we're filling here is the FC_RSP payload. * We may just have the chip synthesize it if * we have no residual and an OK status. * */ memset(rsp, 0, sizeof (MPI_TARGET_FCP_RSP_BUFFER)); rsp[2] = htobe32(status); #define MIN_FCP_RESPONSE_SIZE 24 #ifndef WE_TRUST_AUTO_GOOD_STATUS resplen = MIN_FCP_RESPONSE_SIZE; #endif if (tgt->resid < 0) { rsp[2] |= htobe32(0x400); /* XXXX NEED MNEMONIC!!!! */ rsp[3] = htobe32(-tgt->resid); resplen = MIN_FCP_RESPONSE_SIZE; } else if (tgt->resid > 0) { rsp[2] |= htobe32(0x800); /* XXXX NEED MNEMONIC!!!! */ rsp[3] = htobe32(tgt->resid); resplen = MIN_FCP_RESPONSE_SIZE; } if (sense_len > 0) { rsp[2] |= htobe32(0x200); /* XXXX NEED MNEMONIC!!!! 
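 * (0x200 here appears to be FCP_SNS_LEN_VALID in the FCP_RSP flags
 * byte, just as 0x400/0x800 above look like FCP_RESID_OVER and
 * FCP_RESID_UNDER)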
*/ rsp[4] = htobe32(sense_len); memcpy(&rsp[6], sense_data, sense_len); resplen = MIN_FCP_RESPONSE_SIZE + sense_len; } } else if (mpt->is_sas) { PTR_MPI_TARGET_SSP_CMD_BUFFER ssp = (PTR_MPI_TARGET_SSP_CMD_BUFFER) cmd_vbuf; memcpy(tp->LUN, ssp->LogicalUnitNumber, sizeof (tp->LUN)); } else { PTR_MPI_TARGET_SCSI_SPI_CMD_BUFFER sp = (PTR_MPI_TARGET_SCSI_SPI_CMD_BUFFER) cmd_vbuf; tp->QueueTag = htole16(sp->Tag); memcpy(tp->LUN, sp->LogicalUnitNumber, sizeof (tp->LUN)); } tp->ReplyWord = htole32(tgt->reply_desc); tp->MsgContext = htole32(req->index | mpt->scsi_tgt_handler_id); #ifdef WE_CAN_USE_AUTO_REPOST tp->MsgFlags = TARGET_STATUS_SEND_FLAGS_REPOST_CMD_BUFFER; #endif if (status == SCSI_STATUS_OK && resplen == 0) { tp->MsgFlags |= TARGET_STATUS_SEND_FLAGS_AUTO_GOOD_STATUS; } else { tp->StatusDataSGE.u.Address32 = htole32((uint32_t) paddr); fl = MPI_SGE_FLAGS_HOST_TO_IOC | MPI_SGE_FLAGS_SIMPLE_ELEMENT | MPI_SGE_FLAGS_LAST_ELEMENT | MPI_SGE_FLAGS_END_OF_LIST | MPI_SGE_FLAGS_END_OF_BUFFER; fl <<= MPI_SGE_FLAGS_SHIFT; fl |= resplen; tp->StatusDataSGE.FlagsLength = htole32(fl); } mpt_lprt(mpt, MPT_PRT_DEBUG, "STATUS_CCB %p (with%s sense) tag %x req %p:%u resid %u\n", ccb, sense_len > 0 ? "" : "out", tgt->tag_id, req, req->serno, tgt->resid); if (mpt->verbose > MPT_PRT_DEBUG) mpt_print_request(req->req_vbuf); if (ccb) { ccb->ccb_h.status = CAM_SIM_QUEUED | CAM_REQ_INPROG; mpt_req_timeout(req, SBT_1S * 60, mpt_timeout, ccb); } mpt_send_cmd(mpt, req); } static void mpt_scsi_tgt_tsk_mgmt(struct mpt_softc *mpt, request_t *req, mpt_task_mgmt_t fc, tgt_resource_t *trtp, int init_id) { struct ccb_immediate_notify *inot; mpt_tgt_state_t *tgt; tgt = MPT_TGT_STATE(mpt, req); inot = (struct ccb_immediate_notify *) STAILQ_FIRST(&trtp->inots); if (inot == NULL) { mpt_lprt(mpt, MPT_PRT_WARN, "no INOTSs- sending back BSY\n"); mpt_scsi_tgt_status(mpt, NULL, req, SCSI_STATUS_BUSY, NULL, 0); return; } STAILQ_REMOVE_HEAD(&trtp->inots, sim_links.stqe); mpt_lprt(mpt, MPT_PRT_DEBUG1, "Get FREE INOT %p lun %jx\n", inot, (uintmax_t)inot->ccb_h.target_lun); inot->initiator_id = init_id; /* XXX */ inot->tag_id = tgt->tag_id; inot->seq_id = 0; /* * This is a somewhat grotesque attempt to map from task management * to old style SCSI messages. God help us all. */ switch (fc) { case MPT_QUERY_TASK_SET: inot->arg = MSG_QUERY_TASK_SET; break; case MPT_ABORT_TASK_SET: inot->arg = MSG_ABORT_TASK_SET; break; case MPT_CLEAR_TASK_SET: inot->arg = MSG_CLEAR_TASK_SET; break; case MPT_QUERY_ASYNC_EVENT: inot->arg = MSG_QUERY_ASYNC_EVENT; break; case MPT_LOGICAL_UNIT_RESET: inot->arg = MSG_LOGICAL_UNIT_RESET; break; case MPT_TARGET_RESET: inot->arg = MSG_TARGET_RESET; break; case MPT_CLEAR_ACA: inot->arg = MSG_CLEAR_ACA; break; default: inot->arg = MSG_NOOP; break; } tgt->ccb = (union ccb *) inot; inot->ccb_h.status = CAM_MESSAGE_RECV; xpt_done((union ccb *)inot); } static void mpt_scsi_tgt_atio(struct mpt_softc *mpt, request_t *req, uint32_t reply_desc) { static uint8_t null_iqd[SHORT_INQUIRY_LENGTH] = { 0x7f, 0x00, 0x02, 0x02, 0x20, 0x00, 0x00, 0x32, 'F', 'R', 'E', 'E', 'B', 'S', 'D', ' ', 'L', 'S', 'I', '-', 'L', 'O', 'G', 'I', 'C', ' ', 'N', 'U', 'L', 'D', 'E', 'V', '0', '0', '0', '1' }; struct ccb_accept_tio *atiop; lun_id_t lun; int tag_action = 0; mpt_tgt_state_t *tgt; tgt_resource_t *trtp = NULL; U8 *lunptr; U8 *vbuf; U16 ioindex; mpt_task_mgmt_t fct = MPT_NIL_TMT_VALUE; uint8_t *cdbp; /* * Stash info for the current command where we can get at it later. 
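 * The inbound command buffer posted by mpt_post_target_command() sits
 * in the second half of the request area, hence the MPT_RQSL(mpt)
 * offset applied below.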
*/ vbuf = req->req_vbuf; vbuf += MPT_RQSL(mpt); if (mpt->verbose >= MPT_PRT_DEBUG) { mpt_dump_data(mpt, "mpt_scsi_tgt_atio response", vbuf, max(sizeof (MPI_TARGET_FCP_CMD_BUFFER), max(sizeof (MPI_TARGET_SSP_CMD_BUFFER), sizeof (MPI_TARGET_SCSI_SPI_CMD_BUFFER)))); } /* * Get our state pointer set up. */ tgt = MPT_TGT_STATE(mpt, req); if (tgt->state != TGT_STATE_LOADED) { mpt_tgt_dump_req_state(mpt, req); panic("bad target state in mpt_scsi_tgt_atio"); } memset(tgt, 0, sizeof (mpt_tgt_state_t)); tgt->state = TGT_STATE_IN_CAM; tgt->reply_desc = reply_desc; ioindex = GET_IO_INDEX(reply_desc); /* * The tag we construct here allows us to find the * original request that the command came in with. * * This way we don't have to depend on anything but the * tag to find things when CCBs show back up from CAM. */ tgt->tag_id = MPT_MAKE_TAGID(mpt, req, ioindex); if (mpt->is_fc) { PTR_MPI_TARGET_FCP_CMD_BUFFER fc; fc = (PTR_MPI_TARGET_FCP_CMD_BUFFER) vbuf; if (fc->FcpCntl[2]) { /* * Task Management Request */ switch (fc->FcpCntl[2]) { case 0x1: fct = MPT_QUERY_TASK_SET; break; case 0x2: fct = MPT_ABORT_TASK_SET; break; case 0x4: fct = MPT_CLEAR_TASK_SET; break; case 0x8: fct = MPT_QUERY_ASYNC_EVENT; break; case 0x10: fct = MPT_LOGICAL_UNIT_RESET; break; case 0x20: fct = MPT_TARGET_RESET; break; case 0x40: fct = MPT_CLEAR_ACA; break; default: mpt_prt(mpt, "CORRUPTED TASK MGMT BITS: 0x%x\n", fc->FcpCntl[2]); mpt_scsi_tgt_status(mpt, NULL, req, SCSI_STATUS_OK, NULL, 0); return; } } else { switch (fc->FcpCntl[1]) { case 0: tag_action = MSG_SIMPLE_Q_TAG; break; case 1: tag_action = MSG_HEAD_OF_Q_TAG; break; case 2: tag_action = MSG_ORDERED_Q_TAG; break; default: /* * Bah. Ignore Untagged Queing and ACA */ tag_action = MSG_SIMPLE_Q_TAG; break; } } tgt->resid = be32toh(fc->FcpDl); cdbp = fc->FcpCdb; lunptr = fc->FcpLun; tgt->itag = fc->OptionalOxid; } else if (mpt->is_sas) { PTR_MPI_TARGET_SSP_CMD_BUFFER ssp; ssp = (PTR_MPI_TARGET_SSP_CMD_BUFFER) vbuf; cdbp = ssp->CDB; lunptr = ssp->LogicalUnitNumber; tgt->itag = ssp->InitiatorTag; } else { PTR_MPI_TARGET_SCSI_SPI_CMD_BUFFER sp; sp = (PTR_MPI_TARGET_SCSI_SPI_CMD_BUFFER) vbuf; cdbp = sp->CDB; lunptr = sp->LogicalUnitNumber; tgt->itag = sp->Tag; } lun = CAM_EXTLUN_BYTE_SWIZZLE(be64dec(lunptr)); /* * Deal with non-enabled or bad luns here. */ if (lun >= MPT_MAX_LUNS || mpt->tenabled == 0 || mpt->trt[lun].enabled == 0) { if (mpt->twildcard) { trtp = &mpt->trt_wildcard; } else if (fct == MPT_NIL_TMT_VALUE) { /* * In this case, we haven't got an upstream listener * for either a specific lun or wildcard luns. We * have to make some sensible response. For regular * inquiry, just return some NOT HERE inquiry data. * For VPD inquiry, report illegal field in cdb. * For REQUEST SENSE, just return NO SENSE data. * REPORT LUNS gets illegal command. * All other commands get 'no such device'. 
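 * The sense data built below uses sense key 0x5 (ILLEGAL REQUEST),
 * with ASC 0x25 (logical unit not supported) for unrecognized commands
 * and ASC/ASCQ 0x26/0x01 for the rejected INQUIRY case; REQUEST SENSE
 * itself gets NO SENSE data back.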
*/ uint8_t sense[MPT_SENSE_SIZE]; size_t len; memset(sense, 0, sizeof(sense)); sense[0] = 0xf0; sense[2] = 0x5; sense[7] = 0x8; switch (cdbp[0]) { case INQUIRY: { if (cdbp[1] != 0) { sense[12] = 0x26; sense[13] = 0x01; break; } len = min(tgt->resid, cdbp[4]); len = min(len, sizeof (null_iqd)); mpt_lprt(mpt, MPT_PRT_DEBUG, "local inquiry %ld bytes\n", (long) len); mpt_scsi_tgt_local(mpt, req, lun, 1, null_iqd, len); return; } case REQUEST_SENSE: { sense[2] = 0x0; len = min(tgt->resid, cdbp[4]); len = min(len, sizeof (sense)); mpt_lprt(mpt, MPT_PRT_DEBUG, "local reqsense %ld bytes\n", (long) len); mpt_scsi_tgt_local(mpt, req, lun, 1, sense, len); return; } case REPORT_LUNS: mpt_lprt(mpt, MPT_PRT_DEBUG, "REPORT LUNS\n"); sense[12] = 0x26; return; default: mpt_lprt(mpt, MPT_PRT_DEBUG, "CMD 0x%x to unmanaged lun %jx\n", cdbp[0], (uintmax_t)lun); sense[12] = 0x25; break; } mpt_scsi_tgt_status(mpt, NULL, req, SCSI_STATUS_CHECK_COND, sense, sizeof(sense)); return; } /* otherwise, leave trtp NULL */ } else { trtp = &mpt->trt[lun]; } /* * Deal with any task management */ if (fct != MPT_NIL_TMT_VALUE) { if (trtp == NULL) { mpt_prt(mpt, "task mgmt function %x but no listener\n", fct); mpt_scsi_tgt_status(mpt, NULL, req, SCSI_STATUS_OK, NULL, 0); } else { mpt_scsi_tgt_tsk_mgmt(mpt, req, fct, trtp, GET_INITIATOR_INDEX(reply_desc)); } return; } atiop = (struct ccb_accept_tio *) STAILQ_FIRST(&trtp->atios); if (atiop == NULL) { mpt_lprt(mpt, MPT_PRT_WARN, "no ATIOs for lun %jx- sending back %s\n", (uintmax_t)lun, mpt->tenabled? "QUEUE FULL" : "BUSY"); mpt_scsi_tgt_status(mpt, NULL, req, mpt->tenabled? SCSI_STATUS_QUEUE_FULL : SCSI_STATUS_BUSY, NULL, 0); return; } STAILQ_REMOVE_HEAD(&trtp->atios, sim_links.stqe); mpt_lprt(mpt, MPT_PRT_DEBUG1, "Get FREE ATIO %p lun %jx\n", atiop, (uintmax_t)atiop->ccb_h.target_lun); atiop->ccb_h.ccb_mpt_ptr = mpt; atiop->ccb_h.status = CAM_CDB_RECVD; atiop->ccb_h.target_lun = lun; atiop->sense_len = 0; atiop->tag_id = tgt->tag_id; atiop->init_id = GET_INITIATOR_INDEX(reply_desc); atiop->cdb_len = 16; memcpy(atiop->cdb_io.cdb_bytes, cdbp, atiop->cdb_len); if (tag_action) { atiop->tag_action = tag_action; atiop->ccb_h.flags |= CAM_TAG_ACTION_VALID; } if (mpt->verbose >= MPT_PRT_DEBUG) { int i; mpt_prt(mpt, "START_CCB %p for lun %jx CDB=<", atiop, (uintmax_t)atiop->ccb_h.target_lun); for (i = 0; i < atiop->cdb_len; i++) { mpt_prtc(mpt, "%02x%c", cdbp[i] & 0xff, (i == (atiop->cdb_len - 1))? '>' : ' '); } mpt_prtc(mpt, " itag %x tag %x rdesc %x dl=%u\n", tgt->itag, tgt->tag_id, tgt->reply_desc, tgt->resid); } xpt_done((union ccb *)atiop); } static void mpt_tgt_dump_tgt_state(struct mpt_softc *mpt, request_t *req) { mpt_tgt_state_t *tgt = MPT_TGT_STATE(mpt, req); mpt_prt(mpt, "req %p:%u tgt:rdesc 0x%x resid %u xfrd %u ccb %p treq %p " "nx %d tag 0x%08x itag 0x%04x state=%d\n", req, req->serno, tgt->reply_desc, tgt->resid, tgt->bytes_xfered, tgt->ccb, tgt->req, tgt->nxfers, tgt->tag_id, tgt->itag, tgt->state); } static void mpt_tgt_dump_req_state(struct mpt_softc *mpt, request_t *req) { mpt_prt(mpt, "req %p:%u index %u (%x) state %x\n", req, req->serno, req->index, req->index, req->state); mpt_tgt_dump_tgt_state(mpt, req); } static int mpt_scsi_tgt_reply_handler(struct mpt_softc *mpt, request_t *req, uint32_t reply_desc, MSG_DEFAULT_REPLY *reply_frame) { int dbg; union ccb *ccb; U16 status; if (reply_frame == NULL) { /* * Figure out what the state of the command is. 
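 * A NULL reply_frame means this was a turbo (context-only) reply, so
 * the per-buffer target state machine below decides whether this is a
 * newly arrived command, the end of a data move, or the end of a
 * status send.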
*/ mpt_tgt_state_t *tgt = MPT_TGT_STATE(mpt, req); #ifdef INVARIANTS mpt_req_spcl(mpt, req, "turbo scsi_tgt_reply", __LINE__); if (tgt->req) { mpt_req_not_spcl(mpt, tgt->req, "turbo scsi_tgt_reply associated req", __LINE__); } #endif switch(tgt->state) { case TGT_STATE_LOADED: /* * This is a new command starting. */ mpt_scsi_tgt_atio(mpt, req, reply_desc); break; case TGT_STATE_MOVING_DATA: { ccb = tgt->ccb; if (tgt->req == NULL) { panic("mpt: turbo target reply with null " "associated request moving data"); /* NOTREACHED */ } if (ccb == NULL) { if (tgt->is_local == 0) { panic("mpt: turbo target reply with " "null associated ccb moving data"); /* NOTREACHED */ } mpt_lprt(mpt, MPT_PRT_DEBUG, "TARGET_ASSIST local done\n"); TAILQ_REMOVE(&mpt->request_pending_list, tgt->req, links); mpt_free_request(mpt, tgt->req); tgt->req = NULL; mpt_scsi_tgt_status(mpt, NULL, req, 0, NULL, 0); return (TRUE); } tgt->ccb = NULL; tgt->nxfers++; mpt_req_untimeout(tgt->req, mpt_timeout, ccb); mpt_lprt(mpt, MPT_PRT_DEBUG, "TARGET_ASSIST %p (req %p:%u) done tag 0x%x\n", ccb, tgt->req, tgt->req->serno, ccb->csio.tag_id); /* * Free the Target Assist Request */ KASSERT(tgt->req->ccb == ccb, ("tgt->req %p:%u tgt->req->ccb %p", tgt->req, tgt->req->serno, tgt->req->ccb)); TAILQ_REMOVE(&mpt->request_pending_list, tgt->req, links); mpt_free_request(mpt, tgt->req); tgt->req = NULL; /* * Do we need to send status now? That is, are * we done with all our data transfers? */ if ((ccb->ccb_h.flags & CAM_SEND_STATUS) == 0) { mpt_set_ccb_status(ccb, CAM_REQ_CMP); ccb->ccb_h.status &= ~CAM_SIM_QUEUED; KASSERT(ccb->ccb_h.status, ("zero ccb sts at %d", __LINE__)); tgt->state = TGT_STATE_IN_CAM; if (mpt->outofbeer) { ccb->ccb_h.status |= CAM_RELEASE_SIMQ; mpt->outofbeer = 0; mpt_lprt(mpt, MPT_PRT_DEBUG, "THAWQ\n"); } xpt_done(ccb); break; } /* * Otherwise, send status (and sense) */ mpt_scsi_tgt_status(mpt, ccb, req, ccb->csio.scsi_status, (void *)&ccb->csio.sense_data, (ccb->ccb_h.flags & CAM_SEND_SENSE) ? ccb->csio.sense_len : 0); break; } case TGT_STATE_SENDING_STATUS: case TGT_STATE_MOVING_DATA_AND_STATUS: { int ioindex; ccb = tgt->ccb; if (tgt->req == NULL) { panic("mpt: turbo target reply with null " "associated request sending status"); /* NOTREACHED */ } if (ccb) { tgt->ccb = NULL; if (tgt->state == TGT_STATE_MOVING_DATA_AND_STATUS) { tgt->nxfers++; } mpt_req_untimeout(tgt->req, mpt_timeout, ccb); if (ccb->ccb_h.flags & CAM_SEND_SENSE) { ccb->ccb_h.status |= CAM_SENT_SENSE; } mpt_lprt(mpt, MPT_PRT_DEBUG, "TARGET_STATUS tag %x sts %x flgs %x req " "%p\n", ccb->csio.tag_id, ccb->ccb_h.status, ccb->ccb_h.flags, tgt->req); /* * Free the Target Send Status Request */ KASSERT(tgt->req->ccb == ccb, ("tgt->req %p:%u tgt->req->ccb %p", tgt->req, tgt->req->serno, tgt->req->ccb)); /* * Notify CAM that we're done */ mpt_set_ccb_status(ccb, CAM_REQ_CMP); ccb->ccb_h.status &= ~CAM_SIM_QUEUED; KASSERT(ccb->ccb_h.status, ("ZERO ccb sts at %d", __LINE__)); tgt->ccb = NULL; } else { mpt_lprt(mpt, MPT_PRT_DEBUG, "TARGET_STATUS non-CAM for req %p:%u\n", tgt->req, tgt->req->serno); } TAILQ_REMOVE(&mpt->request_pending_list, tgt->req, links); mpt_free_request(mpt, tgt->req); tgt->req = NULL; /* * And re-post the Command Buffer. * This will reset the state. 
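 * (mpt_post_target_command() puts the buffer back into
 * TGT_STATE_LOADING; the reply to that post then moves it to
 * TGT_STATE_LOADED, ready for the next command)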
*/ ioindex = GET_IO_INDEX(reply_desc); TAILQ_REMOVE(&mpt->request_pending_list, req, links); tgt->is_local = 0; mpt_post_target_command(mpt, req, ioindex); /* * And post a done for anyone who cares */ if (ccb) { if (mpt->outofbeer) { ccb->ccb_h.status |= CAM_RELEASE_SIMQ; mpt->outofbeer = 0; mpt_lprt(mpt, MPT_PRT_DEBUG, "THAWQ\n"); } xpt_done(ccb); } break; } case TGT_STATE_NIL: /* XXX This Never Happens XXX */ tgt->state = TGT_STATE_LOADED; break; default: mpt_prt(mpt, "Unknown Target State 0x%x in Context " "Reply Function\n", tgt->state); } return (TRUE); } status = le16toh(reply_frame->IOCStatus); if (status != MPI_IOCSTATUS_SUCCESS) { dbg = MPT_PRT_ERROR; } else { dbg = MPT_PRT_DEBUG1; } mpt_lprt(mpt, dbg, "SCSI_TGT REPLY: req=%p:%u reply=%p func=%x IOCstatus 0x%x\n", req, req->serno, reply_frame, reply_frame->Function, status); switch (reply_frame->Function) { case MPI_FUNCTION_TARGET_CMD_BUFFER_POST: { mpt_tgt_state_t *tgt; #ifdef INVARIANTS mpt_req_spcl(mpt, req, "tgt reply BUFFER POST", __LINE__); #endif if (status != MPI_IOCSTATUS_SUCCESS) { /* * XXX What to do? */ break; } tgt = MPT_TGT_STATE(mpt, req); KASSERT(tgt->state == TGT_STATE_LOADING, ("bad state 0x%x on reply to buffer post", tgt->state)); mpt_assign_serno(mpt, req); tgt->state = TGT_STATE_LOADED; break; } case MPI_FUNCTION_TARGET_ASSIST: #ifdef INVARIANTS mpt_req_not_spcl(mpt, req, "tgt reply TARGET ASSIST", __LINE__); #endif mpt_prt(mpt, "target assist completion\n"); TAILQ_REMOVE(&mpt->request_pending_list, req, links); mpt_free_request(mpt, req); break; case MPI_FUNCTION_TARGET_STATUS_SEND: #ifdef INVARIANTS mpt_req_not_spcl(mpt, req, "tgt reply STATUS SEND", __LINE__); #endif mpt_prt(mpt, "status send completion\n"); TAILQ_REMOVE(&mpt->request_pending_list, req, links); mpt_free_request(mpt, req); break; case MPI_FUNCTION_TARGET_MODE_ABORT: { PTR_MSG_TARGET_MODE_ABORT_REPLY abtrp = (PTR_MSG_TARGET_MODE_ABORT_REPLY) reply_frame; PTR_MSG_TARGET_MODE_ABORT abtp = (PTR_MSG_TARGET_MODE_ABORT) req->req_vbuf; uint32_t cc = GET_IO_INDEX(le32toh(abtp->ReplyWord)); #ifdef INVARIANTS mpt_req_not_spcl(mpt, req, "tgt reply TMODE ABORT", __LINE__); #endif mpt_prt(mpt, "ABORT RX_ID 0x%x Complete; status 0x%x cnt %u\n", cc, le16toh(abtrp->IOCStatus), le32toh(abtrp->AbortCount)); TAILQ_REMOVE(&mpt->request_pending_list, req, links); mpt_free_request(mpt, req); break; } default: mpt_prt(mpt, "Unknown Target Address Reply Function code: " "0x%x\n", reply_frame->Function); break; } return (TRUE); } Index: head/sys/dev/mrsas/mrsas.c =================================================================== --- head/sys/dev/mrsas/mrsas.c (revision 327948) +++ head/sys/dev/mrsas/mrsas.c (revision 327949) @@ -1,4601 +1,4602 @@ /* * Copyright (c) 2015, AVAGO Tech. All rights reserved. Author: Marian Choy * Copyright (c) 2014, LSI Corp. All rights reserved. Author: Marian Choy * Support: freebsdraid@avagotech.com * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are * met: * * 1. Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. 2. Redistributions * in binary form must reproduce the above copyright notice, this list of * conditions and the following disclaimer in the documentation and/or other * materials provided with the distribution. 3. 
Neither the name of the * nor the names of its contributors may be used to endorse or * promote products derived from this software without specific prior written * permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. * * The views and conclusions contained in the software and documentation are * those of the authors and should not be interpreted as representing * official policies,either expressed or implied, of the FreeBSD Project. * * Send feedback to: Mail to: AVAGO TECHNOLOGIES 1621 * Barber Lane, Milpitas, CA 95035 ATTN: MegaRaid FreeBSD * */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include /* * Function prototypes */ static d_open_t mrsas_open; static d_close_t mrsas_close; static d_read_t mrsas_read; static d_write_t mrsas_write; static d_ioctl_t mrsas_ioctl; static d_poll_t mrsas_poll; static void mrsas_ich_startup(void *arg); static struct mrsas_mgmt_info mrsas_mgmt_info; static struct mrsas_ident *mrsas_find_ident(device_t); static int mrsas_setup_msix(struct mrsas_softc *sc); static int mrsas_allocate_msix(struct mrsas_softc *sc); static void mrsas_shutdown_ctlr(struct mrsas_softc *sc, u_int32_t opcode); static void mrsas_flush_cache(struct mrsas_softc *sc); static void mrsas_reset_reply_desc(struct mrsas_softc *sc); static void mrsas_ocr_thread(void *arg); static int mrsas_get_map_info(struct mrsas_softc *sc); static int mrsas_get_ld_map_info(struct mrsas_softc *sc); static int mrsas_sync_map_info(struct mrsas_softc *sc); static int mrsas_get_pd_list(struct mrsas_softc *sc); static int mrsas_get_ld_list(struct mrsas_softc *sc); static int mrsas_setup_irq(struct mrsas_softc *sc); static int mrsas_alloc_mem(struct mrsas_softc *sc); static int mrsas_init_fw(struct mrsas_softc *sc); static int mrsas_setup_raidmap(struct mrsas_softc *sc); static void megasas_setup_jbod_map(struct mrsas_softc *sc); static int megasas_sync_pd_seq_num(struct mrsas_softc *sc, boolean_t pend); static int mrsas_clear_intr(struct mrsas_softc *sc); static int mrsas_get_ctrl_info(struct mrsas_softc *sc); static void mrsas_update_ext_vd_details(struct mrsas_softc *sc); static int mrsas_issue_blocked_abort_cmd(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd_to_abort); static struct mrsas_softc * mrsas_get_softc_instance(struct cdev *dev, u_long cmd, caddr_t arg); u_int32_t mrsas_read_reg(struct mrsas_softc *sc, int offset); u_int8_t mrsas_build_mptmfi_passthru(struct mrsas_softc *sc, struct mrsas_mfi_cmd *mfi_cmd); void mrsas_complete_outstanding_ioctls(struct mrsas_softc *sc); int mrsas_transition_to_ready(struct mrsas_softc *sc, int ocr); int mrsas_init_adapter(struct mrsas_softc *sc); int mrsas_alloc_mpt_cmds(struct mrsas_softc *sc); int mrsas_alloc_ioc_cmd(struct mrsas_softc *sc); int 
mrsas_alloc_ctlr_info_cmd(struct mrsas_softc *sc); int mrsas_ioc_init(struct mrsas_softc *sc); int mrsas_bus_scan(struct mrsas_softc *sc); int mrsas_issue_dcmd(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd); int mrsas_issue_polled(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd); int mrsas_reset_ctrl(struct mrsas_softc *sc, u_int8_t reset_reason); int mrsas_wait_for_outstanding(struct mrsas_softc *sc, u_int8_t check_reason); int mrsas_complete_cmd(struct mrsas_softc *sc, u_int32_t MSIxIndex); int mrsas_reset_targets(struct mrsas_softc *sc); int mrsas_issue_blocked_cmd(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd); int mrsas_alloc_tmp_dcmd(struct mrsas_softc *sc, struct mrsas_tmp_dcmd *tcmd, int size); void mrsas_release_mfi_cmd(struct mrsas_mfi_cmd *cmd); void mrsas_wakeup(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd); void mrsas_complete_aen(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd); void mrsas_complete_abort(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd); void mrsas_disable_intr(struct mrsas_softc *sc); void mrsas_enable_intr(struct mrsas_softc *sc); void mrsas_free_ioc_cmd(struct mrsas_softc *sc); void mrsas_free_mem(struct mrsas_softc *sc); void mrsas_free_tmp_dcmd(struct mrsas_tmp_dcmd *tmp); void mrsas_isr(void *arg); void mrsas_teardown_intr(struct mrsas_softc *sc); void mrsas_addr_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error); void mrsas_kill_hba(struct mrsas_softc *sc); void mrsas_aen_handler(struct mrsas_softc *sc); void mrsas_write_reg(struct mrsas_softc *sc, int offset, u_int32_t value); void mrsas_fire_cmd(struct mrsas_softc *sc, u_int32_t req_desc_lo, u_int32_t req_desc_hi); void mrsas_free_ctlr_info_cmd(struct mrsas_softc *sc); void mrsas_complete_mptmfi_passthru(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd, u_int8_t status); void mrsas_map_mpt_cmd_status(struct mrsas_mpt_cmd *cmd, u_int8_t status, u_int8_t extStatus); struct mrsas_mfi_cmd *mrsas_get_mfi_cmd(struct mrsas_softc *sc); MRSAS_REQUEST_DESCRIPTOR_UNION *mrsas_build_mpt_cmd (struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd); extern int mrsas_cam_attach(struct mrsas_softc *sc); extern void mrsas_cam_detach(struct mrsas_softc *sc); extern void mrsas_cmd_done(struct mrsas_softc *sc, struct mrsas_mpt_cmd *cmd); extern void mrsas_free_frame(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd); extern int mrsas_alloc_mfi_cmds(struct mrsas_softc *sc); extern struct mrsas_mpt_cmd *mrsas_get_mpt_cmd(struct mrsas_softc *sc); extern int mrsas_passthru(struct mrsas_softc *sc, void *arg, u_long ioctlCmd); extern uint8_t MR_ValidateMapInfo(struct mrsas_softc *sc); extern u_int16_t MR_GetLDTgtId(u_int32_t ld, MR_DRV_RAID_MAP_ALL * map); extern MR_LD_RAID *MR_LdRaidGet(u_int32_t ld, MR_DRV_RAID_MAP_ALL * map); extern void mrsas_xpt_freeze(struct mrsas_softc *sc); extern void mrsas_xpt_release(struct mrsas_softc *sc); extern MRSAS_REQUEST_DESCRIPTOR_UNION * mrsas_get_request_desc(struct mrsas_softc *sc, u_int16_t index); extern int mrsas_bus_scan_sim(struct mrsas_softc *sc, struct cam_sim *sim); static int mrsas_alloc_evt_log_info_cmd(struct mrsas_softc *sc); static void mrsas_free_evt_log_info_cmd(struct mrsas_softc *sc); SYSCTL_NODE(_hw, OID_AUTO, mrsas, CTLFLAG_RD, 0, "MRSAS Driver Parameters"); /* * PCI device struct and table * */ typedef struct mrsas_ident { uint16_t vendor; uint16_t device; uint16_t subvendor; uint16_t subdevice; const char *desc; } MRSAS_CTLR_ID; MRSAS_CTLR_ID device_table[] = { {0x1000, MRSAS_TBOLT, 0xffff, 0xffff, "AVAGO Thunderbolt SAS Controller"}, 
{0x1000, MRSAS_INVADER, 0xffff, 0xffff, "AVAGO Invader SAS Controller"}, {0x1000, MRSAS_FURY, 0xffff, 0xffff, "AVAGO Fury SAS Controller"}, {0x1000, MRSAS_INTRUDER, 0xffff, 0xffff, "AVAGO Intruder SAS Controller"}, {0x1000, MRSAS_INTRUDER_24, 0xffff, 0xffff, "AVAGO Intruder_24 SAS Controller"}, {0x1000, MRSAS_CUTLASS_52, 0xffff, 0xffff, "AVAGO Cutlass_52 SAS Controller"}, {0x1000, MRSAS_CUTLASS_53, 0xffff, 0xffff, "AVAGO Cutlass_53 SAS Controller"}, {0, 0, 0, 0, NULL} }; /* * Character device entry points * */ static struct cdevsw mrsas_cdevsw = { .d_version = D_VERSION, .d_open = mrsas_open, .d_close = mrsas_close, .d_read = mrsas_read, .d_write = mrsas_write, .d_ioctl = mrsas_ioctl, .d_poll = mrsas_poll, .d_name = "mrsas", }; MALLOC_DEFINE(M_MRSAS, "mrsasbuf", "Buffers for the MRSAS driver"); /* * In the cdevsw routines, we find our softc by using the si_drv1 member of * struct cdev. We set this variable to point to our softc in our attach * routine when we create the /dev entry. */ int mrsas_open(struct cdev *dev, int oflags, int devtype, struct thread *td) { struct mrsas_softc *sc; sc = dev->si_drv1; return (0); } int mrsas_close(struct cdev *dev, int fflag, int devtype, struct thread *td) { struct mrsas_softc *sc; sc = dev->si_drv1; return (0); } int mrsas_read(struct cdev *dev, struct uio *uio, int ioflag) { struct mrsas_softc *sc; sc = dev->si_drv1; return (0); } int mrsas_write(struct cdev *dev, struct uio *uio, int ioflag) { struct mrsas_softc *sc; sc = dev->si_drv1; return (0); } /* * Register Read/Write Functions * */ void mrsas_write_reg(struct mrsas_softc *sc, int offset, u_int32_t value) { bus_space_tag_t bus_tag = sc->bus_tag; bus_space_handle_t bus_handle = sc->bus_handle; bus_space_write_4(bus_tag, bus_handle, offset, value); } u_int32_t mrsas_read_reg(struct mrsas_softc *sc, int offset) { bus_space_tag_t bus_tag = sc->bus_tag; bus_space_handle_t bus_handle = sc->bus_handle; return ((u_int32_t)bus_space_read_4(bus_tag, bus_handle, offset)); } /* * Interrupt Disable/Enable/Clear Functions * */ void mrsas_disable_intr(struct mrsas_softc *sc) { u_int32_t mask = 0xFFFFFFFF; u_int32_t status; sc->mask_interrupts = 1; mrsas_write_reg(sc, offsetof(mrsas_reg_set, outbound_intr_mask), mask); /* Dummy read to force pci flush */ status = mrsas_read_reg(sc, offsetof(mrsas_reg_set, outbound_intr_mask)); } void mrsas_enable_intr(struct mrsas_softc *sc) { u_int32_t mask = MFI_FUSION_ENABLE_INTERRUPT_MASK; u_int32_t status; sc->mask_interrupts = 0; mrsas_write_reg(sc, offsetof(mrsas_reg_set, outbound_intr_status), ~0); status = mrsas_read_reg(sc, offsetof(mrsas_reg_set, outbound_intr_status)); mrsas_write_reg(sc, offsetof(mrsas_reg_set, outbound_intr_mask), ~mask); status = mrsas_read_reg(sc, offsetof(mrsas_reg_set, outbound_intr_mask)); } static int mrsas_clear_intr(struct mrsas_softc *sc) { u_int32_t status; /* Read received interrupt */ status = mrsas_read_reg(sc, offsetof(mrsas_reg_set, outbound_intr_status)); /* Not our interrupt, so just return */ if (!(status & MFI_FUSION_ENABLE_INTERRUPT_MASK)) return (0); /* We got a reply interrupt */ return (1); } /* * PCI Support Functions * */ static struct mrsas_ident * mrsas_find_ident(device_t dev) { struct mrsas_ident *pci_device; for (pci_device = device_table; pci_device->vendor != 0; pci_device++) { if ((pci_device->vendor == pci_get_vendor(dev)) && (pci_device->device == pci_get_device(dev)) && ((pci_device->subvendor == pci_get_subvendor(dev)) || (pci_device->subvendor == 0xffff)) && ((pci_device->subdevice == pci_get_subdevice(dev)) 
|| (pci_device->subdevice == 0xffff))) return (pci_device); } return (NULL); } static int mrsas_probe(device_t dev) { static u_int8_t first_ctrl = 1; struct mrsas_ident *id; if ((id = mrsas_find_ident(dev)) != NULL) { if (first_ctrl) { printf("AVAGO MegaRAID SAS FreeBSD mrsas driver version: %s\n", MRSAS_VERSION); first_ctrl = 0; } device_set_desc(dev, id->desc); /* between BUS_PROBE_DEFAULT and BUS_PROBE_LOW_PRIORITY */ return (-30); } return (ENXIO); } /* * mrsas_setup_sysctl: setup sysctl values for mrsas * input: Adapter instance soft state * * Setup sysctl entries for mrsas driver. */ static void mrsas_setup_sysctl(struct mrsas_softc *sc) { struct sysctl_ctx_list *sysctl_ctx = NULL; struct sysctl_oid *sysctl_tree = NULL; char tmpstr[80], tmpstr2[80]; /* * Setup the sysctl variable so the user can change the debug level * on the fly. */ snprintf(tmpstr, sizeof(tmpstr), "MRSAS controller %d", device_get_unit(sc->mrsas_dev)); snprintf(tmpstr2, sizeof(tmpstr2), "%d", device_get_unit(sc->mrsas_dev)); sysctl_ctx = device_get_sysctl_ctx(sc->mrsas_dev); if (sysctl_ctx != NULL) sysctl_tree = device_get_sysctl_tree(sc->mrsas_dev); if (sysctl_tree == NULL) { sysctl_ctx_init(&sc->sysctl_ctx); sc->sysctl_tree = SYSCTL_ADD_NODE(&sc->sysctl_ctx, SYSCTL_STATIC_CHILDREN(_hw_mrsas), OID_AUTO, tmpstr2, CTLFLAG_RD, 0, tmpstr); if (sc->sysctl_tree == NULL) return; sysctl_ctx = &sc->sysctl_ctx; sysctl_tree = sc->sysctl_tree; } SYSCTL_ADD_UINT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree), OID_AUTO, "disable_ocr", CTLFLAG_RW, &sc->disableOnlineCtrlReset, 0, "Disable the use of OCR"); SYSCTL_ADD_STRING(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree), OID_AUTO, "driver_version", CTLFLAG_RD, MRSAS_VERSION, strlen(MRSAS_VERSION), "driver version"); SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree), OID_AUTO, "reset_count", CTLFLAG_RD, &sc->reset_count, 0, "number of ocr from start of the day"); SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree), OID_AUTO, "fw_outstanding", CTLFLAG_RD, &sc->fw_outstanding.val_rdonly, 0, "FW outstanding commands"); SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree), OID_AUTO, "io_cmds_highwater", CTLFLAG_RD, &sc->io_cmds_highwater, 0, "Max FW outstanding commands"); SYSCTL_ADD_UINT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree), OID_AUTO, "mrsas_debug", CTLFLAG_RW, &sc->mrsas_debug, 0, "Driver debug level"); SYSCTL_ADD_UINT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree), OID_AUTO, "mrsas_io_timeout", CTLFLAG_RW, &sc->mrsas_io_timeout, 0, "Driver IO timeout value in mili-second."); SYSCTL_ADD_UINT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree), OID_AUTO, "mrsas_fw_fault_check_delay", CTLFLAG_RW, &sc->mrsas_fw_fault_check_delay, 0, "FW fault check thread delay in seconds. "); SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree), OID_AUTO, "reset_in_progress", CTLFLAG_RD, &sc->reset_in_progress, 0, "ocr in progress status"); SYSCTL_ADD_UINT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree), OID_AUTO, "block_sync_cache", CTLFLAG_RW, &sc->block_sync_cache, 0, "Block SYNC CACHE at driver. "); } /* * mrsas_get_tunables: get tunable parameters. * input: Adapter instance soft state * * Get tunable parameters. This will help to debug driver at boot time. */ static void mrsas_get_tunables(struct mrsas_softc *sc) { char tmpstr[80]; /* XXX default to some debugging for now */ sc->mrsas_debug = MRSAS_FAULT; sc->mrsas_io_timeout = MRSAS_IO_TIMEOUT; sc->mrsas_fw_fault_check_delay = 1; sc->reset_count = 0; sc->reset_in_progress = 0; sc->block_sync_cache = 0; /* * Grab the global variables. 
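 * These are fetched with TUNABLE_INT_FETCH, so they can be seeded from
 * loader.conf before boot, e.g. (illustrative values):
 *
 *   hw.mrsas.debug_level=1
 *   dev.mrsas.0.debug_level=1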
*/ TUNABLE_INT_FETCH("hw.mrsas.debug_level", &sc->mrsas_debug); /* * Grab the global variables. */ TUNABLE_INT_FETCH("hw.mrsas.lb_pending_cmds", &sc->lb_pending_cmds); /* Grab the unit-instance variables */ snprintf(tmpstr, sizeof(tmpstr), "dev.mrsas.%d.debug_level", device_get_unit(sc->mrsas_dev)); TUNABLE_INT_FETCH(tmpstr, &sc->mrsas_debug); } /* * mrsas_alloc_evt_log_info cmd: Allocates memory to get event log information. * Used to get sequence number at driver load time. * input: Adapter soft state * * Allocates DMAable memory for the event log info internal command. */ int mrsas_alloc_evt_log_info_cmd(struct mrsas_softc *sc) { int el_info_size; /* Allocate get event log info command */ el_info_size = sizeof(struct mrsas_evt_log_info); if (bus_dma_tag_create(sc->mrsas_parent_tag, 1, 0, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, el_info_size, 1, el_info_size, BUS_DMA_ALLOCNOW, NULL, NULL, &sc->el_info_tag)) { device_printf(sc->mrsas_dev, "Cannot allocate event log info tag\n"); return (ENOMEM); } if (bus_dmamem_alloc(sc->el_info_tag, (void **)&sc->el_info_mem, BUS_DMA_NOWAIT, &sc->el_info_dmamap)) { device_printf(sc->mrsas_dev, "Cannot allocate event log info cmd mem\n"); return (ENOMEM); } if (bus_dmamap_load(sc->el_info_tag, sc->el_info_dmamap, sc->el_info_mem, el_info_size, mrsas_addr_cb, &sc->el_info_phys_addr, BUS_DMA_NOWAIT)) { device_printf(sc->mrsas_dev, "Cannot load event log info cmd mem\n"); return (ENOMEM); } memset(sc->el_info_mem, 0, el_info_size); return (0); } /* * mrsas_free_evt_info_cmd: Free memory for Event log info command * input: Adapter soft state * * Deallocates memory for the event log info internal command. */ void mrsas_free_evt_log_info_cmd(struct mrsas_softc *sc) { if (sc->el_info_phys_addr) bus_dmamap_unload(sc->el_info_tag, sc->el_info_dmamap); if (sc->el_info_mem != NULL) bus_dmamem_free(sc->el_info_tag, sc->el_info_mem, sc->el_info_dmamap); if (sc->el_info_tag != NULL) bus_dma_tag_destroy(sc->el_info_tag); } /* * mrsas_get_seq_num: Get latest event sequence number * @sc: Adapter soft state * @eli: Firmware event log sequence number information. * * Firmware maintains a log of all events in a non-volatile area. * Driver get the sequence number using DCMD * "MR_DCMD_CTRL_EVENT_GET_INFO" at driver load time. 
*/ static int mrsas_get_seq_num(struct mrsas_softc *sc, struct mrsas_evt_log_info *eli) { struct mrsas_mfi_cmd *cmd; struct mrsas_dcmd_frame *dcmd; u_int8_t do_ocr = 1, retcode = 0; cmd = mrsas_get_mfi_cmd(sc); if (!cmd) { device_printf(sc->mrsas_dev, "Failed to get a free cmd\n"); return -ENOMEM; } dcmd = &cmd->frame->dcmd; if (mrsas_alloc_evt_log_info_cmd(sc) != SUCCESS) { device_printf(sc->mrsas_dev, "Cannot allocate evt log info cmd\n"); mrsas_release_mfi_cmd(cmd); return -ENOMEM; } memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE); dcmd->cmd = MFI_CMD_DCMD; dcmd->cmd_status = 0x0; dcmd->sge_count = 1; dcmd->flags = MFI_FRAME_DIR_READ; dcmd->timeout = 0; dcmd->pad_0 = 0; dcmd->data_xfer_len = sizeof(struct mrsas_evt_log_info); dcmd->opcode = MR_DCMD_CTRL_EVENT_GET_INFO; dcmd->sgl.sge32[0].phys_addr = sc->el_info_phys_addr; dcmd->sgl.sge32[0].length = sizeof(struct mrsas_evt_log_info); retcode = mrsas_issue_blocked_cmd(sc, cmd); if (retcode == ETIMEDOUT) goto dcmd_timeout; do_ocr = 0; /* * Copy the data back into callers buffer */ memcpy(eli, sc->el_info_mem, sizeof(struct mrsas_evt_log_info)); mrsas_free_evt_log_info_cmd(sc); dcmd_timeout: if (do_ocr) sc->do_timedout_reset = MFI_DCMD_TIMEOUT_OCR; else mrsas_release_mfi_cmd(cmd); return retcode; } /* * mrsas_register_aen: Register for asynchronous event notification * @sc: Adapter soft state * @seq_num: Starting sequence number * @class_locale: Class of the event * * This function subscribes for events beyond the @seq_num * and type @class_locale. * */ static int mrsas_register_aen(struct mrsas_softc *sc, u_int32_t seq_num, u_int32_t class_locale_word) { int ret_val; struct mrsas_mfi_cmd *cmd; struct mrsas_dcmd_frame *dcmd; union mrsas_evt_class_locale curr_aen; union mrsas_evt_class_locale prev_aen; /* * If there an AEN pending already (aen_cmd), check if the * class_locale of that pending AEN is inclusive of the new AEN * request we currently have. If it is, then we don't have to do * anything. In other words, whichever events the current AEN request * is subscribing to, have already been subscribed to. If the old_cmd * is _not_ inclusive, then we have to abort that command, form a * class_locale that is superset of both old and current and re-issue * to the FW */ curr_aen.word = class_locale_word; if (sc->aen_cmd) { prev_aen.word = sc->aen_cmd->frame->dcmd.mbox.w[1]; /* * A class whose enum value is smaller is inclusive of all * higher values. If a PROGRESS (= -1) was previously * registered, then a new registration requests for higher * classes need not be sent to FW. They are automatically * included. Locale numbers don't have such hierarchy. They * are bitmap values */ if ((prev_aen.members.class <= curr_aen.members.class) && !((prev_aen.members.locale & curr_aen.members.locale) ^ curr_aen.members.locale)) { /* * Previously issued event registration includes * current request. Nothing to do. 
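 * (e.g. an earlier registration for class PROGRESS and locale ALL
 * already covers any narrower class/locale request, so it is simply
 * left in place)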
*/ return 0; } else { curr_aen.members.locale |= prev_aen.members.locale; if (prev_aen.members.class < curr_aen.members.class) curr_aen.members.class = prev_aen.members.class; sc->aen_cmd->abort_aen = 1; ret_val = mrsas_issue_blocked_abort_cmd(sc, sc->aen_cmd); if (ret_val) { printf("mrsas: Failed to abort previous AEN command\n"); return ret_val; } else sc->aen_cmd = NULL; } } cmd = mrsas_get_mfi_cmd(sc); if (!cmd) return ENOMEM; dcmd = &cmd->frame->dcmd; memset(sc->evt_detail_mem, 0, sizeof(struct mrsas_evt_detail)); /* * Prepare DCMD for aen registration */ memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE); dcmd->cmd = MFI_CMD_DCMD; dcmd->cmd_status = 0x0; dcmd->sge_count = 1; dcmd->flags = MFI_FRAME_DIR_READ; dcmd->timeout = 0; dcmd->pad_0 = 0; dcmd->data_xfer_len = sizeof(struct mrsas_evt_detail); dcmd->opcode = MR_DCMD_CTRL_EVENT_WAIT; dcmd->mbox.w[0] = seq_num; sc->last_seq_num = seq_num; dcmd->mbox.w[1] = curr_aen.word; dcmd->sgl.sge32[0].phys_addr = (u_int32_t)sc->evt_detail_phys_addr; dcmd->sgl.sge32[0].length = sizeof(struct mrsas_evt_detail); if (sc->aen_cmd != NULL) { mrsas_release_mfi_cmd(cmd); return 0; } /* * Store reference to the cmd used to register for AEN. When an * application wants us to register for AEN, we have to abort this * cmd and re-register with a new EVENT LOCALE supplied by that app */ sc->aen_cmd = cmd; /* * Issue the aen registration frame */ if (mrsas_issue_dcmd(sc, cmd)) { device_printf(sc->mrsas_dev, "Cannot issue AEN DCMD command.\n"); return (1); } return 0; } /* * mrsas_start_aen: Subscribes to AEN during driver load time * @instance: Adapter soft state */ static int mrsas_start_aen(struct mrsas_softc *sc) { struct mrsas_evt_log_info eli; union mrsas_evt_class_locale class_locale; /* Get the latest sequence number from FW */ memset(&eli, 0, sizeof(eli)); if (mrsas_get_seq_num(sc, &eli)) return -1; /* Register AEN with FW for latest sequence number plus 1 */ class_locale.members.reserved = 0; class_locale.members.locale = MR_EVT_LOCALE_ALL; class_locale.members.class = MR_EVT_CLASS_DEBUG; return mrsas_register_aen(sc, eli.newest_seq_num + 1, class_locale.word); } /* * mrsas_setup_msix: Allocate MSI-x vectors * @sc: adapter soft state */ static int mrsas_setup_msix(struct mrsas_softc *sc) { int i; for (i = 0; i < sc->msix_vectors; i++) { sc->irq_context[i].sc = sc; sc->irq_context[i].MSIxIndex = i; sc->irq_id[i] = i + 1; sc->mrsas_irq[i] = bus_alloc_resource_any (sc->mrsas_dev, SYS_RES_IRQ, &sc->irq_id[i] ,RF_ACTIVE); if (sc->mrsas_irq[i] == NULL) { device_printf(sc->mrsas_dev, "Can't allocate MSI-x\n"); goto irq_alloc_failed; } if (bus_setup_intr(sc->mrsas_dev, sc->mrsas_irq[i], INTR_MPSAFE | INTR_TYPE_CAM, NULL, mrsas_isr, &sc->irq_context[i], &sc->intr_handle[i])) { device_printf(sc->mrsas_dev, "Cannot set up MSI-x interrupt handler\n"); goto irq_alloc_failed; } } return SUCCESS; irq_alloc_failed: mrsas_teardown_intr(sc); return (FAIL); } /* * mrsas_allocate_msix: Setup MSI-x vectors * @sc: adapter soft state */ static int mrsas_allocate_msix(struct mrsas_softc *sc) { if (pci_alloc_msix(sc->mrsas_dev, &sc->msix_vectors) == 0) { device_printf(sc->mrsas_dev, "Using MSI-X with %d number" " of vectors\n", sc->msix_vectors); } else { device_printf(sc->mrsas_dev, "MSI-x setup failed\n"); goto irq_alloc_failed; } return SUCCESS; irq_alloc_failed: mrsas_teardown_intr(sc); return (FAIL); } /* * mrsas_attach: PCI entry point * input: pointer to device struct * * Performs setup of PCI and registers, initializes mutexes and linked lists, * registers interrupts and CAM, 
and initializes the adapter/controller to * its proper state. */ static int mrsas_attach(device_t dev) { struct mrsas_softc *sc = device_get_softc(dev); uint32_t cmd, bar, error; memset(sc, 0, sizeof(struct mrsas_softc)); /* Look up our softc and initialize its fields. */ sc->mrsas_dev = dev; sc->device_id = pci_get_device(dev); if ((sc->device_id == MRSAS_INVADER) || (sc->device_id == MRSAS_FURY) || (sc->device_id == MRSAS_INTRUDER) || (sc->device_id == MRSAS_INTRUDER_24) || (sc->device_id == MRSAS_CUTLASS_52) || (sc->device_id == MRSAS_CUTLASS_53)) { sc->mrsas_gen3_ctrl = 1; } mrsas_get_tunables(sc); /* * Set up PCI and registers */ cmd = pci_read_config(dev, PCIR_COMMAND, 2); if ((cmd & PCIM_CMD_PORTEN) == 0) { return (ENXIO); } /* Force the busmaster enable bit on. */ cmd |= PCIM_CMD_BUSMASTEREN; pci_write_config(dev, PCIR_COMMAND, cmd, 2); bar = pci_read_config(dev, MRSAS_PCI_BAR1, 4); sc->reg_res_id = MRSAS_PCI_BAR1;/* BAR1 offset */ if ((sc->reg_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &(sc->reg_res_id), RF_ACTIVE)) == NULL) { device_printf(dev, "Cannot allocate PCI registers\n"); goto attach_fail; } sc->bus_tag = rman_get_bustag(sc->reg_res); sc->bus_handle = rman_get_bushandle(sc->reg_res); /* Intialize mutexes */ mtx_init(&sc->sim_lock, "mrsas_sim_lock", NULL, MTX_DEF); mtx_init(&sc->pci_lock, "mrsas_pci_lock", NULL, MTX_DEF); mtx_init(&sc->io_lock, "mrsas_io_lock", NULL, MTX_DEF); mtx_init(&sc->aen_lock, "mrsas_aen_lock", NULL, MTX_DEF); mtx_init(&sc->ioctl_lock, "mrsas_ioctl_lock", NULL, MTX_SPIN); mtx_init(&sc->mpt_cmd_pool_lock, "mrsas_mpt_cmd_pool_lock", NULL, MTX_DEF); mtx_init(&sc->mfi_cmd_pool_lock, "mrsas_mfi_cmd_pool_lock", NULL, MTX_DEF); mtx_init(&sc->raidmap_lock, "mrsas_raidmap_lock", NULL, MTX_DEF); /* Intialize linked list */ TAILQ_INIT(&sc->mrsas_mpt_cmd_list_head); TAILQ_INIT(&sc->mrsas_mfi_cmd_list_head); mrsas_atomic_set(&sc->fw_outstanding, 0); mrsas_atomic_set(&sc->target_reset_outstanding, 0); sc->io_cmds_highwater = 0; sc->adprecovery = MRSAS_HBA_OPERATIONAL; sc->UnevenSpanSupport = 0; sc->msix_enable = 0; /* Initialize Firmware */ if (mrsas_init_fw(sc) != SUCCESS) { goto attach_fail_fw; } /* Register mrsas to CAM layer */ if ((mrsas_cam_attach(sc) != SUCCESS)) { goto attach_fail_cam; } /* Register IRQs */ if (mrsas_setup_irq(sc) != SUCCESS) { goto attach_fail_irq; } error = mrsas_kproc_create(mrsas_ocr_thread, sc, &sc->ocr_thread, 0, 0, "mrsas_ocr%d", device_get_unit(sc->mrsas_dev)); if (error) { device_printf(sc->mrsas_dev, "Error %d starting OCR thread\n", error); goto attach_fail_ocr_thread; } /* * After FW initialization and OCR thread creation * we will defer the cdev creation, AEN setup on ICH callback */ sc->mrsas_ich.ich_func = mrsas_ich_startup; sc->mrsas_ich.ich_arg = sc; if (config_intrhook_establish(&sc->mrsas_ich) != 0) { device_printf(sc->mrsas_dev, "Config hook is already established\n"); } mrsas_setup_sysctl(sc); return SUCCESS; attach_fail_ocr_thread: if (sc->ocr_thread_active) wakeup(&sc->ocr_chan); attach_fail_irq: mrsas_teardown_intr(sc); attach_fail_cam: mrsas_cam_detach(sc); attach_fail_fw: /* if MSIX vector is allocated and FW Init FAILED then release MSIX */ if (sc->msix_enable == 1) pci_release_msi(sc->mrsas_dev); mrsas_free_mem(sc); mtx_destroy(&sc->sim_lock); mtx_destroy(&sc->aen_lock); mtx_destroy(&sc->pci_lock); mtx_destroy(&sc->io_lock); mtx_destroy(&sc->ioctl_lock); mtx_destroy(&sc->mpt_cmd_pool_lock); mtx_destroy(&sc->mfi_cmd_pool_lock); mtx_destroy(&sc->raidmap_lock); attach_fail: if (sc->reg_res) { 
bus_release_resource(sc->mrsas_dev, SYS_RES_MEMORY, sc->reg_res_id, sc->reg_res); } return (ENXIO); } /* * Interrupt config hook */ static void mrsas_ich_startup(void *arg) { struct mrsas_softc *sc = (struct mrsas_softc *)arg; /* * Intialize a counting Semaphore to take care no. of concurrent IOCTLs */ sema_init(&sc->ioctl_count_sema, MRSAS_MAX_IOCTL_CMDS, IOCTL_SEMA_DESCRIPTION); /* Create a /dev entry for mrsas controller. */ sc->mrsas_cdev = make_dev(&mrsas_cdevsw, device_get_unit(sc->mrsas_dev), UID_ROOT, GID_OPERATOR, (S_IRUSR | S_IWUSR | S_IRGRP | S_IWGRP), "mrsas%u", device_get_unit(sc->mrsas_dev)); if (device_get_unit(sc->mrsas_dev) == 0) { make_dev_alias_p(MAKEDEV_CHECKNAME, &sc->mrsas_linux_emulator_cdev, sc->mrsas_cdev, "megaraid_sas_ioctl_node"); } if (sc->mrsas_cdev) sc->mrsas_cdev->si_drv1 = sc; /* * Add this controller to mrsas_mgmt_info structure so that it can be * exported to management applications */ if (device_get_unit(sc->mrsas_dev) == 0) memset(&mrsas_mgmt_info, 0, sizeof(mrsas_mgmt_info)); mrsas_mgmt_info.count++; mrsas_mgmt_info.sc_ptr[mrsas_mgmt_info.max_index] = sc; mrsas_mgmt_info.max_index++; /* Enable Interrupts */ mrsas_enable_intr(sc); /* Initiate AEN (Asynchronous Event Notification) */ if (mrsas_start_aen(sc)) { device_printf(sc->mrsas_dev, "Error: AEN registration FAILED !!! " "Further events from the controller will not be communicated.\n" "Either there is some problem in the controller" "or the controller does not support AEN.\n" "Please contact to the SUPPORT TEAM if the problem persists\n"); } if (sc->mrsas_ich.ich_arg != NULL) { device_printf(sc->mrsas_dev, "Disestablish mrsas intr hook\n"); config_intrhook_disestablish(&sc->mrsas_ich); sc->mrsas_ich.ich_arg = NULL; } } /* * mrsas_detach: De-allocates and teardown resources * input: pointer to device struct * * This function is the entry point for device disconnect and detach. * It performs memory de-allocations, shutdown of the controller and various * teardown and destroy resource functions. */ static int mrsas_detach(device_t dev) { struct mrsas_softc *sc; int i = 0; sc = device_get_softc(dev); sc->remove_in_progress = 1; /* Destroy the character device so no other IOCTL will be handled */ if ((device_get_unit(dev) == 0) && sc->mrsas_linux_emulator_cdev) destroy_dev(sc->mrsas_linux_emulator_cdev); destroy_dev(sc->mrsas_cdev); /* * Take the instance off the instance array. Note that we will not * decrement the max_index. 
We let this array be sparse array */ for (i = 0; i < mrsas_mgmt_info.max_index; i++) { if (mrsas_mgmt_info.sc_ptr[i] == sc) { mrsas_mgmt_info.count--; mrsas_mgmt_info.sc_ptr[i] = NULL; break; } } if (sc->ocr_thread_active) wakeup(&sc->ocr_chan); while (sc->reset_in_progress) { i++; if (!(i % MRSAS_RESET_NOTICE_INTERVAL)) { mrsas_dprint(sc, MRSAS_INFO, "[%2d]waiting for OCR to be finished from %s\n", i, __func__); } pause("mr_shutdown", hz); } i = 0; while (sc->ocr_thread_active) { i++; if (!(i % MRSAS_RESET_NOTICE_INTERVAL)) { mrsas_dprint(sc, MRSAS_INFO, "[%2d]waiting for " "mrsas_ocr thread to quit ocr %d\n", i, sc->ocr_thread_active); } pause("mr_shutdown", hz); } mrsas_flush_cache(sc); mrsas_shutdown_ctlr(sc, MR_DCMD_CTRL_SHUTDOWN); mrsas_disable_intr(sc); mrsas_cam_detach(sc); mrsas_teardown_intr(sc); mrsas_free_mem(sc); mtx_destroy(&sc->sim_lock); mtx_destroy(&sc->aen_lock); mtx_destroy(&sc->pci_lock); mtx_destroy(&sc->io_lock); mtx_destroy(&sc->ioctl_lock); mtx_destroy(&sc->mpt_cmd_pool_lock); mtx_destroy(&sc->mfi_cmd_pool_lock); mtx_destroy(&sc->raidmap_lock); /* Wait for all the semaphores to be released */ while (sema_value(&sc->ioctl_count_sema) != MRSAS_MAX_IOCTL_CMDS) pause("mr_shutdown", hz); /* Destroy the counting semaphore created for Ioctl */ sema_destroy(&sc->ioctl_count_sema); if (sc->reg_res) { bus_release_resource(sc->mrsas_dev, SYS_RES_MEMORY, sc->reg_res_id, sc->reg_res); } if (sc->sysctl_tree != NULL) sysctl_ctx_free(&sc->sysctl_ctx); return (0); } /* * mrsas_free_mem: Frees allocated memory * input: Adapter instance soft state * * This function is called from mrsas_detach() to free previously allocated * memory. */ void mrsas_free_mem(struct mrsas_softc *sc) { int i; u_int32_t max_cmd; struct mrsas_mfi_cmd *mfi_cmd; struct mrsas_mpt_cmd *mpt_cmd; /* * Free RAID map memory */ for (i = 0; i < 2; i++) { if (sc->raidmap_phys_addr[i]) bus_dmamap_unload(sc->raidmap_tag[i], sc->raidmap_dmamap[i]); if (sc->raidmap_mem[i] != NULL) bus_dmamem_free(sc->raidmap_tag[i], sc->raidmap_mem[i], sc->raidmap_dmamap[i]); if (sc->raidmap_tag[i] != NULL) bus_dma_tag_destroy(sc->raidmap_tag[i]); if (sc->ld_drv_map[i] != NULL) free(sc->ld_drv_map[i], M_MRSAS); } for (i = 0; i < 2; i++) { if (sc->jbodmap_phys_addr[i]) bus_dmamap_unload(sc->jbodmap_tag[i], sc->jbodmap_dmamap[i]); if (sc->jbodmap_mem[i] != NULL) bus_dmamem_free(sc->jbodmap_tag[i], sc->jbodmap_mem[i], sc->jbodmap_dmamap[i]); if (sc->jbodmap_tag[i] != NULL) bus_dma_tag_destroy(sc->jbodmap_tag[i]); } /* * Free version buffer memory */ if (sc->verbuf_phys_addr) bus_dmamap_unload(sc->verbuf_tag, sc->verbuf_dmamap); if (sc->verbuf_mem != NULL) bus_dmamem_free(sc->verbuf_tag, sc->verbuf_mem, sc->verbuf_dmamap); if (sc->verbuf_tag != NULL) bus_dma_tag_destroy(sc->verbuf_tag); /* * Free sense buffer memory */ if (sc->sense_phys_addr) bus_dmamap_unload(sc->sense_tag, sc->sense_dmamap); if (sc->sense_mem != NULL) bus_dmamem_free(sc->sense_tag, sc->sense_mem, sc->sense_dmamap); if (sc->sense_tag != NULL) bus_dma_tag_destroy(sc->sense_tag); /* * Free chain frame memory */ if (sc->chain_frame_phys_addr) bus_dmamap_unload(sc->chain_frame_tag, sc->chain_frame_dmamap); if (sc->chain_frame_mem != NULL) bus_dmamem_free(sc->chain_frame_tag, sc->chain_frame_mem, sc->chain_frame_dmamap); if (sc->chain_frame_tag != NULL) bus_dma_tag_destroy(sc->chain_frame_tag); /* * Free IO Request memory */ if (sc->io_request_phys_addr) bus_dmamap_unload(sc->io_request_tag, sc->io_request_dmamap); if (sc->io_request_mem != NULL) 
bus_dmamem_free(sc->io_request_tag, sc->io_request_mem, sc->io_request_dmamap); if (sc->io_request_tag != NULL) bus_dma_tag_destroy(sc->io_request_tag); /* * Free Reply Descriptor memory */ if (sc->reply_desc_phys_addr) bus_dmamap_unload(sc->reply_desc_tag, sc->reply_desc_dmamap); if (sc->reply_desc_mem != NULL) bus_dmamem_free(sc->reply_desc_tag, sc->reply_desc_mem, sc->reply_desc_dmamap); if (sc->reply_desc_tag != NULL) bus_dma_tag_destroy(sc->reply_desc_tag); /* * Free event detail memory */ if (sc->evt_detail_phys_addr) bus_dmamap_unload(sc->evt_detail_tag, sc->evt_detail_dmamap); if (sc->evt_detail_mem != NULL) bus_dmamem_free(sc->evt_detail_tag, sc->evt_detail_mem, sc->evt_detail_dmamap); if (sc->evt_detail_tag != NULL) bus_dma_tag_destroy(sc->evt_detail_tag); /* * Free MFI frames */ if (sc->mfi_cmd_list) { for (i = 0; i < MRSAS_MAX_MFI_CMDS; i++) { mfi_cmd = sc->mfi_cmd_list[i]; mrsas_free_frame(sc, mfi_cmd); } } if (sc->mficmd_frame_tag != NULL) bus_dma_tag_destroy(sc->mficmd_frame_tag); /* * Free MPT internal command list */ max_cmd = sc->max_fw_cmds; if (sc->mpt_cmd_list) { for (i = 0; i < max_cmd; i++) { mpt_cmd = sc->mpt_cmd_list[i]; bus_dmamap_destroy(sc->data_tag, mpt_cmd->data_dmamap); free(sc->mpt_cmd_list[i], M_MRSAS); } free(sc->mpt_cmd_list, M_MRSAS); sc->mpt_cmd_list = NULL; } /* * Free MFI internal command list */ if (sc->mfi_cmd_list) { for (i = 0; i < MRSAS_MAX_MFI_CMDS; i++) { free(sc->mfi_cmd_list[i], M_MRSAS); } free(sc->mfi_cmd_list, M_MRSAS); sc->mfi_cmd_list = NULL; } /* * Free request descriptor memory */ free(sc->req_desc, M_MRSAS); sc->req_desc = NULL; /* * Destroy parent tag */ if (sc->mrsas_parent_tag != NULL) bus_dma_tag_destroy(sc->mrsas_parent_tag); /* * Free ctrl_info memory */ if (sc->ctrl_info != NULL) free(sc->ctrl_info, M_MRSAS); } /* * mrsas_teardown_intr: Teardown interrupt * input: Adapter instance soft state * * This function is called from mrsas_detach() to teardown and release bus * interrupt resourse. */ void mrsas_teardown_intr(struct mrsas_softc *sc) { int i; if (!sc->msix_enable) { if (sc->intr_handle[0]) bus_teardown_intr(sc->mrsas_dev, sc->mrsas_irq[0], sc->intr_handle[0]); if (sc->mrsas_irq[0] != NULL) bus_release_resource(sc->mrsas_dev, SYS_RES_IRQ, sc->irq_id[0], sc->mrsas_irq[0]); sc->intr_handle[0] = NULL; } else { for (i = 0; i < sc->msix_vectors; i++) { if (sc->intr_handle[i]) bus_teardown_intr(sc->mrsas_dev, sc->mrsas_irq[i], sc->intr_handle[i]); if (sc->mrsas_irq[i] != NULL) bus_release_resource(sc->mrsas_dev, SYS_RES_IRQ, sc->irq_id[i], sc->mrsas_irq[i]); sc->intr_handle[i] = NULL; } pci_release_msi(sc->mrsas_dev); } } /* * mrsas_suspend: Suspend entry point * input: Device struct pointer * * This function is the entry point for system suspend from the OS. */ static int mrsas_suspend(device_t dev) { /* This will be filled when the driver will have hibernation support */ return (0); } /* * mrsas_resume: Resume entry point * input: Device struct pointer * * This function is the entry point for system resume from the OS. */ static int mrsas_resume(device_t dev) { /* This will be filled when the driver will have hibernation support */ return (0); } /** * mrsas_get_softc_instance: Find softc instance based on cmd type * * This function will return softc instance based on cmd type. * In some case, application fire ioctl on required management instance and * do not provide host_no. Use cdev->si_drv1 to get softc instance for those * case, else get the softc instance from host_no provided by application in * user data. 
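 * For MRSAS_IOC_GET_PCI_INFO the per-device cdev is enough, so the softc is
 * taken from dev->si_drv1; every other ioctl carries a host_no in the user
 * payload, which is used as an index into mrsas_mgmt_info.sc_ptr[].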
*/ static struct mrsas_softc * mrsas_get_softc_instance(struct cdev *dev, u_long cmd, caddr_t arg) { struct mrsas_softc *sc = NULL; struct mrsas_iocpacket *user_ioc = (struct mrsas_iocpacket *)arg; if (cmd == MRSAS_IOC_GET_PCI_INFO) { sc = dev->si_drv1; } else { /* * get the Host number & the softc from data sent by the * Application */ sc = mrsas_mgmt_info.sc_ptr[user_ioc->host_no]; if (sc == NULL) printf("There is no Controller number %d\n", user_ioc->host_no); else if (user_ioc->host_no >= mrsas_mgmt_info.max_index) mrsas_dprint(sc, MRSAS_FAULT, "Invalid Controller number %d\n", user_ioc->host_no); } return sc; } /* * mrsas_ioctl: IOCtl commands entry point. * * This function is the entry point for IOCtls from the OS. It calls the * appropriate function for processing depending on the command received. */ static int mrsas_ioctl(struct cdev *dev, u_long cmd, caddr_t arg, int flag, struct thread *td) { struct mrsas_softc *sc; int ret = 0, i = 0; MRSAS_DRV_PCI_INFORMATION *pciDrvInfo; sc = mrsas_get_softc_instance(dev, cmd, arg); if (!sc) return ENOENT; if (sc->remove_in_progress || (sc->adprecovery == MRSAS_HW_CRITICAL_ERROR)) { mrsas_dprint(sc, MRSAS_INFO, "Either driver remove or shutdown called or " "HW is in unrecoverable critical error state.\n"); return ENOENT; } mtx_lock_spin(&sc->ioctl_lock); if (!sc->reset_in_progress) { mtx_unlock_spin(&sc->ioctl_lock); goto do_ioctl; } mtx_unlock_spin(&sc->ioctl_lock); while (sc->reset_in_progress) { i++; if (!(i % MRSAS_RESET_NOTICE_INTERVAL)) { mrsas_dprint(sc, MRSAS_INFO, "[%2d]waiting for OCR to be finished from %s\n", i, __func__); } pause("mr_ioctl", hz); } do_ioctl: switch (cmd) { case MRSAS_IOC_FIRMWARE_PASS_THROUGH64: #ifdef COMPAT_FREEBSD32 case MRSAS_IOC_FIRMWARE_PASS_THROUGH32: #endif /* * Decrement the Ioctl counting Semaphore before getting an * mfi command */ sema_wait(&sc->ioctl_count_sema); ret = mrsas_passthru(sc, (void *)arg, cmd); /* Increment the Ioctl counting semaphore value */ sema_post(&sc->ioctl_count_sema); break; case MRSAS_IOC_SCAN_BUS: ret = mrsas_bus_scan(sc); break; case MRSAS_IOC_GET_PCI_INFO: pciDrvInfo = (MRSAS_DRV_PCI_INFORMATION *) arg; memset(pciDrvInfo, 0, sizeof(MRSAS_DRV_PCI_INFORMATION)); pciDrvInfo->busNumber = pci_get_bus(sc->mrsas_dev); pciDrvInfo->deviceNumber = pci_get_slot(sc->mrsas_dev); pciDrvInfo->functionNumber = pci_get_function(sc->mrsas_dev); pciDrvInfo->domainID = pci_get_domain(sc->mrsas_dev); mrsas_dprint(sc, MRSAS_INFO, "pci bus no: %d," "pci device no: %d, pci function no: %d," "pci domain ID: %d\n", pciDrvInfo->busNumber, pciDrvInfo->deviceNumber, pciDrvInfo->functionNumber, pciDrvInfo->domainID); ret = 0; break; default: mrsas_dprint(sc, MRSAS_TRACE, "IOCTL command 0x%lx is not handled\n", cmd); ret = ENOENT; } return (ret); } /* * mrsas_poll: poll entry point for mrsas driver fd * * This function is the entry point for poll from the OS. It waits for some AEN * events to be triggered from the controller and notifies back. 
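 * If an AEN has already fired (sc->mrsas_aen_triggered), POLLIN/POLLRDNORM is
 * returned immediately; otherwise the calling thread is recorded with
 * selrecord() under aen_lock and mrsas_poll_waiting is set so the next event
 * can wake it up.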
*/ static int mrsas_poll(struct cdev *dev, int poll_events, struct thread *td) { struct mrsas_softc *sc; int revents = 0; sc = dev->si_drv1; if (poll_events & (POLLIN | POLLRDNORM)) { if (sc->mrsas_aen_triggered) { revents |= poll_events & (POLLIN | POLLRDNORM); } } if (revents == 0) { if (poll_events & (POLLIN | POLLRDNORM)) { mtx_lock(&sc->aen_lock); sc->mrsas_poll_waiting = 1; selrecord(td, &sc->mrsas_select); mtx_unlock(&sc->aen_lock); } } return revents; } /* * mrsas_setup_irq: Set up interrupt * input: Adapter instance soft state * * This function sets up interrupts as a bus resource, with flags indicating * resource permitting contemporaneous sharing and for resource to activate * atomically. */ static int mrsas_setup_irq(struct mrsas_softc *sc) { if (sc->msix_enable && (mrsas_setup_msix(sc) == SUCCESS)) device_printf(sc->mrsas_dev, "MSI-x interrupts setup success\n"); else { device_printf(sc->mrsas_dev, "Fall back to legacy interrupt\n"); sc->irq_context[0].sc = sc; sc->irq_context[0].MSIxIndex = 0; sc->irq_id[0] = 0; sc->mrsas_irq[0] = bus_alloc_resource_any(sc->mrsas_dev, SYS_RES_IRQ, &sc->irq_id[0], RF_SHAREABLE | RF_ACTIVE); if (sc->mrsas_irq[0] == NULL) { device_printf(sc->mrsas_dev, "Cannot allocate legcay" "interrupt\n"); return (FAIL); } if (bus_setup_intr(sc->mrsas_dev, sc->mrsas_irq[0], INTR_MPSAFE | INTR_TYPE_CAM, NULL, mrsas_isr, &sc->irq_context[0], &sc->intr_handle[0])) { device_printf(sc->mrsas_dev, "Cannot set up legacy" "interrupt\n"); return (FAIL); } } return (0); } /* * mrsas_isr: ISR entry point * input: argument pointer * * This function is the interrupt service routine entry point. There are two * types of interrupts, state change interrupt and response interrupt. If an * interrupt is not ours, we just return. */ void mrsas_isr(void *arg) { struct mrsas_irq_context *irq_context = (struct mrsas_irq_context *)arg; struct mrsas_softc *sc = irq_context->sc; int status = 0; if (sc->mask_interrupts) return; if (!sc->msix_vectors) { status = mrsas_clear_intr(sc); if (!status) return; } /* If we are resetting, bail */ if (mrsas_test_bit(MRSAS_FUSION_IN_RESET, &sc->reset_flags)) { printf(" Entered into ISR when OCR is going active. \n"); mrsas_clear_intr(sc); return; } /* Process for reply request and clear response interrupt */ if (mrsas_complete_cmd(sc, irq_context->MSIxIndex) != SUCCESS) mrsas_clear_intr(sc); return; } /* * mrsas_complete_cmd: Process reply request * input: Adapter instance soft state * * This function is called from mrsas_isr() to process reply request and clear * response interrupt. Processing of the reply request entails walking * through the reply descriptor array for the command request pended from * Firmware. We look at the Function field to determine the command type and * perform the appropriate action. Before we return, we clear the response * interrupt. 
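 * Each MSI-x vector owns a slice of the reply ring: the walk starts at
 * last_reply_idx[MSIxIndex], a descriptor whose Words are all 0xFF is free,
 * and consumed entries are marked free again before advancing. The reply post
 * host index register is written every THRESHOLD_REPLY_COUNT completions and
 * once more after the loop so the firmware can reuse the ring entries.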
*/ int mrsas_complete_cmd(struct mrsas_softc *sc, u_int32_t MSIxIndex) { Mpi2ReplyDescriptorsUnion_t *desc; MPI2_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR *reply_desc; MRSAS_RAID_SCSI_IO_REQUEST *scsi_io_req; struct mrsas_mpt_cmd *cmd_mpt; struct mrsas_mfi_cmd *cmd_mfi; u_int8_t reply_descript_type; u_int16_t smid, num_completed; u_int8_t status, extStatus; union desc_value desc_val; PLD_LOAD_BALANCE_INFO lbinfo; u_int32_t device_id; int threshold_reply_count = 0; #if TM_DEBUG MR_TASK_MANAGE_REQUEST *mr_tm_req; MPI2_SCSI_TASK_MANAGE_REQUEST *mpi_tm_req; #endif /* If we have a hardware error, not need to continue */ if (sc->adprecovery == MRSAS_HW_CRITICAL_ERROR) return (DONE); desc = sc->reply_desc_mem; desc += ((MSIxIndex * sc->reply_alloc_sz) / sizeof(MPI2_REPLY_DESCRIPTORS_UNION)) + sc->last_reply_idx[MSIxIndex]; reply_desc = (MPI2_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR *) desc; desc_val.word = desc->Words; num_completed = 0; reply_descript_type = reply_desc->ReplyFlags & MPI2_RPY_DESCRIPT_FLAGS_TYPE_MASK; /* Find our reply descriptor for the command and process */ while ((desc_val.u.low != 0xFFFFFFFF) && (desc_val.u.high != 0xFFFFFFFF)) { smid = reply_desc->SMID; cmd_mpt = sc->mpt_cmd_list[smid - 1]; scsi_io_req = (MRSAS_RAID_SCSI_IO_REQUEST *) cmd_mpt->io_request; status = scsi_io_req->RaidContext.status; extStatus = scsi_io_req->RaidContext.exStatus; switch (scsi_io_req->Function) { case MPI2_FUNCTION_SCSI_TASK_MGMT: #if TM_DEBUG mr_tm_req = (MR_TASK_MANAGE_REQUEST *) cmd_mpt->io_request; mpi_tm_req = (MPI2_SCSI_TASK_MANAGE_REQUEST *) &mr_tm_req->TmRequest; device_printf(sc->mrsas_dev, "TM completion type 0x%X, " "TaskMID: 0x%X", mpi_tm_req->TaskType, mpi_tm_req->TaskMID); #endif wakeup_one((void *)&sc->ocr_chan); break; case MPI2_FUNCTION_SCSI_IO_REQUEST: /* Fast Path IO. */ device_id = cmd_mpt->ccb_ptr->ccb_h.target_id; lbinfo = &sc->load_balance_info[device_id]; if (cmd_mpt->load_balance == MRSAS_LOAD_BALANCE_FLAG) { mrsas_atomic_dec(&lbinfo->scsi_pending_cmds[cmd_mpt->pd_r1_lb]); cmd_mpt->load_balance &= ~MRSAS_LOAD_BALANCE_FLAG; } /* Fall thru and complete IO */ case MRSAS_MPI2_FUNCTION_LD_IO_REQUEST: mrsas_map_mpt_cmd_status(cmd_mpt, status, extStatus); mrsas_cmd_done(sc, cmd_mpt); scsi_io_req->RaidContext.status = 0; scsi_io_req->RaidContext.exStatus = 0; mrsas_atomic_dec(&sc->fw_outstanding); break; case MRSAS_MPI2_FUNCTION_PASSTHRU_IO_REQUEST: /* MFI command */ cmd_mfi = sc->mfi_cmd_list[cmd_mpt->sync_cmd_idx]; /* * Make sure NOT TO release the mfi command from the called * function's context if it is fired with issue_polled call. * And also make sure that the issue_polled call should only be * used if INTERRUPT IS DISABLED. 
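 * (Frames flagged DONT_POST_IN_REPLY_QUEUE are simply returned to the MFI
 * pool below; everything else goes through mrsas_complete_mptmfi_passthru().)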
*/ if (cmd_mfi->frame->hdr.flags & MFI_FRAME_DONT_POST_IN_REPLY_QUEUE) mrsas_release_mfi_cmd(cmd_mfi); else mrsas_complete_mptmfi_passthru(sc, cmd_mfi, status); break; } sc->last_reply_idx[MSIxIndex]++; if (sc->last_reply_idx[MSIxIndex] >= sc->reply_q_depth) sc->last_reply_idx[MSIxIndex] = 0; desc->Words = ~((uint64_t)0x00); /* set it back to all * 0xFFFFFFFFs */ num_completed++; threshold_reply_count++; /* Get the next reply descriptor */ if (!sc->last_reply_idx[MSIxIndex]) { desc = sc->reply_desc_mem; desc += ((MSIxIndex * sc->reply_alloc_sz) / sizeof(MPI2_REPLY_DESCRIPTORS_UNION)); } else desc++; reply_desc = (MPI2_SCSI_IO_SUCCESS_REPLY_DESCRIPTOR *) desc; desc_val.word = desc->Words; reply_descript_type = reply_desc->ReplyFlags & MPI2_RPY_DESCRIPT_FLAGS_TYPE_MASK; if (reply_descript_type == MPI2_RPY_DESCRIPT_FLAGS_UNUSED) break; /* * Write to reply post index after completing threshold reply * count and still there are more replies in reply queue * pending to be completed. */ if (threshold_reply_count >= THRESHOLD_REPLY_COUNT) { if (sc->msix_enable) { if (sc->mrsas_gen3_ctrl) mrsas_write_reg(sc, sc->msix_reg_offset[MSIxIndex / 8], ((MSIxIndex & 0x7) << 24) | sc->last_reply_idx[MSIxIndex]); else mrsas_write_reg(sc, sc->msix_reg_offset[0], (MSIxIndex << 24) | sc->last_reply_idx[MSIxIndex]); } else mrsas_write_reg(sc, offsetof(mrsas_reg_set, reply_post_host_index), sc->last_reply_idx[0]); threshold_reply_count = 0; } } /* No match, just return */ if (num_completed == 0) return (DONE); /* Clear response interrupt */ if (sc->msix_enable) { if (sc->mrsas_gen3_ctrl) { mrsas_write_reg(sc, sc->msix_reg_offset[MSIxIndex / 8], ((MSIxIndex & 0x7) << 24) | sc->last_reply_idx[MSIxIndex]); } else mrsas_write_reg(sc, sc->msix_reg_offset[0], (MSIxIndex << 24) | sc->last_reply_idx[MSIxIndex]); } else mrsas_write_reg(sc, offsetof(mrsas_reg_set, reply_post_host_index), sc->last_reply_idx[0]); return (0); } /* * mrsas_map_mpt_cmd_status: Allocate DMAable memory. * input: Adapter instance soft state * * This function is called from mrsas_complete_cmd(), for LD IO and FastPath IO. * It checks the command status and maps the appropriate CAM status for the * CCB. */ void mrsas_map_mpt_cmd_status(struct mrsas_mpt_cmd *cmd, u_int8_t status, u_int8_t extStatus) { struct mrsas_softc *sc = cmd->sc; u_int8_t *sense_data; switch (status) { case MFI_STAT_OK: cmd->ccb_ptr->ccb_h.status = CAM_REQ_CMP; break; case MFI_STAT_SCSI_IO_FAILED: case MFI_STAT_SCSI_DONE_WITH_ERROR: cmd->ccb_ptr->ccb_h.status = CAM_SCSI_STATUS_ERROR; sense_data = (u_int8_t *)&cmd->ccb_ptr->csio.sense_data; if (sense_data) { /* For now just copy 18 bytes back */ memcpy(sense_data, cmd->sense, 18); cmd->ccb_ptr->csio.sense_len = 18; cmd->ccb_ptr->ccb_h.status |= CAM_AUTOSNS_VALID; } break; case MFI_STAT_LD_OFFLINE: case MFI_STAT_DEVICE_NOT_FOUND: if (cmd->ccb_ptr->ccb_h.target_lun) cmd->ccb_ptr->ccb_h.status |= CAM_LUN_INVALID; else cmd->ccb_ptr->ccb_h.status |= CAM_DEV_NOT_THERE; break; case MFI_STAT_CONFIG_SEQ_MISMATCH: cmd->ccb_ptr->ccb_h.status |= CAM_REQUEUE_REQ; break; default: device_printf(sc->mrsas_dev, "FW cmd complete status %x\n", status); cmd->ccb_ptr->ccb_h.status = CAM_REQ_CMP_ERR; cmd->ccb_ptr->csio.scsi_status = status; } return; } /* * mrsas_alloc_mem: Allocate DMAable memory * input: Adapter instance soft state * * This function creates the parent DMA tag and allocates DMAable memory. DMA * tag describes constraints of DMA mapping. Memory allocated is mapped into * Kernel virtual address. 
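 * The buffers created here are: the version buffer, the IO request frames,
 * the chain frames, one reply descriptor ring per MSI-x vector
 * (reply_alloc_sz * count), the sense buffer pool (kept in the lower 4GB),
 * the event detail buffer, and a data buffer tag sized for MAXPHYS transfers
 * with up to max_num_sge segments.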
Callback argument is physical memory address. */ static int mrsas_alloc_mem(struct mrsas_softc *sc) { u_int32_t verbuf_size, io_req_size, reply_desc_size, sense_size, chain_frame_size, evt_detail_size, count; /* * Allocate parent DMA tag */ if (bus_dma_tag_create(NULL, /* parent */ 1, /* alignment */ 0, /* boundary */ BUS_SPACE_MAXADDR, /* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filter, filterarg */ MAXPHYS, /* maxsize */ sc->max_num_sge, /* nsegments */ MAXPHYS, /* maxsegsize */ 0, /* flags */ NULL, NULL, /* lockfunc, lockarg */ &sc->mrsas_parent_tag /* tag */ )) { device_printf(sc->mrsas_dev, "Cannot allocate parent DMA tag\n"); return (ENOMEM); } /* * Allocate for version buffer */ verbuf_size = MRSAS_MAX_NAME_LENGTH * (sizeof(bus_addr_t)); if (bus_dma_tag_create(sc->mrsas_parent_tag, 1, 0, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, verbuf_size, 1, verbuf_size, BUS_DMA_ALLOCNOW, NULL, NULL, &sc->verbuf_tag)) { device_printf(sc->mrsas_dev, "Cannot allocate verbuf DMA tag\n"); return (ENOMEM); } if (bus_dmamem_alloc(sc->verbuf_tag, (void **)&sc->verbuf_mem, BUS_DMA_NOWAIT, &sc->verbuf_dmamap)) { device_printf(sc->mrsas_dev, "Cannot allocate verbuf memory\n"); return (ENOMEM); } bzero(sc->verbuf_mem, verbuf_size); if (bus_dmamap_load(sc->verbuf_tag, sc->verbuf_dmamap, sc->verbuf_mem, verbuf_size, mrsas_addr_cb, &sc->verbuf_phys_addr, BUS_DMA_NOWAIT)) { device_printf(sc->mrsas_dev, "Cannot load verbuf DMA map\n"); return (ENOMEM); } /* * Allocate IO Request Frames */ io_req_size = sc->io_frames_alloc_sz; if (bus_dma_tag_create(sc->mrsas_parent_tag, 16, 0, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, io_req_size, 1, io_req_size, BUS_DMA_ALLOCNOW, NULL, NULL, &sc->io_request_tag)) { device_printf(sc->mrsas_dev, "Cannot create IO request tag\n"); return (ENOMEM); } if (bus_dmamem_alloc(sc->io_request_tag, (void **)&sc->io_request_mem, BUS_DMA_NOWAIT, &sc->io_request_dmamap)) { device_printf(sc->mrsas_dev, "Cannot alloc IO request memory\n"); return (ENOMEM); } bzero(sc->io_request_mem, io_req_size); if (bus_dmamap_load(sc->io_request_tag, sc->io_request_dmamap, sc->io_request_mem, io_req_size, mrsas_addr_cb, &sc->io_request_phys_addr, BUS_DMA_NOWAIT)) { device_printf(sc->mrsas_dev, "Cannot load IO request memory\n"); return (ENOMEM); } /* * Allocate Chain Frames */ chain_frame_size = sc->chain_frames_alloc_sz; if (bus_dma_tag_create(sc->mrsas_parent_tag, 4, 0, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, chain_frame_size, 1, chain_frame_size, BUS_DMA_ALLOCNOW, NULL, NULL, &sc->chain_frame_tag)) { device_printf(sc->mrsas_dev, "Cannot create chain frame tag\n"); return (ENOMEM); } if (bus_dmamem_alloc(sc->chain_frame_tag, (void **)&sc->chain_frame_mem, BUS_DMA_NOWAIT, &sc->chain_frame_dmamap)) { device_printf(sc->mrsas_dev, "Cannot alloc chain frame memory\n"); return (ENOMEM); } bzero(sc->chain_frame_mem, chain_frame_size); if (bus_dmamap_load(sc->chain_frame_tag, sc->chain_frame_dmamap, sc->chain_frame_mem, chain_frame_size, mrsas_addr_cb, &sc->chain_frame_phys_addr, BUS_DMA_NOWAIT)) { device_printf(sc->mrsas_dev, "Cannot load chain frame memory\n"); return (ENOMEM); } count = sc->msix_vectors > 0 ? 
sc->msix_vectors : 1; /* * Allocate Reply Descriptor Array */ reply_desc_size = sc->reply_alloc_sz * count; if (bus_dma_tag_create(sc->mrsas_parent_tag, 16, 0, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, reply_desc_size, 1, reply_desc_size, BUS_DMA_ALLOCNOW, NULL, NULL, &sc->reply_desc_tag)) { device_printf(sc->mrsas_dev, "Cannot create reply descriptor tag\n"); return (ENOMEM); } if (bus_dmamem_alloc(sc->reply_desc_tag, (void **)&sc->reply_desc_mem, BUS_DMA_NOWAIT, &sc->reply_desc_dmamap)) { device_printf(sc->mrsas_dev, "Cannot alloc reply descriptor memory\n"); return (ENOMEM); } if (bus_dmamap_load(sc->reply_desc_tag, sc->reply_desc_dmamap, sc->reply_desc_mem, reply_desc_size, mrsas_addr_cb, &sc->reply_desc_phys_addr, BUS_DMA_NOWAIT)) { device_printf(sc->mrsas_dev, "Cannot load reply descriptor memory\n"); return (ENOMEM); } /* * Allocate Sense Buffer Array. Keep in lower 4GB */ sense_size = sc->max_fw_cmds * MRSAS_SENSE_LEN; if (bus_dma_tag_create(sc->mrsas_parent_tag, 64, 0, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, sense_size, 1, sense_size, BUS_DMA_ALLOCNOW, NULL, NULL, &sc->sense_tag)) { device_printf(sc->mrsas_dev, "Cannot allocate sense buf tag\n"); return (ENOMEM); } if (bus_dmamem_alloc(sc->sense_tag, (void **)&sc->sense_mem, BUS_DMA_NOWAIT, &sc->sense_dmamap)) { device_printf(sc->mrsas_dev, "Cannot allocate sense buf memory\n"); return (ENOMEM); } if (bus_dmamap_load(sc->sense_tag, sc->sense_dmamap, sc->sense_mem, sense_size, mrsas_addr_cb, &sc->sense_phys_addr, BUS_DMA_NOWAIT)) { device_printf(sc->mrsas_dev, "Cannot load sense buf memory\n"); return (ENOMEM); } /* * Allocate for Event detail structure */ evt_detail_size = sizeof(struct mrsas_evt_detail); if (bus_dma_tag_create(sc->mrsas_parent_tag, 1, 0, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, evt_detail_size, 1, evt_detail_size, BUS_DMA_ALLOCNOW, NULL, NULL, &sc->evt_detail_tag)) { device_printf(sc->mrsas_dev, "Cannot create Event detail tag\n"); return (ENOMEM); } if (bus_dmamem_alloc(sc->evt_detail_tag, (void **)&sc->evt_detail_mem, BUS_DMA_NOWAIT, &sc->evt_detail_dmamap)) { device_printf(sc->mrsas_dev, "Cannot alloc Event detail buffer memory\n"); return (ENOMEM); } bzero(sc->evt_detail_mem, evt_detail_size); if (bus_dmamap_load(sc->evt_detail_tag, sc->evt_detail_dmamap, sc->evt_detail_mem, evt_detail_size, mrsas_addr_cb, &sc->evt_detail_phys_addr, BUS_DMA_NOWAIT)) { device_printf(sc->mrsas_dev, "Cannot load Event detail buffer memory\n"); return (ENOMEM); } /* * Create a dma tag for data buffers; size will be the maximum * possible I/O size (280kB). */ if (bus_dma_tag_create(sc->mrsas_parent_tag, 1, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL, MAXPHYS, sc->max_num_sge, /* nsegments */ MAXPHYS, BUS_DMA_ALLOCNOW, busdma_lock_mutex, &sc->io_lock, &sc->data_tag)) { device_printf(sc->mrsas_dev, "Cannot create data dma tag\n"); return (ENOMEM); } return (0); } /* * mrsas_addr_cb: Callback function of bus_dmamap_load() * input: callback argument, machine dependent type * that describes DMA segments, number of segments, error code * * This function is for the driver to receive mapping information resultant of * the bus_dmamap_load(). The information is actually not being used, but the * address is saved anyway. */ void mrsas_addr_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error) { bus_addr_t *addr; addr = arg; *addr = segs[0].ds_addr; } /* * mrsas_setup_raidmap: Set up RAID map. 
* input: Adapter instance soft state * * Allocate DMA memory for the RAID maps and perform setup. */ static int mrsas_setup_raidmap(struct mrsas_softc *sc) { int i; for (i = 0; i < 2; i++) { sc->ld_drv_map[i] = (void *)malloc(sc->drv_map_sz, M_MRSAS, M_NOWAIT); /* Do Error handling */ if (!sc->ld_drv_map[i]) { device_printf(sc->mrsas_dev, "Could not allocate memory for local map"); if (i == 1) free(sc->ld_drv_map[0], M_MRSAS); /* ABORT driver initialization */ goto ABORT; } } for (int i = 0; i < 2; i++) { if (bus_dma_tag_create(sc->mrsas_parent_tag, 4, 0, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, sc->max_map_sz, 1, sc->max_map_sz, BUS_DMA_ALLOCNOW, NULL, NULL, &sc->raidmap_tag[i])) { device_printf(sc->mrsas_dev, "Cannot allocate raid map tag.\n"); return (ENOMEM); } if (bus_dmamem_alloc(sc->raidmap_tag[i], (void **)&sc->raidmap_mem[i], BUS_DMA_NOWAIT, &sc->raidmap_dmamap[i])) { device_printf(sc->mrsas_dev, "Cannot allocate raidmap memory.\n"); return (ENOMEM); } bzero(sc->raidmap_mem[i], sc->max_map_sz); if (bus_dmamap_load(sc->raidmap_tag[i], sc->raidmap_dmamap[i], sc->raidmap_mem[i], sc->max_map_sz, mrsas_addr_cb, &sc->raidmap_phys_addr[i], BUS_DMA_NOWAIT)) { device_printf(sc->mrsas_dev, "Cannot load raidmap memory.\n"); return (ENOMEM); } if (!sc->raidmap_mem[i]) { device_printf(sc->mrsas_dev, "Cannot allocate memory for raid map.\n"); return (ENOMEM); } } if (!mrsas_get_map_info(sc)) mrsas_sync_map_info(sc); return (0); ABORT: return (1); } /** * megasas_setup_jbod_map - setup jbod map for FP seq_number. * @sc: Adapter soft state * * Return 0 on success. */ void megasas_setup_jbod_map(struct mrsas_softc *sc) { int i; uint32_t pd_seq_map_sz; pd_seq_map_sz = sizeof(struct MR_PD_CFG_SEQ_NUM_SYNC) + (sizeof(struct MR_PD_CFG_SEQ) * (MAX_PHYSICAL_DEVICES - 1)); if (!sc->ctrl_info->adapterOperations3.useSeqNumJbodFP) { sc->use_seqnum_jbod_fp = 0; return; } if (sc->jbodmap_mem[0]) goto skip_alloc; for (i = 0; i < 2; i++) { if (bus_dma_tag_create(sc->mrsas_parent_tag, 4, 0, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, pd_seq_map_sz, 1, pd_seq_map_sz, BUS_DMA_ALLOCNOW, NULL, NULL, &sc->jbodmap_tag[i])) { device_printf(sc->mrsas_dev, "Cannot allocate jbod map tag.\n"); return; } if (bus_dmamem_alloc(sc->jbodmap_tag[i], (void **)&sc->jbodmap_mem[i], BUS_DMA_NOWAIT, &sc->jbodmap_dmamap[i])) { device_printf(sc->mrsas_dev, "Cannot allocate jbod map memory.\n"); return; } bzero(sc->jbodmap_mem[i], pd_seq_map_sz); if (bus_dmamap_load(sc->jbodmap_tag[i], sc->jbodmap_dmamap[i], sc->jbodmap_mem[i], pd_seq_map_sz, mrsas_addr_cb, &sc->jbodmap_phys_addr[i], BUS_DMA_NOWAIT)) { device_printf(sc->mrsas_dev, "Cannot load jbod map memory.\n"); return; } if (!sc->jbodmap_mem[i]) { device_printf(sc->mrsas_dev, "Cannot allocate memory for jbod map.\n"); sc->use_seqnum_jbod_fp = 0; return; } } skip_alloc: if (!megasas_sync_pd_seq_num(sc, false) && !megasas_sync_pd_seq_num(sc, true)) sc->use_seqnum_jbod_fp = 1; else sc->use_seqnum_jbod_fp = 0; device_printf(sc->mrsas_dev, "Jbod map is supported\n"); } /* * mrsas_init_fw: Initialize Firmware * input: Adapter soft state * * Calls transition_to_ready() to make sure Firmware is in operational state and * calls mrsas_init_adapter() to send IOC_INIT command to Firmware. It * issues internal commands to get the controller info after the IOC_INIT * command response is received by Firmware. Note: code relating to * get_pdlist, get_ld_list and max_sectors are currently not being used, it * is left here as placeholder. 
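 * MSI-x support is probed through the outbound scratch pad registers while
 * the controller is in the ready state; the vector count reported by firmware
 * is capped at the number of online CPUs (mp_ncpus) before pci_alloc_msix()
 * is called from mrsas_allocate_msix().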
*/ static int mrsas_init_fw(struct mrsas_softc *sc) { int ret, loop, ocr = 0; u_int32_t max_sectors_1; u_int32_t max_sectors_2; u_int32_t tmp_sectors; u_int32_t scratch_pad_2; int msix_enable = 0; int fw_msix_count = 0; /* Make sure Firmware is ready */ ret = mrsas_transition_to_ready(sc, ocr); if (ret != SUCCESS) { return (ret); } /* MSI-x index 0- reply post host index register */ sc->msix_reg_offset[0] = MPI2_REPLY_POST_HOST_INDEX_OFFSET; /* Check if MSI-X is supported while in ready state */ msix_enable = (mrsas_read_reg(sc, offsetof(mrsas_reg_set, outbound_scratch_pad)) & 0x4000000) >> 0x1a; if (msix_enable) { scratch_pad_2 = mrsas_read_reg(sc, offsetof(mrsas_reg_set, outbound_scratch_pad_2)); /* Check max MSI-X vectors */ if (sc->device_id == MRSAS_TBOLT) { sc->msix_vectors = (scratch_pad_2 & MR_MAX_REPLY_QUEUES_OFFSET) + 1; fw_msix_count = sc->msix_vectors; } else { /* Invader/Fury supports 96 MSI-X vectors */ sc->msix_vectors = ((scratch_pad_2 & MR_MAX_REPLY_QUEUES_EXT_OFFSET) >> MR_MAX_REPLY_QUEUES_EXT_OFFSET_SHIFT) + 1; fw_msix_count = sc->msix_vectors; for (loop = 1; loop < MR_MAX_MSIX_REG_ARRAY; loop++) { sc->msix_reg_offset[loop] = MPI2_SUP_REPLY_POST_HOST_INDEX_OFFSET + (loop * 0x10); } } /* Don't bother allocating more MSI-X vectors than cpus */ sc->msix_vectors = min(sc->msix_vectors, mp_ncpus); /* Allocate MSI-x vectors */ if (mrsas_allocate_msix(sc) == SUCCESS) sc->msix_enable = 1; else sc->msix_enable = 0; device_printf(sc->mrsas_dev, "FW supports <%d> MSIX vector," "Online CPU %d Current MSIX <%d>\n", fw_msix_count, mp_ncpus, sc->msix_vectors); } if (mrsas_init_adapter(sc) != SUCCESS) { device_printf(sc->mrsas_dev, "Adapter initialize Fail.\n"); return (1); } /* Allocate internal commands for pass-thru */ if (mrsas_alloc_mfi_cmds(sc) != SUCCESS) { device_printf(sc->mrsas_dev, "Allocate MFI cmd failed.\n"); return (1); } sc->ctrl_info = malloc(sizeof(struct mrsas_ctrl_info), M_MRSAS, M_NOWAIT); if (!sc->ctrl_info) { device_printf(sc->mrsas_dev, "Malloc for ctrl_info failed.\n"); return (1); } /* * Get the controller info from FW, so that the MAX VD support * availability can be decided. */ if (mrsas_get_ctrl_info(sc)) { device_printf(sc->mrsas_dev, "Unable to get FW ctrl_info.\n"); return (1); } sc->secure_jbod_support = (u_int8_t)sc->ctrl_info->adapterOperations3.supportSecurityonJBOD; if (sc->secure_jbod_support) device_printf(sc->mrsas_dev, "FW supports SED \n"); if (sc->use_seqnum_jbod_fp) device_printf(sc->mrsas_dev, "FW supports JBOD Map \n"); if (mrsas_setup_raidmap(sc) != SUCCESS) { device_printf(sc->mrsas_dev, "Error: RAID map setup FAILED !!! " "There seems to be some problem in the controller\n" "Please contact to the SUPPORT TEAM if the problem persists\n"); } megasas_setup_jbod_map(sc); /* For pass-thru, get PD/LD list and controller info */ memset(sc->pd_list, 0, MRSAS_MAX_PD * sizeof(struct mrsas_pd_list)); if (mrsas_get_pd_list(sc) != SUCCESS) { device_printf(sc->mrsas_dev, "Get PD list failed.\n"); return (1); } memset(sc->ld_ids, 0xff, MRSAS_MAX_LD_IDS); if (mrsas_get_ld_list(sc) != SUCCESS) { device_printf(sc->mrsas_dev, "Get LD lsit failed.\n"); return (1); } /* * Compute the max allowed sectors per IO: The controller info has * two limits on max sectors. Driver should use the minimum of these * two. * * 1 << stripe_sz_ops.min = max sectors per strip * * Note that older firmwares ( < FW ver 30) didn't report information to * calculate max_sectors_1. So the number ended up as zero always. 
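 * In other words:
 *   max_sectors_1 = (1 << stripe_sz_ops.min) * max_strips_per_io
 *   max_sectors_2 = max_request_size
 * and the driver default of max_num_sge * MRSAS_PAGE_SIZE / 512 is clamped to
 * min(max_sectors_1, max_sectors_2) whenever that minimum is non-zero.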
*/ tmp_sectors = 0; max_sectors_1 = (1 << sc->ctrl_info->stripe_sz_ops.min) * sc->ctrl_info->max_strips_per_io; max_sectors_2 = sc->ctrl_info->max_request_size; tmp_sectors = min(max_sectors_1, max_sectors_2); sc->max_sectors_per_req = sc->max_num_sge * MRSAS_PAGE_SIZE / 512; if (tmp_sectors && (sc->max_sectors_per_req > tmp_sectors)) sc->max_sectors_per_req = tmp_sectors; sc->disableOnlineCtrlReset = sc->ctrl_info->properties.OnOffProperties.disableOnlineCtrlReset; sc->UnevenSpanSupport = sc->ctrl_info->adapterOperations2.supportUnevenSpans; if (sc->UnevenSpanSupport) { device_printf(sc->mrsas_dev, "FW supports: UnevenSpanSupport=%x\n\n", sc->UnevenSpanSupport); if (MR_ValidateMapInfo(sc)) sc->fast_path_io = 1; else sc->fast_path_io = 0; } return (0); } /* * mrsas_init_adapter: Initializes the adapter/controller * input: Adapter soft state * * Prepares for the issuing of the IOC Init cmd to FW for initializing the * ROC/controller. The FW register is read to determined the number of * commands that is supported. All memory allocations for IO is based on * max_cmd. Appropriate calculations are performed in this function. */ int mrsas_init_adapter(struct mrsas_softc *sc) { uint32_t status; u_int32_t max_cmd, scratch_pad_2; int ret; int i = 0; /* Read FW status register */ status = mrsas_read_reg(sc, offsetof(mrsas_reg_set, outbound_scratch_pad)); /* Get operational params from status register */ sc->max_fw_cmds = status & MRSAS_FWSTATE_MAXCMD_MASK; /* Decrement the max supported by 1, to correlate with FW */ sc->max_fw_cmds = sc->max_fw_cmds - 1; max_cmd = sc->max_fw_cmds; /* Determine allocation size of command frames */ sc->reply_q_depth = ((max_cmd + 1 + 15) / 16 * 16) * 2; sc->request_alloc_sz = sizeof(MRSAS_REQUEST_DESCRIPTOR_UNION) * max_cmd; sc->reply_alloc_sz = sizeof(MPI2_REPLY_DESCRIPTORS_UNION) * (sc->reply_q_depth); sc->io_frames_alloc_sz = MRSAS_MPI2_RAID_DEFAULT_IO_FRAME_SIZE + (MRSAS_MPI2_RAID_DEFAULT_IO_FRAME_SIZE * (max_cmd + 1)); scratch_pad_2 = mrsas_read_reg(sc, offsetof(mrsas_reg_set, outbound_scratch_pad_2)); /* * If scratch_pad_2 & MEGASAS_MAX_CHAIN_SIZE_UNITS_MASK is set, * Firmware support extended IO chain frame which is 4 time more * than legacy Firmware. Legacy Firmware - Frame size is (8 * 128) = * 1K 1M IO Firmware - Frame size is (8 * 128 * 4) = 4K */ if (scratch_pad_2 & MEGASAS_MAX_CHAIN_SIZE_UNITS_MASK) sc->max_chain_frame_sz = ((scratch_pad_2 & MEGASAS_MAX_CHAIN_SIZE_MASK) >> 5) * MEGASAS_1MB_IO; else sc->max_chain_frame_sz = ((scratch_pad_2 & MEGASAS_MAX_CHAIN_SIZE_MASK) >> 5) * MEGASAS_256K_IO; sc->chain_frames_alloc_sz = sc->max_chain_frame_sz * max_cmd; sc->max_sge_in_main_msg = (MRSAS_MPI2_RAID_DEFAULT_IO_FRAME_SIZE - offsetof(MRSAS_RAID_SCSI_IO_REQUEST, SGL)) / 16; sc->max_sge_in_chain = sc->max_chain_frame_sz / sizeof(MPI2_SGE_IO_UNION); sc->max_num_sge = sc->max_sge_in_main_msg + sc->max_sge_in_chain - 2; mrsas_dprint(sc, MRSAS_INFO, "Avago Debug: MAX sge 0x%X MAX chain frame size 0x%X \n", sc->max_num_sge, sc->max_chain_frame_sz); /* Used for pass thru MFI frame (DCMD) */ sc->chain_offset_mfi_pthru = offsetof(MRSAS_RAID_SCSI_IO_REQUEST, SGL) / 16; sc->chain_offset_io_request = (MRSAS_MPI2_RAID_DEFAULT_IO_FRAME_SIZE - sizeof(MPI2_SGE_IO_UNION)) / 16; int count = sc->msix_vectors > 0 ? 
sc->msix_vectors : 1; for (i = 0; i < count; i++) sc->last_reply_idx[i] = 0; ret = mrsas_alloc_mem(sc); if (ret != SUCCESS) return (ret); ret = mrsas_alloc_mpt_cmds(sc); if (ret != SUCCESS) return (ret); ret = mrsas_ioc_init(sc); if (ret != SUCCESS) return (ret); return (0); } /* * mrsas_alloc_ioc_cmd: Allocates memory for IOC Init command * input: Adapter soft state * * Allocates for the IOC Init cmd to FW to initialize the ROC/controller. */ int mrsas_alloc_ioc_cmd(struct mrsas_softc *sc) { int ioc_init_size; /* Allocate IOC INIT command */ ioc_init_size = 1024 + sizeof(MPI2_IOC_INIT_REQUEST); if (bus_dma_tag_create(sc->mrsas_parent_tag, 1, 0, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, ioc_init_size, 1, ioc_init_size, BUS_DMA_ALLOCNOW, NULL, NULL, &sc->ioc_init_tag)) { device_printf(sc->mrsas_dev, "Cannot allocate ioc init tag\n"); return (ENOMEM); } if (bus_dmamem_alloc(sc->ioc_init_tag, (void **)&sc->ioc_init_mem, BUS_DMA_NOWAIT, &sc->ioc_init_dmamap)) { device_printf(sc->mrsas_dev, "Cannot allocate ioc init cmd mem\n"); return (ENOMEM); } bzero(sc->ioc_init_mem, ioc_init_size); if (bus_dmamap_load(sc->ioc_init_tag, sc->ioc_init_dmamap, sc->ioc_init_mem, ioc_init_size, mrsas_addr_cb, &sc->ioc_init_phys_mem, BUS_DMA_NOWAIT)) { device_printf(sc->mrsas_dev, "Cannot load ioc init cmd mem\n"); return (ENOMEM); } return (0); } /* * mrsas_free_ioc_cmd: Allocates memory for IOC Init command * input: Adapter soft state * * Deallocates memory of the IOC Init cmd. */ void mrsas_free_ioc_cmd(struct mrsas_softc *sc) { if (sc->ioc_init_phys_mem) bus_dmamap_unload(sc->ioc_init_tag, sc->ioc_init_dmamap); if (sc->ioc_init_mem != NULL) bus_dmamem_free(sc->ioc_init_tag, sc->ioc_init_mem, sc->ioc_init_dmamap); if (sc->ioc_init_tag != NULL) bus_dma_tag_destroy(sc->ioc_init_tag); } /* * mrsas_ioc_init: Sends IOC Init command to FW * input: Adapter soft state * * Issues the IOC Init cmd to FW to initialize the ROC/controller. */ int mrsas_ioc_init(struct mrsas_softc *sc) { struct mrsas_init_frame *init_frame; pMpi2IOCInitRequest_t IOCInitMsg; MRSAS_REQUEST_DESCRIPTOR_UNION req_desc; u_int8_t max_wait = MRSAS_IOC_INIT_WAIT_TIME; bus_addr_t phys_addr; int i, retcode = 0; u_int32_t scratch_pad_2; /* Allocate memory for the IOC INIT command */ if (mrsas_alloc_ioc_cmd(sc)) { device_printf(sc->mrsas_dev, "Cannot allocate IOC command.\n"); return (1); } if (!sc->block_sync_cache) { scratch_pad_2 = mrsas_read_reg(sc, offsetof(mrsas_reg_set, outbound_scratch_pad_2)); sc->fw_sync_cache_support = (scratch_pad_2 & MR_CAN_HANDLE_SYNC_CACHE_OFFSET) ? 1 : 0; } IOCInitMsg = (pMpi2IOCInitRequest_t)(((char *)sc->ioc_init_mem) + 1024); IOCInitMsg->Function = MPI2_FUNCTION_IOC_INIT; IOCInitMsg->WhoInit = MPI2_WHOINIT_HOST_DRIVER; IOCInitMsg->MsgVersion = MPI2_VERSION; IOCInitMsg->HeaderVersion = MPI2_HEADER_VERSION; IOCInitMsg->SystemRequestFrameSize = MRSAS_MPI2_RAID_DEFAULT_IO_FRAME_SIZE / 4; IOCInitMsg->ReplyDescriptorPostQueueDepth = sc->reply_q_depth; IOCInitMsg->ReplyDescriptorPostQueueAddress = sc->reply_desc_phys_addr; IOCInitMsg->SystemRequestFrameBaseAddress = sc->io_request_phys_addr; IOCInitMsg->HostMSIxVectors = (sc->msix_vectors > 0 ? sc->msix_vectors : 0); init_frame = (struct mrsas_init_frame *)sc->ioc_init_mem; init_frame->cmd = MFI_CMD_INIT; init_frame->cmd_status = 0xFF; init_frame->flags |= MFI_FRAME_DONT_POST_IN_REPLY_QUEUE; /* driver support Extended MSIX */ if (sc->mrsas_gen3_ctrl) { init_frame->driver_operations. 
mfi_capabilities.support_additional_msix = 1; } if (sc->verbuf_mem) { snprintf((char *)sc->verbuf_mem, strlen(MRSAS_VERSION) + 2, "%s\n", MRSAS_VERSION); init_frame->driver_ver_lo = (bus_addr_t)sc->verbuf_phys_addr; init_frame->driver_ver_hi = 0; } init_frame->driver_operations.mfi_capabilities.support_ndrive_r1_lb = 1; init_frame->driver_operations.mfi_capabilities.support_max_255lds = 1; init_frame->driver_operations.mfi_capabilities.security_protocol_cmds_fw = 1; if (sc->max_chain_frame_sz > MEGASAS_CHAIN_FRAME_SZ_MIN) init_frame->driver_operations.mfi_capabilities.support_ext_io_size = 1; phys_addr = (bus_addr_t)sc->ioc_init_phys_mem + 1024; init_frame->queue_info_new_phys_addr_lo = phys_addr; init_frame->data_xfer_len = sizeof(Mpi2IOCInitRequest_t); req_desc.addr.Words = (bus_addr_t)sc->ioc_init_phys_mem; req_desc.MFAIo.RequestFlags = (MRSAS_REQ_DESCRIPT_FLAGS_MFA << MRSAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT); mrsas_disable_intr(sc); mrsas_dprint(sc, MRSAS_OCR, "Issuing IOC INIT command to FW.\n"); mrsas_fire_cmd(sc, req_desc.addr.u.low, req_desc.addr.u.high); /* * Poll response timer to wait for Firmware response. While this * timer with the DELAY call could block CPU, the time interval for * this is only 1 millisecond. */ if (init_frame->cmd_status == 0xFF) { for (i = 0; i < (max_wait * 1000); i++) { if (init_frame->cmd_status == 0xFF) DELAY(1000); else break; } } if (init_frame->cmd_status == 0) mrsas_dprint(sc, MRSAS_OCR, "IOC INIT response received from FW.\n"); else { if (init_frame->cmd_status == 0xFF) device_printf(sc->mrsas_dev, "IOC Init timed out after %d seconds.\n", max_wait); else device_printf(sc->mrsas_dev, "IOC Init failed, status = 0x%x\n", init_frame->cmd_status); retcode = 1; } mrsas_free_ioc_cmd(sc); return (retcode); } /* * mrsas_alloc_mpt_cmds: Allocates the command packets * input: Adapter instance soft state * * This function allocates the internal commands for IOs. Each command that is * issued to FW is wrapped in a local data structure called mrsas_mpt_cmd. An * array is allocated with mrsas_mpt_cmd context. The free commands are * maintained in a linked list (cmd pool). SMID value range is from 1 to * max_fw_cmds. */ int mrsas_alloc_mpt_cmds(struct mrsas_softc *sc) { int i, j; u_int32_t max_cmd, count; struct mrsas_mpt_cmd *cmd; pMpi2ReplyDescriptorsUnion_t reply_desc; u_int32_t offset, chain_offset, sense_offset; bus_addr_t io_req_base_phys, chain_frame_base_phys, sense_base_phys; u_int8_t *io_req_base, *chain_frame_base, *sense_base; max_cmd = sc->max_fw_cmds; sc->req_desc = malloc(sc->request_alloc_sz, M_MRSAS, M_NOWAIT); if (!sc->req_desc) { device_printf(sc->mrsas_dev, "Out of memory, cannot alloc req desc\n"); return (ENOMEM); } memset(sc->req_desc, 0, sc->request_alloc_sz); /* * sc->mpt_cmd_list is an array of struct mrsas_mpt_cmd pointers. * Allocate the dynamic array first and then allocate individual * commands. 
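 * The pointer array is sized as max_cmd * sizeof(struct mrsas_mpt_cmd *);
 * using mallocarray() lets the allocator check that multiplication for
 * overflow (an overflowing request fails under M_NOWAIT) instead of handing
 * malloc() a silently wrapped size.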
*/ - sc->mpt_cmd_list = malloc(sizeof(struct mrsas_mpt_cmd *) * max_cmd, M_MRSAS, M_NOWAIT); + sc->mpt_cmd_list = mallocarray(max_cmd, sizeof(struct mrsas_mpt_cmd *), + M_MRSAS, M_NOWAIT); if (!sc->mpt_cmd_list) { device_printf(sc->mrsas_dev, "Cannot alloc memory for mpt_cmd_list.\n"); return (ENOMEM); } memset(sc->mpt_cmd_list, 0, sizeof(struct mrsas_mpt_cmd *) * max_cmd); for (i = 0; i < max_cmd; i++) { sc->mpt_cmd_list[i] = malloc(sizeof(struct mrsas_mpt_cmd), M_MRSAS, M_NOWAIT); if (!sc->mpt_cmd_list[i]) { for (j = 0; j < i; j++) free(sc->mpt_cmd_list[j], M_MRSAS); free(sc->mpt_cmd_list, M_MRSAS); sc->mpt_cmd_list = NULL; return (ENOMEM); } } io_req_base = (u_int8_t *)sc->io_request_mem + MRSAS_MPI2_RAID_DEFAULT_IO_FRAME_SIZE; io_req_base_phys = (bus_addr_t)sc->io_request_phys_addr + MRSAS_MPI2_RAID_DEFAULT_IO_FRAME_SIZE; chain_frame_base = (u_int8_t *)sc->chain_frame_mem; chain_frame_base_phys = (bus_addr_t)sc->chain_frame_phys_addr; sense_base = (u_int8_t *)sc->sense_mem; sense_base_phys = (bus_addr_t)sc->sense_phys_addr; for (i = 0; i < max_cmd; i++) { cmd = sc->mpt_cmd_list[i]; offset = MRSAS_MPI2_RAID_DEFAULT_IO_FRAME_SIZE * i; chain_offset = sc->max_chain_frame_sz * i; sense_offset = MRSAS_SENSE_LEN * i; memset(cmd, 0, sizeof(struct mrsas_mpt_cmd)); cmd->index = i + 1; cmd->ccb_ptr = NULL; callout_init_mtx(&cmd->cm_callout, &sc->sim_lock, 0); cmd->sync_cmd_idx = (u_int32_t)MRSAS_ULONG_MAX; cmd->sc = sc; cmd->io_request = (MRSAS_RAID_SCSI_IO_REQUEST *) (io_req_base + offset); memset(cmd->io_request, 0, sizeof(MRSAS_RAID_SCSI_IO_REQUEST)); cmd->io_request_phys_addr = io_req_base_phys + offset; cmd->chain_frame = (MPI2_SGE_IO_UNION *) (chain_frame_base + chain_offset); cmd->chain_frame_phys_addr = chain_frame_base_phys + chain_offset; cmd->sense = sense_base + sense_offset; cmd->sense_phys_addr = sense_base_phys + sense_offset; if (bus_dmamap_create(sc->data_tag, 0, &cmd->data_dmamap)) { return (FAIL); } TAILQ_INSERT_TAIL(&(sc->mrsas_mpt_cmd_list_head), cmd, next); } /* Initialize reply descriptor array to 0xFFFFFFFF */ reply_desc = sc->reply_desc_mem; count = sc->msix_vectors > 0 ? sc->msix_vectors : 1; for (i = 0; i < sc->reply_q_depth * count; i++, reply_desc++) { reply_desc->Words = MRSAS_ULONG_MAX; } return (0); } /* * mrsas_fire_cmd: Sends command to FW * input: Adapter softstate * request descriptor address low * request descriptor address high * * This functions fires the command to Firmware by writing to the * inbound_low_queue_port and inbound_high_queue_port. */ void mrsas_fire_cmd(struct mrsas_softc *sc, u_int32_t req_desc_lo, u_int32_t req_desc_hi) { mtx_lock(&sc->pci_lock); mrsas_write_reg(sc, offsetof(mrsas_reg_set, inbound_low_queue_port), req_desc_lo); mrsas_write_reg(sc, offsetof(mrsas_reg_set, inbound_high_queue_port), req_desc_hi); mtx_unlock(&sc->pci_lock); } /* * mrsas_transition_to_ready: Move FW to Ready state input: * Adapter instance soft state * * During the initialization, FW passes can potentially be in any one of several * possible states. If the FW in operational, waiting-for-handshake states, * driver must take steps to bring it to ready state. Otherwise, it has to * wait for the ready state. 
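 * The state is read from outbound_scratch_pad and masked with MFI_STATE_MASK.
 * FAULT is fatal unless an OCR is in progress, WAIT_HANDSHAKE and
 * BOOT_MESSAGE_PENDING are acknowledged through the doorbell register, and
 * OPERATIONAL is pushed toward ready with MFI_RESET_FLAGS. Each intermediate
 * state is given MRSAS_RESET_WAIT_TIME seconds to change before -ENODEV is
 * returned.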
*/ int mrsas_transition_to_ready(struct mrsas_softc *sc, int ocr) { int i; u_int8_t max_wait; u_int32_t val, fw_state; u_int32_t cur_state; u_int32_t abs_state, curr_abs_state; val = mrsas_read_reg(sc, offsetof(mrsas_reg_set, outbound_scratch_pad)); fw_state = val & MFI_STATE_MASK; max_wait = MRSAS_RESET_WAIT_TIME; if (fw_state != MFI_STATE_READY) device_printf(sc->mrsas_dev, "Waiting for FW to come to ready state\n"); while (fw_state != MFI_STATE_READY) { abs_state = mrsas_read_reg(sc, offsetof(mrsas_reg_set, outbound_scratch_pad)); switch (fw_state) { case MFI_STATE_FAULT: device_printf(sc->mrsas_dev, "FW is in FAULT state!!\n"); if (ocr) { cur_state = MFI_STATE_FAULT; break; } else return -ENODEV; case MFI_STATE_WAIT_HANDSHAKE: /* Set the CLR bit in inbound doorbell */ mrsas_write_reg(sc, offsetof(mrsas_reg_set, doorbell), MFI_INIT_CLEAR_HANDSHAKE | MFI_INIT_HOTPLUG); cur_state = MFI_STATE_WAIT_HANDSHAKE; break; case MFI_STATE_BOOT_MESSAGE_PENDING: mrsas_write_reg(sc, offsetof(mrsas_reg_set, doorbell), MFI_INIT_HOTPLUG); cur_state = MFI_STATE_BOOT_MESSAGE_PENDING; break; case MFI_STATE_OPERATIONAL: /* * Bring it to READY state; assuming max wait 10 * secs */ mrsas_disable_intr(sc); mrsas_write_reg(sc, offsetof(mrsas_reg_set, doorbell), MFI_RESET_FLAGS); for (i = 0; i < max_wait * 1000; i++) { if (mrsas_read_reg(sc, offsetof(mrsas_reg_set, doorbell)) & 1) DELAY(1000); else break; } cur_state = MFI_STATE_OPERATIONAL; break; case MFI_STATE_UNDEFINED: /* * This state should not last for more than 2 * seconds */ cur_state = MFI_STATE_UNDEFINED; break; case MFI_STATE_BB_INIT: cur_state = MFI_STATE_BB_INIT; break; case MFI_STATE_FW_INIT: cur_state = MFI_STATE_FW_INIT; break; case MFI_STATE_FW_INIT_2: cur_state = MFI_STATE_FW_INIT_2; break; case MFI_STATE_DEVICE_SCAN: cur_state = MFI_STATE_DEVICE_SCAN; break; case MFI_STATE_FLUSH_CACHE: cur_state = MFI_STATE_FLUSH_CACHE; break; default: device_printf(sc->mrsas_dev, "Unknown state 0x%x\n", fw_state); return -ENODEV; } /* * The cur_state should not last for more than max_wait secs */ for (i = 0; i < (max_wait * 1000); i++) { fw_state = (mrsas_read_reg(sc, offsetof(mrsas_reg_set, outbound_scratch_pad)) & MFI_STATE_MASK); curr_abs_state = mrsas_read_reg(sc, offsetof(mrsas_reg_set, outbound_scratch_pad)); if (abs_state == curr_abs_state) DELAY(1000); else break; } /* * Return error if fw_state hasn't changed after max_wait */ if (curr_abs_state == abs_state) { device_printf(sc->mrsas_dev, "FW state [%d] hasn't changed " "in %d secs\n", fw_state, max_wait); return -ENODEV; } } mrsas_dprint(sc, MRSAS_OCR, "FW now in Ready state\n"); return 0; } /* * mrsas_get_mfi_cmd: Get a cmd from free command pool * input: Adapter soft state * * This function removes an MFI command from the command list. */ struct mrsas_mfi_cmd * mrsas_get_mfi_cmd(struct mrsas_softc *sc) { struct mrsas_mfi_cmd *cmd = NULL; mtx_lock(&sc->mfi_cmd_pool_lock); if (!TAILQ_EMPTY(&sc->mrsas_mfi_cmd_list_head)) { cmd = TAILQ_FIRST(&sc->mrsas_mfi_cmd_list_head); TAILQ_REMOVE(&sc->mrsas_mfi_cmd_list_head, cmd, next); } mtx_unlock(&sc->mfi_cmd_pool_lock); return cmd; } /* * mrsas_ocr_thread: Thread to handle OCR/Kill Adapter. * input: Adapter Context. * * This function will check FW status register and flag do_timeout_reset flag. * It will do OCR/Kill adapter if FW is in fault state or IO timed out has * trigger reset. 
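 * The thread sleeps on ocr_chan for mrsas_fw_fault_check_delay seconds per
 * iteration and exits when a driver remove is in progress or the HBA has hit
 * a hardware critical error. On an IO timeout it first attempts a target
 * reset; a FW fault, DCMD timeout, or failed target reset escalates to
 * mrsas_reset_ctrl(), with the AEN taskqueue drained and blocked for the
 * duration of the reset.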
*/ static void mrsas_ocr_thread(void *arg) { struct mrsas_softc *sc; u_int32_t fw_status, fw_state; u_int8_t tm_target_reset_failed = 0; sc = (struct mrsas_softc *)arg; mrsas_dprint(sc, MRSAS_TRACE, "%s\n", __func__); sc->ocr_thread_active = 1; mtx_lock(&sc->sim_lock); for (;;) { /* Sleep for 1 second and check the queue status */ msleep(&sc->ocr_chan, &sc->sim_lock, PRIBIO, "mrsas_ocr", sc->mrsas_fw_fault_check_delay * hz); if (sc->remove_in_progress || sc->adprecovery == MRSAS_HW_CRITICAL_ERROR) { mrsas_dprint(sc, MRSAS_OCR, "Exit due to %s from %s\n", sc->remove_in_progress ? "Shutdown" : "Hardware critical error", __func__); break; } fw_status = mrsas_read_reg(sc, offsetof(mrsas_reg_set, outbound_scratch_pad)); fw_state = fw_status & MFI_STATE_MASK; if (fw_state == MFI_STATE_FAULT || sc->do_timedout_reset || mrsas_atomic_read(&sc->target_reset_outstanding)) { /* First, freeze further IOs to come to the SIM */ mrsas_xpt_freeze(sc); /* If this is an IO timeout then go for target reset */ if (mrsas_atomic_read(&sc->target_reset_outstanding)) { device_printf(sc->mrsas_dev, "Initiating Target RESET " "because of SCSI IO timeout!\n"); /* Let the remaining IOs to complete */ msleep(&sc->ocr_chan, &sc->sim_lock, PRIBIO, "mrsas_reset_targets", 5 * hz); /* Try to reset the target device */ if (mrsas_reset_targets(sc) == FAIL) tm_target_reset_failed = 1; } /* If this is a DCMD timeout or FW fault, * then go for controller reset */ if (fw_state == MFI_STATE_FAULT || tm_target_reset_failed || (sc->do_timedout_reset == MFI_DCMD_TIMEOUT_OCR)) { if (tm_target_reset_failed) device_printf(sc->mrsas_dev, "Initiaiting OCR because of " "TM FAILURE!\n"); else device_printf(sc->mrsas_dev, "Initiaiting OCR " "because of %s!\n", sc->do_timedout_reset ? "DCMD IO Timeout" : "FW fault"); mtx_lock_spin(&sc->ioctl_lock); sc->reset_in_progress = 1; mtx_unlock_spin(&sc->ioctl_lock); sc->reset_count++; /* * Wait for the AEN task to be completed if it is running. */ mtx_unlock(&sc->sim_lock); taskqueue_drain(sc->ev_tq, &sc->ev_task); mtx_lock(&sc->sim_lock); taskqueue_block(sc->ev_tq); /* Try to reset the controller */ mrsas_reset_ctrl(sc, sc->do_timedout_reset); sc->do_timedout_reset = 0; sc->reset_in_progress = 0; tm_target_reset_failed = 0; mrsas_atomic_set(&sc->target_reset_outstanding, 0); memset(sc->target_reset_pool, 0, sizeof(sc->target_reset_pool)); taskqueue_unblock(sc->ev_tq); } /* Now allow IOs to come to the SIM */ mrsas_xpt_release(sc); } } mtx_unlock(&sc->sim_lock); sc->ocr_thread_active = 0; mrsas_kproc_exit(0); } /* * mrsas_reset_reply_desc: Reset Reply descriptor as part of OCR. * input: Adapter Context. * * This function will clear reply descriptor so that post OCR driver and FW will * lost old history. */ void mrsas_reset_reply_desc(struct mrsas_softc *sc) { int i, count; pMpi2ReplyDescriptorsUnion_t reply_desc; count = sc->msix_vectors > 0 ? sc->msix_vectors : 1; for (i = 0; i < count; i++) sc->last_reply_idx[i] = 0; reply_desc = sc->reply_desc_mem; for (i = 0; i < sc->reply_q_depth; i++, reply_desc++) { reply_desc->Words = MRSAS_ULONG_MAX; } } /* * mrsas_reset_ctrl: Core function to OCR/Kill adapter. * input: Adapter Context. * * This function will run from thread context so that it can sleep. 1. Do not * handle OCR if FW is in HW critical error. 2. Wait for outstanding command * to complete for 180 seconds. 3. If #2 does not find any outstanding * command Controller is in working state, so skip OCR. Otherwise, do * OCR/kill Adapter based on flag disableOnlineCtrlReset. 4. 
Start of the * OCR, return all SCSI command back to CAM layer which has ccb_ptr. 5. Post * OCR, Re-fire Management command and move Controller to Operation state. */ int mrsas_reset_ctrl(struct mrsas_softc *sc, u_int8_t reset_reason) { int retval = SUCCESS, i, j, retry = 0; u_int32_t host_diag, abs_state, status_reg, reset_adapter; union ccb *ccb; struct mrsas_mfi_cmd *mfi_cmd; struct mrsas_mpt_cmd *mpt_cmd; union mrsas_evt_class_locale class_locale; MRSAS_REQUEST_DESCRIPTOR_UNION *req_desc; if (sc->adprecovery == MRSAS_HW_CRITICAL_ERROR) { device_printf(sc->mrsas_dev, "mrsas: Hardware critical error, returning FAIL.\n"); return FAIL; } mrsas_set_bit(MRSAS_FUSION_IN_RESET, &sc->reset_flags); sc->adprecovery = MRSAS_ADPRESET_SM_INFAULT; mrsas_disable_intr(sc); msleep(&sc->ocr_chan, &sc->sim_lock, PRIBIO, "mrsas_ocr", sc->mrsas_fw_fault_check_delay * hz); /* First try waiting for commands to complete */ if (mrsas_wait_for_outstanding(sc, reset_reason)) { mrsas_dprint(sc, MRSAS_OCR, "resetting adapter from %s.\n", __func__); /* Now return commands back to the CAM layer */ mtx_unlock(&sc->sim_lock); for (i = 0; i < sc->max_fw_cmds; i++) { mpt_cmd = sc->mpt_cmd_list[i]; if (mpt_cmd->ccb_ptr) { ccb = (union ccb *)(mpt_cmd->ccb_ptr); ccb->ccb_h.status = CAM_SCSI_BUS_RESET; mrsas_cmd_done(sc, mpt_cmd); mrsas_atomic_dec(&sc->fw_outstanding); } } mtx_lock(&sc->sim_lock); status_reg = mrsas_read_reg(sc, offsetof(mrsas_reg_set, outbound_scratch_pad)); abs_state = status_reg & MFI_STATE_MASK; reset_adapter = status_reg & MFI_RESET_ADAPTER; if (sc->disableOnlineCtrlReset || (abs_state == MFI_STATE_FAULT && !reset_adapter)) { /* Reset not supported, kill adapter */ mrsas_dprint(sc, MRSAS_OCR, "Reset not supported, killing adapter.\n"); mrsas_kill_hba(sc); retval = FAIL; goto out; } /* Now try to reset the chip */ for (i = 0; i < MRSAS_FUSION_MAX_RESET_TRIES; i++) { mrsas_write_reg(sc, offsetof(mrsas_reg_set, fusion_seq_offset), MPI2_WRSEQ_FLUSH_KEY_VALUE); mrsas_write_reg(sc, offsetof(mrsas_reg_set, fusion_seq_offset), MPI2_WRSEQ_1ST_KEY_VALUE); mrsas_write_reg(sc, offsetof(mrsas_reg_set, fusion_seq_offset), MPI2_WRSEQ_2ND_KEY_VALUE); mrsas_write_reg(sc, offsetof(mrsas_reg_set, fusion_seq_offset), MPI2_WRSEQ_3RD_KEY_VALUE); mrsas_write_reg(sc, offsetof(mrsas_reg_set, fusion_seq_offset), MPI2_WRSEQ_4TH_KEY_VALUE); mrsas_write_reg(sc, offsetof(mrsas_reg_set, fusion_seq_offset), MPI2_WRSEQ_5TH_KEY_VALUE); mrsas_write_reg(sc, offsetof(mrsas_reg_set, fusion_seq_offset), MPI2_WRSEQ_6TH_KEY_VALUE); /* Check that the diag write enable (DRWE) bit is on */ host_diag = mrsas_read_reg(sc, offsetof(mrsas_reg_set, fusion_host_diag)); retry = 0; while (!(host_diag & HOST_DIAG_WRITE_ENABLE)) { DELAY(100 * 1000); host_diag = mrsas_read_reg(sc, offsetof(mrsas_reg_set, fusion_host_diag)); if (retry++ == 100) { mrsas_dprint(sc, MRSAS_OCR, "Host diag unlock failed!\n"); break; } } if (!(host_diag & HOST_DIAG_WRITE_ENABLE)) continue; /* Send chip reset command */ mrsas_write_reg(sc, offsetof(mrsas_reg_set, fusion_host_diag), host_diag | HOST_DIAG_RESET_ADAPTER); DELAY(3000 * 1000); /* Make sure reset adapter bit is cleared */ host_diag = mrsas_read_reg(sc, offsetof(mrsas_reg_set, fusion_host_diag)); retry = 0; while (host_diag & HOST_DIAG_RESET_ADAPTER) { DELAY(100 * 1000); host_diag = mrsas_read_reg(sc, offsetof(mrsas_reg_set, fusion_host_diag)); if (retry++ == 1000) { mrsas_dprint(sc, MRSAS_OCR, "Diag reset adapter never cleared!\n"); break; } } if (host_diag & HOST_DIAG_RESET_ADAPTER) continue; abs_state = 
mrsas_read_reg(sc, offsetof(mrsas_reg_set, outbound_scratch_pad)) & MFI_STATE_MASK; retry = 0; while ((abs_state <= MFI_STATE_FW_INIT) && (retry++ < 1000)) { DELAY(100 * 1000); abs_state = mrsas_read_reg(sc, offsetof(mrsas_reg_set, outbound_scratch_pad)) & MFI_STATE_MASK; } if (abs_state <= MFI_STATE_FW_INIT) { mrsas_dprint(sc, MRSAS_OCR, "firmware state < MFI_STATE_FW_INIT," " state = 0x%x\n", abs_state); continue; } /* Wait for FW to become ready */ if (mrsas_transition_to_ready(sc, 1)) { mrsas_dprint(sc, MRSAS_OCR, "mrsas: Failed to transition controller to ready.\n"); continue; } mrsas_reset_reply_desc(sc); if (mrsas_ioc_init(sc)) { mrsas_dprint(sc, MRSAS_OCR, "mrsas_ioc_init() failed!\n"); continue; } for (j = 0; j < sc->max_fw_cmds; j++) { mpt_cmd = sc->mpt_cmd_list[j]; if (mpt_cmd->sync_cmd_idx != (u_int32_t)MRSAS_ULONG_MAX) { mfi_cmd = sc->mfi_cmd_list[mpt_cmd->sync_cmd_idx]; /* If not an IOCTL then release the command else re-fire */ if (!mfi_cmd->sync_cmd) { mrsas_release_mfi_cmd(mfi_cmd); } else { req_desc = mrsas_get_request_desc(sc, mfi_cmd->cmd_id.context.smid - 1); mrsas_dprint(sc, MRSAS_OCR, "Re-fire command DCMD opcode 0x%x index %d\n ", mfi_cmd->frame->dcmd.opcode, j); if (!req_desc) device_printf(sc->mrsas_dev, "Cannot build MPT cmd.\n"); else mrsas_fire_cmd(sc, req_desc->addr.u.low, req_desc->addr.u.high); } } } /* Reset load balance info */ memset(sc->load_balance_info, 0, sizeof(LD_LOAD_BALANCE_INFO) * MAX_LOGICAL_DRIVES_EXT); if (mrsas_get_ctrl_info(sc)) { mrsas_kill_hba(sc); retval = FAIL; goto out; } if (!mrsas_get_map_info(sc)) mrsas_sync_map_info(sc); megasas_setup_jbod_map(sc); mrsas_clear_bit(MRSAS_FUSION_IN_RESET, &sc->reset_flags); mrsas_enable_intr(sc); sc->adprecovery = MRSAS_HBA_OPERATIONAL; /* Register AEN with FW for last sequence number */ class_locale.members.reserved = 0; class_locale.members.locale = MR_EVT_LOCALE_ALL; class_locale.members.class = MR_EVT_CLASS_DEBUG; mtx_unlock(&sc->sim_lock); if (mrsas_register_aen(sc, sc->last_seq_num, class_locale.word)) { device_printf(sc->mrsas_dev, "ERROR: AEN registration FAILED from OCR !!! " "Further events from the controller cannot be notified." "Either there is some problem in the controller" "or the controller does not support AEN.\n" "Please contact to the SUPPORT TEAM if the problem persists\n"); } mtx_lock(&sc->sim_lock); /* Adapter reset completed successfully */ device_printf(sc->mrsas_dev, "Reset successful\n"); retval = SUCCESS; goto out; } /* Reset failed, kill the adapter */ device_printf(sc->mrsas_dev, "Reset failed, killing adapter.\n"); mrsas_kill_hba(sc); retval = FAIL; } else { mrsas_clear_bit(MRSAS_FUSION_IN_RESET, &sc->reset_flags); mrsas_enable_intr(sc); sc->adprecovery = MRSAS_HBA_OPERATIONAL; } out: mrsas_clear_bit(MRSAS_FUSION_IN_RESET, &sc->reset_flags); mrsas_dprint(sc, MRSAS_OCR, "Reset Exit with %d.\n", retval); return retval; } /* * mrsas_kill_hba: Kill HBA when OCR is not supported * input: Adapter Context. * * This function will kill HBA when OCR is not supported. 
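 *
 * (Editor's note: illustrative sketch only, not additional driver code.
 *  The kill path is reached from mrsas_reset_ctrl() above when a reset is
 *  not possible, roughly:
 *
 *	if (sc->disableOnlineCtrlReset ||
 *	    (abs_state == MFI_STATE_FAULT && !reset_adapter)) {
 *		mrsas_kill_hba(sc);
 *		retval = FAIL;
 *	}
 *
 *  using that function's local variables.)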
*/ void mrsas_kill_hba(struct mrsas_softc *sc) { sc->adprecovery = MRSAS_HW_CRITICAL_ERROR; DELAY(1000 * 1000); mrsas_dprint(sc, MRSAS_OCR, "%s\n", __func__); mrsas_write_reg(sc, offsetof(mrsas_reg_set, doorbell), MFI_STOP_ADP); /* Flush */ mrsas_read_reg(sc, offsetof(mrsas_reg_set, doorbell)); mrsas_complete_outstanding_ioctls(sc); } /** * mrsas_complete_outstanding_ioctls Complete pending IOCTLS after kill_hba * input: Controller softc * * Returns void */ void mrsas_complete_outstanding_ioctls(struct mrsas_softc *sc) { int i; struct mrsas_mpt_cmd *cmd_mpt; struct mrsas_mfi_cmd *cmd_mfi; u_int32_t count, MSIxIndex; count = sc->msix_vectors > 0 ? sc->msix_vectors : 1; for (i = 0; i < sc->max_fw_cmds; i++) { cmd_mpt = sc->mpt_cmd_list[i]; if (cmd_mpt->sync_cmd_idx != (u_int32_t)MRSAS_ULONG_MAX) { cmd_mfi = sc->mfi_cmd_list[cmd_mpt->sync_cmd_idx]; if (cmd_mfi->sync_cmd && cmd_mfi->frame->hdr.cmd != MFI_CMD_ABORT) { for (MSIxIndex = 0; MSIxIndex < count; MSIxIndex++) mrsas_complete_mptmfi_passthru(sc, cmd_mfi, cmd_mpt->io_request->RaidContext.status); } } } } /* * mrsas_wait_for_outstanding: Wait for outstanding commands * input: Adapter Context. * * This function will wait for 180 seconds for outstanding commands to be * completed. */ int mrsas_wait_for_outstanding(struct mrsas_softc *sc, u_int8_t check_reason) { int i, outstanding, retval = 0; u_int32_t fw_state, count, MSIxIndex; for (i = 0; i < MRSAS_RESET_WAIT_TIME; i++) { if (sc->remove_in_progress) { mrsas_dprint(sc, MRSAS_OCR, "Driver remove or shutdown called.\n"); retval = 1; goto out; } /* Check if firmware is in fault state */ fw_state = mrsas_read_reg(sc, offsetof(mrsas_reg_set, outbound_scratch_pad)) & MFI_STATE_MASK; if (fw_state == MFI_STATE_FAULT) { mrsas_dprint(sc, MRSAS_OCR, "Found FW in FAULT state, will reset adapter.\n"); count = sc->msix_vectors > 0 ? sc->msix_vectors : 1; mtx_unlock(&sc->sim_lock); for (MSIxIndex = 0; MSIxIndex < count; MSIxIndex++) mrsas_complete_cmd(sc, MSIxIndex); mtx_lock(&sc->sim_lock); retval = 1; goto out; } if (check_reason == MFI_DCMD_TIMEOUT_OCR) { mrsas_dprint(sc, MRSAS_OCR, "DCMD IO TIMEOUT detected, will reset adapter.\n"); retval = 1; goto out; } outstanding = mrsas_atomic_read(&sc->fw_outstanding); if (!outstanding) goto out; if (!(i % MRSAS_RESET_NOTICE_INTERVAL)) { mrsas_dprint(sc, MRSAS_OCR, "[%2d]waiting for %d " "commands to complete\n", i, outstanding); count = sc->msix_vectors > 0 ? sc->msix_vectors : 1; mtx_unlock(&sc->sim_lock); for (MSIxIndex = 0; MSIxIndex < count; MSIxIndex++) mrsas_complete_cmd(sc, MSIxIndex); mtx_lock(&sc->sim_lock); } DELAY(1000 * 1000); } if (mrsas_atomic_read(&sc->fw_outstanding)) { mrsas_dprint(sc, MRSAS_OCR, " pending commands remain after waiting," " will reset adapter.\n"); retval = 1; } out: return retval; } /* * mrsas_release_mfi_cmd: Return a cmd to free command pool * input: Command packet for return to free cmd pool * * This function returns the MFI & MPT command to the command list. 
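 *
 * (Editor's sketch, not part of the original source: the usual pairing with
 *  mrsas_get_mfi_cmd(), condensed from the pattern of mrsas_flush_cache()
 *  later in this file.)
 *
 *	cmd = mrsas_get_mfi_cmd(sc);
 *	if (cmd) {
 *		dcmd = &cmd->frame->dcmd;
 *		dcmd->cmd = MFI_CMD_DCMD;
 *		dcmd->opcode = MR_DCMD_CTRL_CACHE_FLUSH;
 *		mrsas_issue_blocked_cmd(sc, cmd);
 *		mrsas_release_mfi_cmd(cmd);
 *	}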
*/ void mrsas_release_mfi_cmd(struct mrsas_mfi_cmd *cmd_mfi) { struct mrsas_softc *sc = cmd_mfi->sc; struct mrsas_mpt_cmd *cmd_mpt; mtx_lock(&sc->mfi_cmd_pool_lock); /* * Release the mpt command (if at all it is allocated * associated with the mfi command */ if (cmd_mfi->cmd_id.context.smid) { mtx_lock(&sc->mpt_cmd_pool_lock); /* Get the mpt cmd from mfi cmd frame's smid value */ cmd_mpt = sc->mpt_cmd_list[cmd_mfi->cmd_id.context.smid-1]; cmd_mpt->flags = 0; cmd_mpt->sync_cmd_idx = (u_int32_t)MRSAS_ULONG_MAX; TAILQ_INSERT_HEAD(&(sc->mrsas_mpt_cmd_list_head), cmd_mpt, next); mtx_unlock(&sc->mpt_cmd_pool_lock); } /* Release the mfi command */ cmd_mfi->ccb_ptr = NULL; cmd_mfi->cmd_id.frame_count = 0; TAILQ_INSERT_HEAD(&(sc->mrsas_mfi_cmd_list_head), cmd_mfi, next); mtx_unlock(&sc->mfi_cmd_pool_lock); return; } /* * mrsas_get_controller_info: Returns FW's controller structure * input: Adapter soft state * Controller information structure * * Issues an internal command (DCMD) to get the FW's controller structure. This * information is mainly used to find out the maximum IO transfer per command * supported by the FW. */ static int mrsas_get_ctrl_info(struct mrsas_softc *sc) { int retcode = 0; u_int8_t do_ocr = 1; struct mrsas_mfi_cmd *cmd; struct mrsas_dcmd_frame *dcmd; cmd = mrsas_get_mfi_cmd(sc); if (!cmd) { device_printf(sc->mrsas_dev, "Failed to get a free cmd\n"); return -ENOMEM; } dcmd = &cmd->frame->dcmd; if (mrsas_alloc_ctlr_info_cmd(sc) != SUCCESS) { device_printf(sc->mrsas_dev, "Cannot allocate get ctlr info cmd\n"); mrsas_release_mfi_cmd(cmd); return -ENOMEM; } memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE); dcmd->cmd = MFI_CMD_DCMD; dcmd->cmd_status = 0xFF; dcmd->sge_count = 1; dcmd->flags = MFI_FRAME_DIR_READ; dcmd->timeout = 0; dcmd->pad_0 = 0; dcmd->data_xfer_len = sizeof(struct mrsas_ctrl_info); dcmd->opcode = MR_DCMD_CTRL_GET_INFO; dcmd->sgl.sge32[0].phys_addr = sc->ctlr_info_phys_addr; dcmd->sgl.sge32[0].length = sizeof(struct mrsas_ctrl_info); if (!sc->mask_interrupts) retcode = mrsas_issue_blocked_cmd(sc, cmd); else retcode = mrsas_issue_polled(sc, cmd); if (retcode == ETIMEDOUT) goto dcmd_timeout; else memcpy(sc->ctrl_info, sc->ctlr_info_mem, sizeof(struct mrsas_ctrl_info)); do_ocr = 0; mrsas_update_ext_vd_details(sc); sc->use_seqnum_jbod_fp = sc->ctrl_info->adapterOperations3.useSeqNumJbodFP; sc->disableOnlineCtrlReset = sc->ctrl_info->properties.OnOffProperties.disableOnlineCtrlReset; dcmd_timeout: mrsas_free_ctlr_info_cmd(sc); if (do_ocr) sc->do_timedout_reset = MFI_DCMD_TIMEOUT_OCR; if (!sc->mask_interrupts) mrsas_release_mfi_cmd(cmd); return (retcode); } /* * mrsas_update_ext_vd_details : Update details w.r.t Extended VD * input: * sc - Controller's softc */ static void mrsas_update_ext_vd_details(struct mrsas_softc *sc) { sc->max256vdSupport = sc->ctrl_info->adapterOperations3.supportMaxExtLDs; /* Below is additional check to address future FW enhancement */ if (sc->ctrl_info->max_lds > 64) sc->max256vdSupport = 1; sc->drv_supported_vd_count = MRSAS_MAX_LD_CHANNELS * MRSAS_MAX_DEV_PER_CHANNEL; sc->drv_supported_pd_count = MRSAS_MAX_PD_CHANNELS * MRSAS_MAX_DEV_PER_CHANNEL; if (sc->max256vdSupport) { sc->fw_supported_vd_count = MAX_LOGICAL_DRIVES_EXT; sc->fw_supported_pd_count = MAX_PHYSICAL_DEVICES; } else { sc->fw_supported_vd_count = MAX_LOGICAL_DRIVES; sc->fw_supported_pd_count = MAX_PHYSICAL_DEVICES; } sc->old_map_sz = sizeof(MR_FW_RAID_MAP) + (sizeof(MR_LD_SPAN_MAP) * (sc->fw_supported_vd_count - 1)); sc->new_map_sz = sizeof(MR_FW_RAID_MAP_EXT); sc->drv_map_sz = 
sizeof(MR_DRV_RAID_MAP) + (sizeof(MR_LD_SPAN_MAP) * (sc->drv_supported_vd_count - 1)); sc->max_map_sz = max(sc->old_map_sz, sc->new_map_sz); if (sc->max256vdSupport) sc->current_map_sz = sc->new_map_sz; else sc->current_map_sz = sc->old_map_sz; } /* * mrsas_alloc_ctlr_info_cmd: Allocates memory for controller info command * input: Adapter soft state * * Allocates DMAable memory for the controller info internal command. */ int mrsas_alloc_ctlr_info_cmd(struct mrsas_softc *sc) { int ctlr_info_size; /* Allocate get controller info command */ ctlr_info_size = sizeof(struct mrsas_ctrl_info); if (bus_dma_tag_create(sc->mrsas_parent_tag, 1, 0, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, ctlr_info_size, 1, ctlr_info_size, BUS_DMA_ALLOCNOW, NULL, NULL, &sc->ctlr_info_tag)) { device_printf(sc->mrsas_dev, "Cannot allocate ctlr info tag\n"); return (ENOMEM); } if (bus_dmamem_alloc(sc->ctlr_info_tag, (void **)&sc->ctlr_info_mem, BUS_DMA_NOWAIT, &sc->ctlr_info_dmamap)) { device_printf(sc->mrsas_dev, "Cannot allocate ctlr info cmd mem\n"); return (ENOMEM); } if (bus_dmamap_load(sc->ctlr_info_tag, sc->ctlr_info_dmamap, sc->ctlr_info_mem, ctlr_info_size, mrsas_addr_cb, &sc->ctlr_info_phys_addr, BUS_DMA_NOWAIT)) { device_printf(sc->mrsas_dev, "Cannot load ctlr info cmd mem\n"); return (ENOMEM); } memset(sc->ctlr_info_mem, 0, ctlr_info_size); return (0); } /* * mrsas_free_ctlr_info_cmd: Free memory for controller info command * input: Adapter soft state * * Deallocates memory of the get controller info cmd. */ void mrsas_free_ctlr_info_cmd(struct mrsas_softc *sc) { if (sc->ctlr_info_phys_addr) bus_dmamap_unload(sc->ctlr_info_tag, sc->ctlr_info_dmamap); if (sc->ctlr_info_mem != NULL) bus_dmamem_free(sc->ctlr_info_tag, sc->ctlr_info_mem, sc->ctlr_info_dmamap); if (sc->ctlr_info_tag != NULL) bus_dma_tag_destroy(sc->ctlr_info_tag); } /* * mrsas_issue_polled: Issues a polling command * inputs: Adapter soft state * Command packet to be issued * * This function is for posting of internal commands to Firmware. MFI requires * the cmd_status to be set to 0xFF before posting. The maximun wait time of * the poll response timer is 180 seconds. */ int mrsas_issue_polled(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd) { struct mrsas_header *frame_hdr = &cmd->frame->hdr; u_int8_t max_wait = MRSAS_INTERNAL_CMD_WAIT_TIME; int i, retcode = SUCCESS; frame_hdr->cmd_status = 0xFF; frame_hdr->flags |= MFI_FRAME_DONT_POST_IN_REPLY_QUEUE; /* Issue the frame using inbound queue port */ if (mrsas_issue_dcmd(sc, cmd)) { device_printf(sc->mrsas_dev, "Cannot issue DCMD internal command.\n"); return (1); } /* * Poll response timer to wait for Firmware response. While this * timer with the DELAY call could block CPU, the time interval for * this is only 1 millisecond. */ if (frame_hdr->cmd_status == 0xFF) { for (i = 0; i < (max_wait * 1000); i++) { if (frame_hdr->cmd_status == 0xFF) DELAY(1000); else break; } } if (frame_hdr->cmd_status == 0xFF) { device_printf(sc->mrsas_dev, "DCMD timed out after %d " "seconds from %s\n", max_wait, __func__); device_printf(sc->mrsas_dev, "DCMD opcode 0x%X\n", cmd->frame->dcmd.opcode); retcode = ETIMEDOUT; } return (retcode); } /* * mrsas_issue_dcmd: Issues a MFI Pass thru cmd * input: Adapter soft state mfi cmd pointer * * This function is called by mrsas_issued_blocked_cmd() and * mrsas_issued_polled(), to build the MPT command and then fire the command * to Firmware. 
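 *
 * (Editor's note: condensed from the function body below, shown only to make
 *  the two-step build/fire flow explicit; not new driver code.)
 *
 *	req_desc = mrsas_build_mpt_cmd(sc, cmd);
 *	if (req_desc)
 *		mrsas_fire_cmd(sc, req_desc->addr.u.low,
 *		    req_desc->addr.u.high);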
*/ int mrsas_issue_dcmd(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd) { MRSAS_REQUEST_DESCRIPTOR_UNION *req_desc; req_desc = mrsas_build_mpt_cmd(sc, cmd); if (!req_desc) { device_printf(sc->mrsas_dev, "Cannot build MPT cmd.\n"); return (1); } mrsas_fire_cmd(sc, req_desc->addr.u.low, req_desc->addr.u.high); return (0); } /* * mrsas_build_mpt_cmd: Calls helper function to build Passthru cmd * input: Adapter soft state mfi cmd to build * * This function is called by mrsas_issue_cmd() to build the MPT-MFI passthru * command and prepares the MPT command to send to Firmware. */ MRSAS_REQUEST_DESCRIPTOR_UNION * mrsas_build_mpt_cmd(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd) { MRSAS_REQUEST_DESCRIPTOR_UNION *req_desc; u_int16_t index; if (mrsas_build_mptmfi_passthru(sc, cmd)) { device_printf(sc->mrsas_dev, "Cannot build MPT-MFI passthru cmd.\n"); return NULL; } index = cmd->cmd_id.context.smid; req_desc = mrsas_get_request_desc(sc, index - 1); if (!req_desc) return NULL; req_desc->addr.Words = 0; req_desc->SCSIIO.RequestFlags = (MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO << MRSAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT); req_desc->SCSIIO.SMID = index; return (req_desc); } /* * mrsas_build_mptmfi_passthru: Builds a MPT MFI Passthru command * input: Adapter soft state mfi cmd pointer * * The MPT command and the io_request are setup as a passthru command. The SGE * chain address is set to frame_phys_addr of the MFI command. */ u_int8_t mrsas_build_mptmfi_passthru(struct mrsas_softc *sc, struct mrsas_mfi_cmd *mfi_cmd) { MPI25_IEEE_SGE_CHAIN64 *mpi25_ieee_chain; PTR_MRSAS_RAID_SCSI_IO_REQUEST io_req; struct mrsas_mpt_cmd *mpt_cmd; struct mrsas_header *frame_hdr = &mfi_cmd->frame->hdr; mpt_cmd = mrsas_get_mpt_cmd(sc); if (!mpt_cmd) return (1); /* Save the smid. To be used for returning the cmd */ mfi_cmd->cmd_id.context.smid = mpt_cmd->index; mpt_cmd->sync_cmd_idx = mfi_cmd->index; /* * For cmds where the flag is set, store the flag and check on * completion. For cmds with this flag, don't call * mrsas_complete_cmd. */ if (frame_hdr->flags & MFI_FRAME_DONT_POST_IN_REPLY_QUEUE) mpt_cmd->flags = MFI_FRAME_DONT_POST_IN_REPLY_QUEUE; io_req = mpt_cmd->io_request; if (sc->mrsas_gen3_ctrl) { pMpi25IeeeSgeChain64_t sgl_ptr_end = (pMpi25IeeeSgeChain64_t)&io_req->SGL; sgl_ptr_end += sc->max_sge_in_main_msg - 1; sgl_ptr_end->Flags = 0; } mpi25_ieee_chain = (MPI25_IEEE_SGE_CHAIN64 *) & io_req->SGL.IeeeChain; io_req->Function = MRSAS_MPI2_FUNCTION_PASSTHRU_IO_REQUEST; io_req->SGLOffset0 = offsetof(MRSAS_RAID_SCSI_IO_REQUEST, SGL) / 4; io_req->ChainOffset = sc->chain_offset_mfi_pthru; mpi25_ieee_chain->Address = mfi_cmd->frame_phys_addr; mpi25_ieee_chain->Flags = IEEE_SGE_FLAGS_CHAIN_ELEMENT | MPI2_IEEE_SGE_FLAGS_IOCPLBNTA_ADDR; mpi25_ieee_chain->Length = sc->max_chain_frame_sz; return (0); } /* * mrsas_issue_blocked_cmd: Synchronous wrapper around regular FW cmds * input: Adapter soft state Command to be issued * * This function waits on an event for the command to be returned from the ISR. * Max wait time is MRSAS_INTERNAL_CMD_WAIT_TIME secs. Used for issuing * internal and ioctl commands. 
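 *
 * (Editor's sketch of the expected caller-side timeout handling, modelled on
 *  mrsas_get_ctrl_info() earlier in this file; not new driver logic.)
 *
 *	retcode = mrsas_issue_blocked_cmd(sc, cmd);
 *	if (retcode == ETIMEDOUT)
 *		sc->do_timedout_reset = MFI_DCMD_TIMEOUT_OCR;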
*/ int mrsas_issue_blocked_cmd(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd) { u_int8_t max_wait = MRSAS_INTERNAL_CMD_WAIT_TIME; unsigned long total_time = 0; int retcode = SUCCESS; /* Initialize cmd_status */ cmd->cmd_status = 0xFF; /* Build MPT-MFI command for issue to FW */ if (mrsas_issue_dcmd(sc, cmd)) { device_printf(sc->mrsas_dev, "Cannot issue DCMD internal command.\n"); return (1); } sc->chan = (void *)&cmd; while (1) { if (cmd->cmd_status == 0xFF) { tsleep((void *)&sc->chan, 0, "mrsas_sleep", hz); } else break; if (!cmd->sync_cmd) { /* cmd->sync will be set for an IOCTL * command */ total_time++; if (total_time >= max_wait) { device_printf(sc->mrsas_dev, "Internal command timed out after %d seconds.\n", max_wait); retcode = 1; break; } } } if (cmd->cmd_status == 0xFF) { device_printf(sc->mrsas_dev, "DCMD timed out after %d " "seconds from %s\n", max_wait, __func__); device_printf(sc->mrsas_dev, "DCMD opcode 0x%X\n", cmd->frame->dcmd.opcode); retcode = ETIMEDOUT; } return (retcode); } /* * mrsas_complete_mptmfi_passthru: Completes a command * input: @sc: Adapter soft state * @cmd: Command to be completed * @status: cmd completion status * * This function is called from mrsas_complete_cmd() after an interrupt is * received from Firmware, and io_request->Function is * MRSAS_MPI2_FUNCTION_PASSTHRU_IO_REQUEST. */ void mrsas_complete_mptmfi_passthru(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd, u_int8_t status) { struct mrsas_header *hdr = &cmd->frame->hdr; u_int8_t cmd_status = cmd->frame->hdr.cmd_status; /* Reset the retry counter for future re-tries */ cmd->retry_for_fw_reset = 0; if (cmd->ccb_ptr) cmd->ccb_ptr = NULL; switch (hdr->cmd) { case MFI_CMD_INVALID: device_printf(sc->mrsas_dev, "MFI_CMD_INVALID command.\n"); break; case MFI_CMD_PD_SCSI_IO: case MFI_CMD_LD_SCSI_IO: /* * MFI_CMD_PD_SCSI_IO and MFI_CMD_LD_SCSI_IO could have been * issued either through an IO path or an IOCTL path. If it * was via IOCTL, we will send it to internal completion. 
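 * Otherwise (the non-IOCTL case) control simply falls through into the
 * MFI_CMD_SMP/MFI_CMD_STP/MFI_CMD_DCMD handling below (editor's note,
 * describing the existing control flow, not a behavioural change).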
*/ if (cmd->sync_cmd) { cmd->sync_cmd = 0; mrsas_wakeup(sc, cmd); break; } case MFI_CMD_SMP: case MFI_CMD_STP: case MFI_CMD_DCMD: /* Check for LD map update */ if ((cmd->frame->dcmd.opcode == MR_DCMD_LD_MAP_GET_INFO) && (cmd->frame->dcmd.mbox.b[1] == 1)) { sc->fast_path_io = 0; mtx_lock(&sc->raidmap_lock); sc->map_update_cmd = NULL; if (cmd_status != 0) { if (cmd_status != MFI_STAT_NOT_FOUND) device_printf(sc->mrsas_dev, "map sync failed, status=%x\n", cmd_status); else { mrsas_release_mfi_cmd(cmd); mtx_unlock(&sc->raidmap_lock); break; } } else sc->map_id++; mrsas_release_mfi_cmd(cmd); if (MR_ValidateMapInfo(sc)) sc->fast_path_io = 0; else sc->fast_path_io = 1; mrsas_sync_map_info(sc); mtx_unlock(&sc->raidmap_lock); break; } if (cmd->frame->dcmd.opcode == MR_DCMD_CTRL_EVENT_GET_INFO || cmd->frame->dcmd.opcode == MR_DCMD_CTRL_EVENT_GET) { sc->mrsas_aen_triggered = 0; } /* FW has an updated PD sequence */ if ((cmd->frame->dcmd.opcode == MR_DCMD_SYSTEM_PD_MAP_GET_INFO) && (cmd->frame->dcmd.mbox.b[0] == 1)) { mtx_lock(&sc->raidmap_lock); sc->jbod_seq_cmd = NULL; mrsas_release_mfi_cmd(cmd); if (cmd_status == MFI_STAT_OK) { sc->pd_seq_map_id++; /* Re-register a pd sync seq num cmd */ if (megasas_sync_pd_seq_num(sc, true)) sc->use_seqnum_jbod_fp = 0; } else { sc->use_seqnum_jbod_fp = 0; device_printf(sc->mrsas_dev, "Jbod map sync failed, status=%x\n", cmd_status); } mtx_unlock(&sc->raidmap_lock); break; } /* See if got an event notification */ if (cmd->frame->dcmd.opcode == MR_DCMD_CTRL_EVENT_WAIT) mrsas_complete_aen(sc, cmd); else mrsas_wakeup(sc, cmd); break; case MFI_CMD_ABORT: /* Command issued to abort another cmd return */ mrsas_complete_abort(sc, cmd); break; default: device_printf(sc->mrsas_dev, "Unknown command completed! [0x%X]\n", hdr->cmd); break; } } /* * mrsas_wakeup: Completes an internal command * input: Adapter soft state * Command to be completed * * In mrsas_issue_blocked_cmd(), after a command is issued to Firmware, a wait * timer is started. This function is called from * mrsas_complete_mptmfi_passthru() as it completes the command, to wake up * from the command wait. */ void mrsas_wakeup(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd) { cmd->cmd_status = cmd->frame->io.cmd_status; if (cmd->cmd_status == 0xFF) cmd->cmd_status = 0; sc->chan = (void *)&cmd; wakeup_one((void *)&sc->chan); return; } /* * mrsas_shutdown_ctlr: Instructs FW to shutdown the controller input: * Adapter soft state Shutdown/Hibernate * * This function issues a DCMD internal command to Firmware to initiate shutdown * of the controller. 
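 *
 * (Editor's sketch of a typical call site.  The opcode constant name
 *  MR_DCMD_CTRL_SHUTDOWN is an assumption based on the MFI naming used in
 *  this file; it does not appear in this hunk.)
 *
 *	mrsas_flush_cache(sc);
 *	mrsas_shutdown_ctlr(sc, MR_DCMD_CTRL_SHUTDOWN);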
*/ static void mrsas_shutdown_ctlr(struct mrsas_softc *sc, u_int32_t opcode) { struct mrsas_mfi_cmd *cmd; struct mrsas_dcmd_frame *dcmd; if (sc->adprecovery == MRSAS_HW_CRITICAL_ERROR) return; cmd = mrsas_get_mfi_cmd(sc); if (!cmd) { device_printf(sc->mrsas_dev, "Cannot allocate for shutdown cmd.\n"); return; } if (sc->aen_cmd) mrsas_issue_blocked_abort_cmd(sc, sc->aen_cmd); if (sc->map_update_cmd) mrsas_issue_blocked_abort_cmd(sc, sc->map_update_cmd); if (sc->jbod_seq_cmd) mrsas_issue_blocked_abort_cmd(sc, sc->jbod_seq_cmd); dcmd = &cmd->frame->dcmd; memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE); dcmd->cmd = MFI_CMD_DCMD; dcmd->cmd_status = 0x0; dcmd->sge_count = 0; dcmd->flags = MFI_FRAME_DIR_NONE; dcmd->timeout = 0; dcmd->pad_0 = 0; dcmd->data_xfer_len = 0; dcmd->opcode = opcode; device_printf(sc->mrsas_dev, "Preparing to shut down controller.\n"); mrsas_issue_blocked_cmd(sc, cmd); mrsas_release_mfi_cmd(cmd); return; } /* * mrsas_flush_cache: Requests FW to flush all its caches input: * Adapter soft state * * This function is issues a DCMD internal command to Firmware to initiate * flushing of all caches. */ static void mrsas_flush_cache(struct mrsas_softc *sc) { struct mrsas_mfi_cmd *cmd; struct mrsas_dcmd_frame *dcmd; if (sc->adprecovery == MRSAS_HW_CRITICAL_ERROR) return; cmd = mrsas_get_mfi_cmd(sc); if (!cmd) { device_printf(sc->mrsas_dev, "Cannot allocate for flush cache cmd.\n"); return; } dcmd = &cmd->frame->dcmd; memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE); dcmd->cmd = MFI_CMD_DCMD; dcmd->cmd_status = 0x0; dcmd->sge_count = 0; dcmd->flags = MFI_FRAME_DIR_NONE; dcmd->timeout = 0; dcmd->pad_0 = 0; dcmd->data_xfer_len = 0; dcmd->opcode = MR_DCMD_CTRL_CACHE_FLUSH; dcmd->mbox.b[0] = MR_FLUSH_CTRL_CACHE | MR_FLUSH_DISK_CACHE; mrsas_issue_blocked_cmd(sc, cmd); mrsas_release_mfi_cmd(cmd); return; } int megasas_sync_pd_seq_num(struct mrsas_softc *sc, boolean_t pend) { int retcode = 0; u_int8_t do_ocr = 1; struct mrsas_mfi_cmd *cmd; struct mrsas_dcmd_frame *dcmd; uint32_t pd_seq_map_sz; struct MR_PD_CFG_SEQ_NUM_SYNC *pd_sync; bus_addr_t pd_seq_h; pd_seq_map_sz = sizeof(struct MR_PD_CFG_SEQ_NUM_SYNC) + (sizeof(struct MR_PD_CFG_SEQ) * (MAX_PHYSICAL_DEVICES - 1)); cmd = mrsas_get_mfi_cmd(sc); if (!cmd) { device_printf(sc->mrsas_dev, "Cannot alloc for ld map info cmd.\n"); return 1; } dcmd = &cmd->frame->dcmd; pd_sync = (void *)sc->jbodmap_mem[(sc->pd_seq_map_id & 1)]; pd_seq_h = sc->jbodmap_phys_addr[(sc->pd_seq_map_id & 1)]; if (!pd_sync) { device_printf(sc->mrsas_dev, "Failed to alloc mem for jbod map info.\n"); mrsas_release_mfi_cmd(cmd); return (ENOMEM); } memset(pd_sync, 0, pd_seq_map_sz); memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE); dcmd->cmd = MFI_CMD_DCMD; dcmd->cmd_status = 0xFF; dcmd->sge_count = 1; dcmd->timeout = 0; dcmd->pad_0 = 0; dcmd->data_xfer_len = (pd_seq_map_sz); dcmd->opcode = (MR_DCMD_SYSTEM_PD_MAP_GET_INFO); dcmd->sgl.sge32[0].phys_addr = (pd_seq_h); dcmd->sgl.sge32[0].length = (pd_seq_map_sz); if (pend) { dcmd->mbox.b[0] = MRSAS_DCMD_MBOX_PEND_FLAG; dcmd->flags = (MFI_FRAME_DIR_WRITE); sc->jbod_seq_cmd = cmd; if (mrsas_issue_dcmd(sc, cmd)) { device_printf(sc->mrsas_dev, "Fail to send sync map info command.\n"); return 1; } else return 0; } else dcmd->flags = MFI_FRAME_DIR_READ; retcode = mrsas_issue_polled(sc, cmd); if (retcode == ETIMEDOUT) goto dcmd_timeout; if (pd_sync->count > MAX_PHYSICAL_DEVICES) { device_printf(sc->mrsas_dev, "driver supports max %d JBOD, but FW reports %d\n", MAX_PHYSICAL_DEVICES, pd_sync->count); retcode = -EINVAL; } if (!retcode) sc->pd_seq_map_id++; do_ocr 
= 0;

dcmd_timeout:
	if (do_ocr)
		sc->do_timedout_reset = MFI_DCMD_TIMEOUT_OCR;

	return (retcode);
}

/*
 * mrsas_get_map_info:	Load and validate RAID map
 * input:		Adapter instance soft state
 *
 * This function calls mrsas_get_ld_map_info() and MR_ValidateMapInfo() to load
 * and validate RAID map.  It returns 0 if successful, 1 otherwise.
 */
static int
mrsas_get_map_info(struct mrsas_softc *sc)
{
	uint8_t retcode = 0;

	sc->fast_path_io = 0;
	if (!mrsas_get_ld_map_info(sc)) {
		retcode = MR_ValidateMapInfo(sc);
		if (retcode == 0) {
			sc->fast_path_io = 1;
			return 0;
		}
	}
	return 1;
}

/*
 * mrsas_get_ld_map_info:	Get FW's ld_map structure
 * input:			Adapter instance soft state
 *
 * Issues an internal command (DCMD) to get the FW's LD map structure.
 */
static int
mrsas_get_ld_map_info(struct mrsas_softc *sc)
{
	int retcode = 0;
	struct mrsas_mfi_cmd *cmd;
	struct mrsas_dcmd_frame *dcmd;
	void *map;
	bus_addr_t map_phys_addr = 0;

	cmd = mrsas_get_mfi_cmd(sc);
	if (!cmd) {
		device_printf(sc->mrsas_dev,
		    "Cannot alloc for ld map info cmd.\n");
		return 1;
	}
	dcmd = &cmd->frame->dcmd;

	map = (void *)sc->raidmap_mem[(sc->map_id & 1)];
	map_phys_addr = sc->raidmap_phys_addr[(sc->map_id & 1)];
	if (!map) {
		device_printf(sc->mrsas_dev,
		    "Failed to alloc mem for ld map info.\n");
		mrsas_release_mfi_cmd(cmd);
		return (ENOMEM);
	}
	memset(map, 0, sizeof(sc->max_map_sz));
	memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);

	dcmd->cmd = MFI_CMD_DCMD;
	dcmd->cmd_status = 0xFF;
	dcmd->sge_count = 1;
	dcmd->flags = MFI_FRAME_DIR_READ;
	dcmd->timeout = 0;
	dcmd->pad_0 = 0;
	dcmd->data_xfer_len = sc->current_map_sz;
	dcmd->opcode = MR_DCMD_LD_MAP_GET_INFO;
	dcmd->sgl.sge32[0].phys_addr = map_phys_addr;
	dcmd->sgl.sge32[0].length = sc->current_map_sz;

	retcode = mrsas_issue_polled(sc, cmd);
	if (retcode == ETIMEDOUT)
		sc->do_timedout_reset = MFI_DCMD_TIMEOUT_OCR;

	return (retcode);
}

/*
 * mrsas_sync_map_info:	Sync the LD target map with Firmware
 * input:		Adapter instance soft state
 *
 * Issues an internal command (DCMD) that writes the driver's current LD
 * target map to Firmware.
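 *
 * (Editor's note, illustrative only: the call sites in this file follow the
 *  pattern below, re-arming the sync after a successful map load.)
 *
 *	if (!mrsas_get_map_info(sc))
 *		mrsas_sync_map_info(sc);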
*/ static int mrsas_sync_map_info(struct mrsas_softc *sc) { int retcode = 0, i; struct mrsas_mfi_cmd *cmd; struct mrsas_dcmd_frame *dcmd; uint32_t size_sync_info, num_lds; MR_LD_TARGET_SYNC *target_map = NULL; MR_DRV_RAID_MAP_ALL *map; MR_LD_RAID *raid; MR_LD_TARGET_SYNC *ld_sync; bus_addr_t map_phys_addr = 0; cmd = mrsas_get_mfi_cmd(sc); if (!cmd) { device_printf(sc->mrsas_dev, "Cannot alloc for sync map info cmd\n"); return ENOMEM; } map = sc->ld_drv_map[sc->map_id & 1]; num_lds = map->raidMap.ldCount; dcmd = &cmd->frame->dcmd; size_sync_info = sizeof(MR_LD_TARGET_SYNC) * num_lds; memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE); target_map = (MR_LD_TARGET_SYNC *) sc->raidmap_mem[(sc->map_id - 1) & 1]; memset(target_map, 0, sc->max_map_sz); map_phys_addr = sc->raidmap_phys_addr[(sc->map_id - 1) & 1]; ld_sync = (MR_LD_TARGET_SYNC *) target_map; for (i = 0; i < num_lds; i++, ld_sync++) { raid = MR_LdRaidGet(i, map); ld_sync->targetId = MR_GetLDTgtId(i, map); ld_sync->seqNum = raid->seqNum; } dcmd->cmd = MFI_CMD_DCMD; dcmd->cmd_status = 0xFF; dcmd->sge_count = 1; dcmd->flags = MFI_FRAME_DIR_WRITE; dcmd->timeout = 0; dcmd->pad_0 = 0; dcmd->data_xfer_len = sc->current_map_sz; dcmd->mbox.b[0] = num_lds; dcmd->mbox.b[1] = MRSAS_DCMD_MBOX_PEND_FLAG; dcmd->opcode = MR_DCMD_LD_MAP_GET_INFO; dcmd->sgl.sge32[0].phys_addr = map_phys_addr; dcmd->sgl.sge32[0].length = sc->current_map_sz; sc->map_update_cmd = cmd; if (mrsas_issue_dcmd(sc, cmd)) { device_printf(sc->mrsas_dev, "Fail to send sync map info command.\n"); return (1); } return (retcode); } /* * mrsas_get_pd_list: Returns FW's PD list structure input: * Adapter soft state * * Issues an internal command (DCMD) to get the FW's controller PD list * structure. This information is mainly used to find out about system * supported by Firmware. 
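 *
 * (Editor's sketch mirroring the MR_EVT_PD_INSERTED/REMOVED handling in
 *  mrsas_aen_handler() later in this file; not new driver code.)
 *
 *	if (!mrsas_get_pd_list(sc))
 *		mrsas_bus_scan_sim(sc, sc->sim_1);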
*/ static int mrsas_get_pd_list(struct mrsas_softc *sc) { int retcode = 0, pd_index = 0, pd_count = 0, pd_list_size; u_int8_t do_ocr = 1; struct mrsas_mfi_cmd *cmd; struct mrsas_dcmd_frame *dcmd; struct MR_PD_LIST *pd_list_mem; struct MR_PD_ADDRESS *pd_addr; bus_addr_t pd_list_phys_addr = 0; struct mrsas_tmp_dcmd *tcmd; cmd = mrsas_get_mfi_cmd(sc); if (!cmd) { device_printf(sc->mrsas_dev, "Cannot alloc for get PD list cmd\n"); return 1; } dcmd = &cmd->frame->dcmd; tcmd = malloc(sizeof(struct mrsas_tmp_dcmd), M_MRSAS, M_NOWAIT); pd_list_size = MRSAS_MAX_PD * sizeof(struct MR_PD_LIST); if (mrsas_alloc_tmp_dcmd(sc, tcmd, pd_list_size) != SUCCESS) { device_printf(sc->mrsas_dev, "Cannot alloc dmamap for get PD list cmd\n"); mrsas_release_mfi_cmd(cmd); mrsas_free_tmp_dcmd(tcmd); free(tcmd, M_MRSAS); return (ENOMEM); } else { pd_list_mem = tcmd->tmp_dcmd_mem; pd_list_phys_addr = tcmd->tmp_dcmd_phys_addr; } memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE); dcmd->mbox.b[0] = MR_PD_QUERY_TYPE_EXPOSED_TO_HOST; dcmd->mbox.b[1] = 0; dcmd->cmd = MFI_CMD_DCMD; dcmd->cmd_status = 0xFF; dcmd->sge_count = 1; dcmd->flags = MFI_FRAME_DIR_READ; dcmd->timeout = 0; dcmd->pad_0 = 0; dcmd->data_xfer_len = MRSAS_MAX_PD * sizeof(struct MR_PD_LIST); dcmd->opcode = MR_DCMD_PD_LIST_QUERY; dcmd->sgl.sge32[0].phys_addr = pd_list_phys_addr; dcmd->sgl.sge32[0].length = MRSAS_MAX_PD * sizeof(struct MR_PD_LIST); if (!sc->mask_interrupts) retcode = mrsas_issue_blocked_cmd(sc, cmd); else retcode = mrsas_issue_polled(sc, cmd); if (retcode == ETIMEDOUT) goto dcmd_timeout; /* Get the instance PD list */ pd_count = MRSAS_MAX_PD; pd_addr = pd_list_mem->addr; if (pd_list_mem->count < pd_count) { memset(sc->local_pd_list, 0, MRSAS_MAX_PD * sizeof(struct mrsas_pd_list)); for (pd_index = 0; pd_index < pd_list_mem->count; pd_index++) { sc->local_pd_list[pd_addr->deviceId].tid = pd_addr->deviceId; sc->local_pd_list[pd_addr->deviceId].driveType = pd_addr->scsiDevType; sc->local_pd_list[pd_addr->deviceId].driveState = MR_PD_STATE_SYSTEM; pd_addr++; } /* * Use mutext/spinlock if pd_list component size increase more than * 32 bit. */ memcpy(sc->pd_list, sc->local_pd_list, sizeof(sc->local_pd_list)); do_ocr = 0; } dcmd_timeout: mrsas_free_tmp_dcmd(tcmd); free(tcmd, M_MRSAS); if (do_ocr) sc->do_timedout_reset = MFI_DCMD_TIMEOUT_OCR; if (!sc->mask_interrupts) mrsas_release_mfi_cmd(cmd); return (retcode); } /* * mrsas_get_ld_list: Returns FW's LD list structure input: * Adapter soft state * * Issues an internal command (DCMD) to get the FW's controller PD list * structure. This information is mainly used to find out about supported by * the FW. 
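 *
 * (Editor's sketch mirroring the MR_EVT_LD_CREATED handling in
 *  mrsas_aen_handler() below; not new driver code.)
 *
 *	if (!mrsas_get_ld_list(sc))
 *		mrsas_bus_scan_sim(sc, sc->sim_0);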
*/ static int mrsas_get_ld_list(struct mrsas_softc *sc) { int ld_list_size, retcode = 0, ld_index = 0, ids = 0; u_int8_t do_ocr = 1; struct mrsas_mfi_cmd *cmd; struct mrsas_dcmd_frame *dcmd; struct MR_LD_LIST *ld_list_mem; bus_addr_t ld_list_phys_addr = 0; struct mrsas_tmp_dcmd *tcmd; cmd = mrsas_get_mfi_cmd(sc); if (!cmd) { device_printf(sc->mrsas_dev, "Cannot alloc for get LD list cmd\n"); return 1; } dcmd = &cmd->frame->dcmd; tcmd = malloc(sizeof(struct mrsas_tmp_dcmd), M_MRSAS, M_NOWAIT); ld_list_size = sizeof(struct MR_LD_LIST); if (mrsas_alloc_tmp_dcmd(sc, tcmd, ld_list_size) != SUCCESS) { device_printf(sc->mrsas_dev, "Cannot alloc dmamap for get LD list cmd\n"); mrsas_release_mfi_cmd(cmd); mrsas_free_tmp_dcmd(tcmd); free(tcmd, M_MRSAS); return (ENOMEM); } else { ld_list_mem = tcmd->tmp_dcmd_mem; ld_list_phys_addr = tcmd->tmp_dcmd_phys_addr; } memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE); if (sc->max256vdSupport) dcmd->mbox.b[0] = 1; dcmd->cmd = MFI_CMD_DCMD; dcmd->cmd_status = 0xFF; dcmd->sge_count = 1; dcmd->flags = MFI_FRAME_DIR_READ; dcmd->timeout = 0; dcmd->data_xfer_len = sizeof(struct MR_LD_LIST); dcmd->opcode = MR_DCMD_LD_GET_LIST; dcmd->sgl.sge32[0].phys_addr = ld_list_phys_addr; dcmd->sgl.sge32[0].length = sizeof(struct MR_LD_LIST); dcmd->pad_0 = 0; if (!sc->mask_interrupts) retcode = mrsas_issue_blocked_cmd(sc, cmd); else retcode = mrsas_issue_polled(sc, cmd); if (retcode == ETIMEDOUT) goto dcmd_timeout; #if VD_EXT_DEBUG printf("Number of LDs %d\n", ld_list_mem->ldCount); #endif /* Get the instance LD list */ if (ld_list_mem->ldCount <= sc->fw_supported_vd_count) { sc->CurLdCount = ld_list_mem->ldCount; memset(sc->ld_ids, 0xff, MAX_LOGICAL_DRIVES_EXT); for (ld_index = 0; ld_index < ld_list_mem->ldCount; ld_index++) { if (ld_list_mem->ldList[ld_index].state != 0) { ids = ld_list_mem->ldList[ld_index].ref.ld_context.targetId; sc->ld_ids[ids] = ld_list_mem->ldList[ld_index].ref.ld_context.targetId; } } do_ocr = 0; } dcmd_timeout: mrsas_free_tmp_dcmd(tcmd); free(tcmd, M_MRSAS); if (do_ocr) sc->do_timedout_reset = MFI_DCMD_TIMEOUT_OCR; if (!sc->mask_interrupts) mrsas_release_mfi_cmd(cmd); return (retcode); } /* * mrsas_alloc_tmp_dcmd: Allocates memory for temporary command input: * Adapter soft state Temp command Size of alloction * * Allocates DMAable memory for a temporary internal command. The allocated * memory is initialized to all zeros upon successful loading of the dma * mapped memory. */ int mrsas_alloc_tmp_dcmd(struct mrsas_softc *sc, struct mrsas_tmp_dcmd *tcmd, int size) { if (bus_dma_tag_create(sc->mrsas_parent_tag, 1, 0, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, size, 1, size, BUS_DMA_ALLOCNOW, NULL, NULL, &tcmd->tmp_dcmd_tag)) { device_printf(sc->mrsas_dev, "Cannot allocate tmp dcmd tag\n"); return (ENOMEM); } if (bus_dmamem_alloc(tcmd->tmp_dcmd_tag, (void **)&tcmd->tmp_dcmd_mem, BUS_DMA_NOWAIT, &tcmd->tmp_dcmd_dmamap)) { device_printf(sc->mrsas_dev, "Cannot allocate tmp dcmd mem\n"); return (ENOMEM); } if (bus_dmamap_load(tcmd->tmp_dcmd_tag, tcmd->tmp_dcmd_dmamap, tcmd->tmp_dcmd_mem, size, mrsas_addr_cb, &tcmd->tmp_dcmd_phys_addr, BUS_DMA_NOWAIT)) { device_printf(sc->mrsas_dev, "Cannot load tmp dcmd mem\n"); return (ENOMEM); } memset(tcmd->tmp_dcmd_mem, 0, size); return (0); } /* * mrsas_free_tmp_dcmd: Free memory for temporary command input: * temporary dcmd pointer * * Deallocates memory of the temporary command for use in the construction of * the internal DCMD. 
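 *
 * (Editor's sketch of the allocate/use/free pattern followed by
 *  mrsas_get_pd_list() and mrsas_get_ld_list() above; "size" stands in for
 *  the caller's DMA buffer size.)
 *
 *	tcmd = malloc(sizeof(struct mrsas_tmp_dcmd), M_MRSAS, M_NOWAIT);
 *	if (tcmd != NULL) {
 *		if (mrsas_alloc_tmp_dcmd(sc, tcmd, size) == SUCCESS) {
 *			... issue a DCMD against tcmd->tmp_dcmd_phys_addr ...
 *		}
 *		mrsas_free_tmp_dcmd(tcmd);
 *		free(tcmd, M_MRSAS);
 *	}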
*/ void mrsas_free_tmp_dcmd(struct mrsas_tmp_dcmd *tmp) { if (tmp->tmp_dcmd_phys_addr) bus_dmamap_unload(tmp->tmp_dcmd_tag, tmp->tmp_dcmd_dmamap); if (tmp->tmp_dcmd_mem != NULL) bus_dmamem_free(tmp->tmp_dcmd_tag, tmp->tmp_dcmd_mem, tmp->tmp_dcmd_dmamap); if (tmp->tmp_dcmd_tag != NULL) bus_dma_tag_destroy(tmp->tmp_dcmd_tag); } /* * mrsas_issue_blocked_abort_cmd: Aborts previously issued cmd input: * Adapter soft state Previously issued cmd to be aborted * * This function is used to abort previously issued commands, such as AEN and * RAID map sync map commands. The abort command is sent as a DCMD internal * command and subsequently the driver will wait for a return status. The * max wait time is MRSAS_INTERNAL_CMD_WAIT_TIME seconds. */ static int mrsas_issue_blocked_abort_cmd(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd_to_abort) { struct mrsas_mfi_cmd *cmd; struct mrsas_abort_frame *abort_fr; u_int8_t retcode = 0; unsigned long total_time = 0; u_int8_t max_wait = MRSAS_INTERNAL_CMD_WAIT_TIME; cmd = mrsas_get_mfi_cmd(sc); if (!cmd) { device_printf(sc->mrsas_dev, "Cannot alloc for abort cmd\n"); return (1); } abort_fr = &cmd->frame->abort; /* Prepare and issue the abort frame */ abort_fr->cmd = MFI_CMD_ABORT; abort_fr->cmd_status = 0xFF; abort_fr->flags = 0; abort_fr->abort_context = cmd_to_abort->index; abort_fr->abort_mfi_phys_addr_lo = cmd_to_abort->frame_phys_addr; abort_fr->abort_mfi_phys_addr_hi = 0; cmd->sync_cmd = 1; cmd->cmd_status = 0xFF; if (mrsas_issue_dcmd(sc, cmd)) { device_printf(sc->mrsas_dev, "Fail to send abort command.\n"); return (1); } /* Wait for this cmd to complete */ sc->chan = (void *)&cmd; while (1) { if (cmd->cmd_status == 0xFF) { tsleep((void *)&sc->chan, 0, "mrsas_sleep", hz); } else break; total_time++; if (total_time >= max_wait) { device_printf(sc->mrsas_dev, "Abort cmd timed out after %d sec.\n", max_wait); retcode = 1; break; } } cmd->sync_cmd = 0; mrsas_release_mfi_cmd(cmd); return (retcode); } /* * mrsas_complete_abort: Completes aborting a command input: * Adapter soft state Cmd that was issued to abort another cmd * * The mrsas_issue_blocked_abort_cmd() function waits for the command status to * change after sending the command. This function is called from * mrsas_complete_mptmfi_passthru() to wake up the sleep thread associated. 
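 *
 * (Editor's note: the waiting side in mrsas_issue_blocked_abort_cmd() sits
 *  in a loop of the following shape until this completion path clears
 *  cmd_status; condensed from the code above, timeout handling omitted.)
 *
 *	while (cmd->cmd_status == 0xFF)
 *		tsleep((void *)&sc->chan, 0, "mrsas_sleep", hz);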
*/ void mrsas_complete_abort(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd) { if (cmd->sync_cmd) { cmd->sync_cmd = 0; cmd->cmd_status = 0; sc->chan = (void *)&cmd; wakeup_one((void *)&sc->chan); } return; } /* * mrsas_aen_handler: AEN processing callback function from thread context * input: Adapter soft state * * Asynchronous event handler */ void mrsas_aen_handler(struct mrsas_softc *sc) { union mrsas_evt_class_locale class_locale; int doscan = 0; u_int32_t seq_num; int error, fail_aen = 0; if (sc == NULL) { printf("invalid instance!\n"); return; } if (sc->remove_in_progress || sc->reset_in_progress) { device_printf(sc->mrsas_dev, "Returning from %s, line no %d\n", __func__, __LINE__); return; } if (sc->evt_detail_mem) { switch (sc->evt_detail_mem->code) { case MR_EVT_PD_INSERTED: fail_aen = mrsas_get_pd_list(sc); if (!fail_aen) mrsas_bus_scan_sim(sc, sc->sim_1); else goto skip_register_aen; break; case MR_EVT_PD_REMOVED: fail_aen = mrsas_get_pd_list(sc); if (!fail_aen) mrsas_bus_scan_sim(sc, sc->sim_1); else goto skip_register_aen; break; case MR_EVT_LD_OFFLINE: case MR_EVT_CFG_CLEARED: case MR_EVT_LD_DELETED: mrsas_bus_scan_sim(sc, sc->sim_0); break; case MR_EVT_LD_CREATED: fail_aen = mrsas_get_ld_list(sc); if (!fail_aen) mrsas_bus_scan_sim(sc, sc->sim_0); else goto skip_register_aen; break; case MR_EVT_CTRL_HOST_BUS_SCAN_REQUESTED: case MR_EVT_FOREIGN_CFG_IMPORTED: case MR_EVT_LD_STATE_CHANGE: doscan = 1; break; case MR_EVT_CTRL_PROP_CHANGED: fail_aen = mrsas_get_ctrl_info(sc); if (fail_aen) goto skip_register_aen; break; default: break; } } else { device_printf(sc->mrsas_dev, "invalid evt_detail\n"); return; } if (doscan) { fail_aen = mrsas_get_pd_list(sc); if (!fail_aen) { mrsas_dprint(sc, MRSAS_AEN, "scanning ...sim 1\n"); mrsas_bus_scan_sim(sc, sc->sim_1); } else goto skip_register_aen; fail_aen = mrsas_get_ld_list(sc); if (!fail_aen) { mrsas_dprint(sc, MRSAS_AEN, "scanning ...sim 0\n"); mrsas_bus_scan_sim(sc, sc->sim_0); } else goto skip_register_aen; } seq_num = sc->evt_detail_mem->seq_num + 1; /* Register AEN with FW for latest sequence number plus 1 */ class_locale.members.reserved = 0; class_locale.members.locale = MR_EVT_LOCALE_ALL; class_locale.members.class = MR_EVT_CLASS_DEBUG; if (sc->aen_cmd != NULL) return; mtx_lock(&sc->aen_lock); error = mrsas_register_aen(sc, seq_num, class_locale.word); mtx_unlock(&sc->aen_lock); if (error) device_printf(sc->mrsas_dev, "register aen failed error %x\n", error); skip_register_aen: return; } /* * mrsas_complete_aen: Completes AEN command * input: Adapter soft state * Cmd that was issued to abort another cmd * * This function will be called from ISR and will continue event processing from * thread context by enqueuing task in ev_tq (callback function * "mrsas_aen_handler"). 
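 *
 * (Editor's sketch of the assumed taskqueue wiring; the actual setup lives
 *  in the attach path outside this hunk, so the names, flags, and cast shown
 *  here are illustrative only.)
 *
 *	TASK_INIT(&sc->ev_task, 0, (task_fn_t *)mrsas_aen_handler, sc);
 *	sc->ev_tq = taskqueue_create("mrsas_aen_tq", M_NOWAIT,
 *	    taskqueue_thread_enqueue, &sc->ev_tq);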
*/ void mrsas_complete_aen(struct mrsas_softc *sc, struct mrsas_mfi_cmd *cmd) { /* * Don't signal app if it is just an aborted previously registered * aen */ if ((!cmd->abort_aen) && (sc->remove_in_progress == 0)) { sc->mrsas_aen_triggered = 1; mtx_lock(&sc->aen_lock); if (sc->mrsas_poll_waiting) { sc->mrsas_poll_waiting = 0; selwakeup(&sc->mrsas_select); } mtx_unlock(&sc->aen_lock); } else cmd->abort_aen = 0; sc->aen_cmd = NULL; mrsas_release_mfi_cmd(cmd); taskqueue_enqueue(sc->ev_tq, &sc->ev_task); return; } static device_method_t mrsas_methods[] = { DEVMETHOD(device_probe, mrsas_probe), DEVMETHOD(device_attach, mrsas_attach), DEVMETHOD(device_detach, mrsas_detach), DEVMETHOD(device_suspend, mrsas_suspend), DEVMETHOD(device_resume, mrsas_resume), DEVMETHOD(bus_print_child, bus_generic_print_child), DEVMETHOD(bus_driver_added, bus_generic_driver_added), {0, 0} }; static driver_t mrsas_driver = { "mrsas", mrsas_methods, sizeof(struct mrsas_softc) }; static devclass_t mrsas_devclass; DRIVER_MODULE(mrsas, pci, mrsas_driver, mrsas_devclass, 0, 0); MODULE_DEPEND(mrsas, cam, 1, 1, 1); Index: head/sys/dev/mxge/if_mxge.c =================================================================== --- head/sys/dev/mxge/if_mxge.c (revision 327948) +++ head/sys/dev/mxge/if_mxge.c (revision 327949) @@ -1,5026 +1,5026 @@ /****************************************************************************** SPDX-License-Identifier: BSD-2-Clause-FreeBSD Copyright (c) 2006-2013, Myricom Inc. All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. Neither the name of the Myricom Inc, nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
***************************************************************************/ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include /* XXX for pci_cfg_restore */ #include /* for pmap_mapdev() */ #include #if defined(__i386) || defined(__amd64) #include #endif #include #include /*#define MXGE_FAKE_IFP*/ #include #ifdef IFNET_BUF_RING #include #endif #include "opt_inet.h" #include "opt_inet6.h" /* tunable params */ static int mxge_nvidia_ecrc_enable = 1; static int mxge_force_firmware = 0; static int mxge_intr_coal_delay = 30; static int mxge_deassert_wait = 1; static int mxge_flow_control = 1; static int mxge_verbose = 0; static int mxge_ticks; static int mxge_max_slices = 1; static int mxge_rss_hash_type = MXGEFW_RSS_HASH_TYPE_SRC_DST_PORT; static int mxge_always_promisc = 0; static int mxge_initial_mtu = ETHERMTU_JUMBO; static int mxge_throttle = 0; static char *mxge_fw_unaligned = "mxge_ethp_z8e"; static char *mxge_fw_aligned = "mxge_eth_z8e"; static char *mxge_fw_rss_aligned = "mxge_rss_eth_z8e"; static char *mxge_fw_rss_unaligned = "mxge_rss_ethp_z8e"; static int mxge_probe(device_t dev); static int mxge_attach(device_t dev); static int mxge_detach(device_t dev); static int mxge_shutdown(device_t dev); static void mxge_intr(void *arg); static device_method_t mxge_methods[] = { /* Device interface */ DEVMETHOD(device_probe, mxge_probe), DEVMETHOD(device_attach, mxge_attach), DEVMETHOD(device_detach, mxge_detach), DEVMETHOD(device_shutdown, mxge_shutdown), DEVMETHOD_END }; static driver_t mxge_driver = { "mxge", mxge_methods, sizeof(mxge_softc_t), }; static devclass_t mxge_devclass; /* Declare ourselves to be a child of the PCI bus.*/ DRIVER_MODULE(mxge, pci, mxge_driver, mxge_devclass, 0, 0); MODULE_DEPEND(mxge, firmware, 1, 1, 1); MODULE_DEPEND(mxge, zlib, 1, 1, 1); static int mxge_load_firmware(mxge_softc_t *sc, int adopt); static int mxge_send_cmd(mxge_softc_t *sc, uint32_t cmd, mxge_cmd_t *data); static int mxge_close(mxge_softc_t *sc, int down); static int mxge_open(mxge_softc_t *sc); static void mxge_tick(void *arg); static int mxge_probe(device_t dev) { int rev; if ((pci_get_vendor(dev) == MXGE_PCI_VENDOR_MYRICOM) && ((pci_get_device(dev) == MXGE_PCI_DEVICE_Z8E) || (pci_get_device(dev) == MXGE_PCI_DEVICE_Z8E_9))) { rev = pci_get_revid(dev); switch (rev) { case MXGE_PCI_REV_Z8E: device_set_desc(dev, "Myri10G-PCIE-8A"); break; case MXGE_PCI_REV_Z8ES: device_set_desc(dev, "Myri10G-PCIE-8B"); break; default: device_set_desc(dev, "Myri10G-PCIE-8??"); device_printf(dev, "Unrecognized rev %d NIC\n", rev); break; } return 0; } return ENXIO; } static void mxge_enable_wc(mxge_softc_t *sc) { #if defined(__i386) || defined(__amd64) vm_offset_t len; int err; sc->wc = 1; len = rman_get_size(sc->mem_res); err = pmap_change_attr((vm_offset_t) sc->sram, len, PAT_WRITE_COMBINING); if (err != 0) { device_printf(sc->dev, "pmap_change_attr failed, %d\n", err); sc->wc = 0; } #endif } /* callback to get our DMA address */ static void mxge_dmamap_callback(void *arg, bus_dma_segment_t *segs, int nsegs, int error) { if (error == 0) { *(bus_addr_t *) arg = segs->ds_addr; } } static int mxge_dma_alloc(mxge_softc_t *sc, mxge_dma_t *dma, size_t bytes, 
bus_size_t alignment) { int err; device_t dev = sc->dev; bus_size_t boundary, maxsegsize; if (bytes > 4096 && alignment == 4096) { boundary = 0; maxsegsize = bytes; } else { boundary = 4096; maxsegsize = 4096; } /* allocate DMAable memory tags */ err = bus_dma_tag_create(sc->parent_dmat, /* parent */ alignment, /* alignment */ boundary, /* boundary */ BUS_SPACE_MAXADDR, /* low */ BUS_SPACE_MAXADDR, /* high */ NULL, NULL, /* filter */ bytes, /* maxsize */ 1, /* num segs */ maxsegsize, /* maxsegsize */ BUS_DMA_COHERENT, /* flags */ NULL, NULL, /* lock */ &dma->dmat); /* tag */ if (err != 0) { device_printf(dev, "couldn't alloc tag (err = %d)\n", err); return err; } /* allocate DMAable memory & map */ err = bus_dmamem_alloc(dma->dmat, &dma->addr, (BUS_DMA_WAITOK | BUS_DMA_COHERENT | BUS_DMA_ZERO), &dma->map); if (err != 0) { device_printf(dev, "couldn't alloc mem (err = %d)\n", err); goto abort_with_dmat; } /* load the memory */ err = bus_dmamap_load(dma->dmat, dma->map, dma->addr, bytes, mxge_dmamap_callback, (void *)&dma->bus_addr, 0); if (err != 0) { device_printf(dev, "couldn't load map (err = %d)\n", err); goto abort_with_mem; } return 0; abort_with_mem: bus_dmamem_free(dma->dmat, dma->addr, dma->map); abort_with_dmat: (void)bus_dma_tag_destroy(dma->dmat); return err; } static void mxge_dma_free(mxge_dma_t *dma) { bus_dmamap_unload(dma->dmat, dma->map); bus_dmamem_free(dma->dmat, dma->addr, dma->map); (void)bus_dma_tag_destroy(dma->dmat); } /* * The eeprom strings on the lanaiX have the format * SN=x\0 * MAC=x:x:x:x:x:x\0 * PC=text\0 */ static int mxge_parse_strings(mxge_softc_t *sc) { char *ptr; int i, found_mac, found_sn2; char *endptr; ptr = sc->eeprom_strings; found_mac = 0; found_sn2 = 0; while (*ptr != '\0') { if (strncmp(ptr, "MAC=", 4) == 0) { ptr += 4; for (i = 0;;) { sc->mac_addr[i] = strtoul(ptr, &endptr, 16); if (endptr - ptr != 2) goto abort; ptr = endptr; if (++i == 6) break; if (*ptr++ != ':') goto abort; } found_mac = 1; } else if (strncmp(ptr, "PC=", 3) == 0) { ptr += 3; strlcpy(sc->product_code_string, ptr, sizeof(sc->product_code_string)); } else if (!found_sn2 && (strncmp(ptr, "SN=", 3) == 0)) { ptr += 3; strlcpy(sc->serial_number_string, ptr, sizeof(sc->serial_number_string)); } else if (strncmp(ptr, "SN2=", 4) == 0) { /* SN2 takes precedence over SN */ ptr += 4; found_sn2 = 1; strlcpy(sc->serial_number_string, ptr, sizeof(sc->serial_number_string)); } while (*ptr++ != '\0') {} } if (found_mac) return 0; abort: device_printf(sc->dev, "failed to parse eeprom_strings\n"); return ENXIO; } #if defined __i386 || defined i386 || defined __i386__ || defined __x86_64__ static void mxge_enable_nvidia_ecrc(mxge_softc_t *sc) { uint32_t val; unsigned long base, off; char *va, *cfgptr; device_t pdev, mcp55; uint16_t vendor_id, device_id, word; uintptr_t bus, slot, func, ivend, idev; uint32_t *ptr32; if (!mxge_nvidia_ecrc_enable) return; pdev = device_get_parent(device_get_parent(sc->dev)); if (pdev == NULL) { device_printf(sc->dev, "could not find parent?\n"); return; } vendor_id = pci_read_config(pdev, PCIR_VENDOR, 2); device_id = pci_read_config(pdev, PCIR_DEVICE, 2); if (vendor_id != 0x10de) return; base = 0; if (device_id == 0x005d) { /* ck804, base address is magic */ base = 0xe0000000UL; } else if (device_id >= 0x0374 && device_id <= 0x378) { /* mcp55, base address stored in chipset */ mcp55 = pci_find_bsf(0, 0, 0); if (mcp55 && 0x10de == pci_read_config(mcp55, PCIR_VENDOR, 2) && 0x0369 == pci_read_config(mcp55, PCIR_DEVICE, 2)) { word = pci_read_config(mcp55, 0x90, 2); 
base = ((unsigned long)word & 0x7ffeU) << 25; } } if (!base) return; /* XXXX Test below is commented because it is believed that doing config read/write beyond 0xff will access the config space for the next larger function. Uncomment this and remove the hacky pmap_mapdev() way of accessing config space when FreeBSD grows support for extended pcie config space access */ #if 0 /* See if we can, by some miracle, access the extended config space */ val = pci_read_config(pdev, 0x178, 4); if (val != 0xffffffff) { val |= 0x40; pci_write_config(pdev, 0x178, val, 4); return; } #endif /* Rather than using normal pci config space writes, we must * map the Nvidia config space ourselves. This is because on * opteron/nvidia class machine the 0xe000000 mapping is * handled by the nvidia chipset, that means the internal PCI * device (the on-chip northbridge), or the amd-8131 bridge * and things behind them are not visible by this method. */ BUS_READ_IVAR(device_get_parent(pdev), pdev, PCI_IVAR_BUS, &bus); BUS_READ_IVAR(device_get_parent(pdev), pdev, PCI_IVAR_SLOT, &slot); BUS_READ_IVAR(device_get_parent(pdev), pdev, PCI_IVAR_FUNCTION, &func); BUS_READ_IVAR(device_get_parent(pdev), pdev, PCI_IVAR_VENDOR, &ivend); BUS_READ_IVAR(device_get_parent(pdev), pdev, PCI_IVAR_DEVICE, &idev); off = base + 0x00100000UL * (unsigned long)bus + 0x00001000UL * (unsigned long)(func + 8 * slot); /* map it into the kernel */ va = pmap_mapdev(trunc_page((vm_paddr_t)off), PAGE_SIZE); if (va == NULL) { device_printf(sc->dev, "pmap_kenter_temporary didn't\n"); return; } /* get a pointer to the config space mapped into the kernel */ cfgptr = va + (off & PAGE_MASK); /* make sure that we can really access it */ vendor_id = *(uint16_t *)(cfgptr + PCIR_VENDOR); device_id = *(uint16_t *)(cfgptr + PCIR_DEVICE); if (! (vendor_id == ivend && device_id == idev)) { device_printf(sc->dev, "mapping failed: 0x%x:0x%x\n", vendor_id, device_id); pmap_unmapdev((vm_offset_t)va, PAGE_SIZE); return; } ptr32 = (uint32_t*)(cfgptr + 0x178); val = *ptr32; if (val == 0xffffffff) { device_printf(sc->dev, "extended mapping failed\n"); pmap_unmapdev((vm_offset_t)va, PAGE_SIZE); return; } *ptr32 = val | 0x40; pmap_unmapdev((vm_offset_t)va, PAGE_SIZE); if (mxge_verbose) device_printf(sc->dev, "Enabled ECRC on upstream Nvidia bridge " "at %d:%d:%d\n", (int)bus, (int)slot, (int)func); return; } #else static void mxge_enable_nvidia_ecrc(mxge_softc_t *sc) { device_printf(sc->dev, "Nforce 4 chipset on non-x86/amd64!?!?!\n"); return; } #endif static int mxge_dma_test(mxge_softc_t *sc, int test_type) { mxge_cmd_t cmd; bus_addr_t dmatest_bus = sc->dmabench_dma.bus_addr; int status; uint32_t len; char *test = " "; /* Run a small DMA test. * The magic multipliers to the length tell the firmware * to do DMA read, write, or read+write tests. The * results are returned in cmd.data0. The upper 16 * bits of the return is the number of transfers completed. * The lower 16 bits is the time in 0.5us ticks that the * transfers took to complete. 
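 *
 * (Editor's note, worked example only: with len = 4096 and a returned
 *  cmd.data0 of (100 << 16) | 512, the firmware reports 100 transfers in
 *  512 half-microsecond ticks (256 us), so the computation below yields
 *  (100 * 4096 * 2) / 512 = 1600, i.e. roughly 1600 MB/s.)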
*/ len = sc->tx_boundary; cmd.data0 = MXGE_LOWPART_TO_U32(dmatest_bus); cmd.data1 = MXGE_HIGHPART_TO_U32(dmatest_bus); cmd.data2 = len * 0x10000; status = mxge_send_cmd(sc, test_type, &cmd); if (status != 0) { test = "read"; goto abort; } sc->read_dma = ((cmd.data0>>16) * len * 2) / (cmd.data0 & 0xffff); cmd.data0 = MXGE_LOWPART_TO_U32(dmatest_bus); cmd.data1 = MXGE_HIGHPART_TO_U32(dmatest_bus); cmd.data2 = len * 0x1; status = mxge_send_cmd(sc, test_type, &cmd); if (status != 0) { test = "write"; goto abort; } sc->write_dma = ((cmd.data0>>16) * len * 2) / (cmd.data0 & 0xffff); cmd.data0 = MXGE_LOWPART_TO_U32(dmatest_bus); cmd.data1 = MXGE_HIGHPART_TO_U32(dmatest_bus); cmd.data2 = len * 0x10001; status = mxge_send_cmd(sc, test_type, &cmd); if (status != 0) { test = "read/write"; goto abort; } sc->read_write_dma = ((cmd.data0>>16) * len * 2 * 2) / (cmd.data0 & 0xffff); abort: if (status != 0 && test_type != MXGEFW_CMD_UNALIGNED_TEST) device_printf(sc->dev, "DMA %s benchmark failed: %d\n", test, status); return status; } /* * The Lanai Z8E PCI-E interface achieves higher Read-DMA throughput * when the PCI-E Completion packets are aligned on an 8-byte * boundary. Some PCI-E chip sets always align Completion packets; on * the ones that do not, the alignment can be enforced by enabling * ECRC generation (if supported). * * When PCI-E Completion packets are not aligned, it is actually more * efficient to limit Read-DMA transactions to 2KB, rather than 4KB. * * If the driver can neither enable ECRC nor verify that it has * already been enabled, then it must use a firmware image which works * around unaligned completion packets (ethp_z8e.dat), and it should * also ensure that it never gives the device a Read-DMA which is * larger than 2KB by setting the tx_boundary to 2KB. If ECRC is * enabled, then the driver should use the aligned (eth_z8e.dat) * firmware image, and set tx_boundary to 4KB. */ static int mxge_firmware_probe(mxge_softc_t *sc) { device_t dev = sc->dev; int reg, status; uint16_t pectl; sc->tx_boundary = 4096; /* * Verify the max read request size was set to 4KB * before trying the test with 4KB. */ if (pci_find_cap(dev, PCIY_EXPRESS, ®) == 0) { pectl = pci_read_config(dev, reg + 0x8, 2); if ((pectl & (5 << 12)) != (5 << 12)) { device_printf(dev, "Max Read Req. size != 4k (0x%x\n", pectl); sc->tx_boundary = 2048; } } /* * load the optimized firmware (which assumes aligned PCIe * completions) in order to see if it works on this host. */ sc->fw_name = mxge_fw_aligned; status = mxge_load_firmware(sc, 1); if (status != 0) { return status; } /* * Enable ECRC if possible */ mxge_enable_nvidia_ecrc(sc); /* * Run a DMA test which watches for unaligned completions and * aborts on the first one seen. Not required on Z8ES or newer. */ if (pci_get_revid(sc->dev) >= MXGE_PCI_REV_Z8ES) return 0; status = mxge_dma_test(sc, MXGEFW_CMD_UNALIGNED_TEST); if (status == 0) return 0; /* keep the aligned firmware */ if (status != E2BIG) device_printf(dev, "DMA test failed: %d\n", status); if (status == ENOSYS) device_printf(dev, "Falling back to ethp! " "Please install up to date fw\n"); return status; } static int mxge_select_firmware(mxge_softc_t *sc) { int aligned = 0; int force_firmware = mxge_force_firmware; if (sc->throttle) force_firmware = sc->throttle; if (force_firmware != 0) { if (force_firmware == 1) aligned = 1; else aligned = 0; if (mxge_verbose) device_printf(sc->dev, "Assuming %s completions (forced)\n", aligned ? 
"aligned" : "unaligned"); goto abort; } /* if the PCIe link width is 4 or less, we can use the aligned firmware and skip any checks */ if (sc->link_width != 0 && sc->link_width <= 4) { device_printf(sc->dev, "PCIe x%d Link, expect reduced performance\n", sc->link_width); aligned = 1; goto abort; } if (0 == mxge_firmware_probe(sc)) return 0; abort: if (aligned) { sc->fw_name = mxge_fw_aligned; sc->tx_boundary = 4096; } else { sc->fw_name = mxge_fw_unaligned; sc->tx_boundary = 2048; } return (mxge_load_firmware(sc, 0)); } static int mxge_validate_firmware(mxge_softc_t *sc, const mcp_gen_header_t *hdr) { if (be32toh(hdr->mcp_type) != MCP_TYPE_ETH) { device_printf(sc->dev, "Bad firmware type: 0x%x\n", be32toh(hdr->mcp_type)); return EIO; } /* save firmware version for sysctl */ strlcpy(sc->fw_version, hdr->version, sizeof(sc->fw_version)); if (mxge_verbose) device_printf(sc->dev, "firmware id: %s\n", hdr->version); sscanf(sc->fw_version, "%d.%d.%d", &sc->fw_ver_major, &sc->fw_ver_minor, &sc->fw_ver_tiny); if (!(sc->fw_ver_major == MXGEFW_VERSION_MAJOR && sc->fw_ver_minor == MXGEFW_VERSION_MINOR)) { device_printf(sc->dev, "Found firmware version %s\n", sc->fw_version); device_printf(sc->dev, "Driver needs %d.%d\n", MXGEFW_VERSION_MAJOR, MXGEFW_VERSION_MINOR); return EINVAL; } return 0; } static void * z_alloc(void *nil, u_int items, u_int size) { void *ptr; - ptr = malloc(items * size, M_TEMP, M_NOWAIT); + ptr = mallocarray(items, size, M_TEMP, M_NOWAIT); return ptr; } static void z_free(void *nil, void *ptr) { free(ptr, M_TEMP); } static int mxge_load_firmware_helper(mxge_softc_t *sc, uint32_t *limit) { z_stream zs; char *inflate_buffer; const struct firmware *fw; const mcp_gen_header_t *hdr; unsigned hdr_offset; int status; unsigned int i; char dummy; size_t fw_len; fw = firmware_get(sc->fw_name); if (fw == NULL) { device_printf(sc->dev, "Could not find firmware image %s\n", sc->fw_name); return ENOENT; } /* setup zlib and decompress f/w */ bzero(&zs, sizeof (zs)); zs.zalloc = z_alloc; zs.zfree = z_free; status = inflateInit(&zs); if (status != Z_OK) { status = EIO; goto abort_with_fw; } /* the uncompressed size is stored as the firmware version, which would otherwise go unused */ fw_len = (size_t) fw->version; inflate_buffer = malloc(fw_len, M_TEMP, M_NOWAIT); if (inflate_buffer == NULL) goto abort_with_zs; zs.avail_in = fw->datasize; zs.next_in = __DECONST(char *, fw->data); zs.avail_out = fw_len; zs.next_out = inflate_buffer; status = inflate(&zs, Z_FINISH); if (status != Z_STREAM_END) { device_printf(sc->dev, "zlib %d\n", status); status = EIO; goto abort_with_buffer; } /* check id */ hdr_offset = htobe32(*(const uint32_t *) (inflate_buffer + MCP_HEADER_PTR_OFFSET)); if ((hdr_offset & 3) || hdr_offset + sizeof(*hdr) > fw_len) { device_printf(sc->dev, "Bad firmware file"); status = EIO; goto abort_with_buffer; } hdr = (const void*)(inflate_buffer + hdr_offset); status = mxge_validate_firmware(sc, hdr); if (status != 0) goto abort_with_buffer; /* Copy the inflated firmware to NIC SRAM. 
*/ for (i = 0; i < fw_len; i += 256) { mxge_pio_copy(sc->sram + MXGE_FW_OFFSET + i, inflate_buffer + i, min(256U, (unsigned)(fw_len - i))); wmb(); dummy = *sc->sram; wmb(); } *limit = fw_len; status = 0; abort_with_buffer: free(inflate_buffer, M_TEMP); abort_with_zs: inflateEnd(&zs); abort_with_fw: firmware_put(fw, FIRMWARE_UNLOAD); return status; } /* * Enable or disable periodic RDMAs from the host to make certain * chipsets resend dropped PCIe messages */ static void mxge_dummy_rdma(mxge_softc_t *sc, int enable) { char buf_bytes[72]; volatile uint32_t *confirm; volatile char *submit; uint32_t *buf, dma_low, dma_high; int i; buf = (uint32_t *)((unsigned long)(buf_bytes + 7) & ~7UL); /* clear confirmation addr */ confirm = (volatile uint32_t *)sc->cmd; *confirm = 0; wmb(); /* send an rdma command to the PCIe engine, and wait for the response in the confirmation address. The firmware should write a -1 there to indicate it is alive and well */ dma_low = MXGE_LOWPART_TO_U32(sc->cmd_dma.bus_addr); dma_high = MXGE_HIGHPART_TO_U32(sc->cmd_dma.bus_addr); buf[0] = htobe32(dma_high); /* confirm addr MSW */ buf[1] = htobe32(dma_low); /* confirm addr LSW */ buf[2] = htobe32(0xffffffff); /* confirm data */ dma_low = MXGE_LOWPART_TO_U32(sc->zeropad_dma.bus_addr); dma_high = MXGE_HIGHPART_TO_U32(sc->zeropad_dma.bus_addr); buf[3] = htobe32(dma_high); /* dummy addr MSW */ buf[4] = htobe32(dma_low); /* dummy addr LSW */ buf[5] = htobe32(enable); /* enable? */ submit = (volatile char *)(sc->sram + MXGEFW_BOOT_DUMMY_RDMA); mxge_pio_copy(submit, buf, 64); wmb(); DELAY(1000); wmb(); i = 0; while (*confirm != 0xffffffff && i < 20) { DELAY(1000); i++; } if (*confirm != 0xffffffff) { device_printf(sc->dev, "dummy rdma %s failed (%p = 0x%x)", (enable ? "enable" : "disable"), confirm, *confirm); } return; } static int mxge_send_cmd(mxge_softc_t *sc, uint32_t cmd, mxge_cmd_t *data) { mcp_cmd_t *buf; char buf_bytes[sizeof(*buf) + 8]; volatile mcp_cmd_response_t *response = sc->cmd; volatile char *cmd_addr = sc->sram + MXGEFW_ETH_CMD; uint32_t dma_low, dma_high; int err, sleep_total = 0; /* ensure buf is aligned to 8 bytes */ buf = (mcp_cmd_t *)((unsigned long)(buf_bytes + 7) & ~7UL); buf->data0 = htobe32(data->data0); buf->data1 = htobe32(data->data1); buf->data2 = htobe32(data->data2); buf->cmd = htobe32(cmd); dma_low = MXGE_LOWPART_TO_U32(sc->cmd_dma.bus_addr); dma_high = MXGE_HIGHPART_TO_U32(sc->cmd_dma.bus_addr); buf->response_addr.low = htobe32(dma_low); buf->response_addr.high = htobe32(dma_high); mtx_lock(&sc->cmd_mtx); response->result = 0xffffffff; wmb(); mxge_pio_copy((volatile void *)cmd_addr, buf, sizeof (*buf)); /* wait up to 20ms */ err = EAGAIN; for (sleep_total = 0; sleep_total < 20; sleep_total++) { bus_dmamap_sync(sc->cmd_dma.dmat, sc->cmd_dma.map, BUS_DMASYNC_POSTREAD); wmb(); switch (be32toh(response->result)) { case 0: data->data0 = be32toh(response->data); err = 0; break; case 0xffffffff: DELAY(1000); break; case MXGEFW_CMD_UNKNOWN: err = ENOSYS; break; case MXGEFW_CMD_ERROR_UNALIGNED: err = E2BIG; break; case MXGEFW_CMD_ERROR_BUSY: err = EBUSY; break; case MXGEFW_CMD_ERROR_I2C_ABSENT: err = ENXIO; break; default: device_printf(sc->dev, "mxge: command %d " "failed, result = %d\n", cmd, be32toh(response->result)); err = ENXIO; break; } if (err != EAGAIN) break; } if (err == EAGAIN) device_printf(sc->dev, "mxge: command %d timed out" "result = %d\n", cmd, be32toh(response->result)); mtx_unlock(&sc->cmd_mtx); return err; } static int mxge_adopt_running_firmware(mxge_softc_t *sc) { struct 
mcp_gen_header *hdr; const size_t bytes = sizeof (struct mcp_gen_header); size_t hdr_offset; int status; /* find running firmware header */ hdr_offset = htobe32(*(volatile uint32_t *) (sc->sram + MCP_HEADER_PTR_OFFSET)); if ((hdr_offset & 3) || hdr_offset + sizeof(*hdr) > sc->sram_size) { device_printf(sc->dev, "Running firmware has bad header offset (%d)\n", (int)hdr_offset); return EIO; } /* copy header of running firmware from SRAM to host memory to * validate firmware */ hdr = malloc(bytes, M_DEVBUF, M_NOWAIT); if (hdr == NULL) { device_printf(sc->dev, "could not malloc firmware hdr\n"); return ENOMEM; } bus_space_read_region_1(rman_get_bustag(sc->mem_res), rman_get_bushandle(sc->mem_res), hdr_offset, (char *)hdr, bytes); status = mxge_validate_firmware(sc, hdr); free(hdr, M_DEVBUF); /* * check to see if adopted firmware has bug where adopting * it will cause broadcasts to be filtered unless the NIC * is kept in ALLMULTI mode */ if (sc->fw_ver_major == 1 && sc->fw_ver_minor == 4 && sc->fw_ver_tiny >= 4 && sc->fw_ver_tiny <= 11) { sc->adopted_rx_filter_bug = 1; device_printf(sc->dev, "Adopting fw %d.%d.%d: " "working around rx filter bug\n", sc->fw_ver_major, sc->fw_ver_minor, sc->fw_ver_tiny); } return status; } static int mxge_load_firmware(mxge_softc_t *sc, int adopt) { volatile uint32_t *confirm; volatile char *submit; char buf_bytes[72]; uint32_t *buf, size, dma_low, dma_high; int status, i; buf = (uint32_t *)((unsigned long)(buf_bytes + 7) & ~7UL); size = sc->sram_size; status = mxge_load_firmware_helper(sc, &size); if (status) { if (!adopt) return status; /* Try to use the currently running firmware, if it is new enough */ status = mxge_adopt_running_firmware(sc); if (status) { device_printf(sc->dev, "failed to adopt running firmware\n"); return status; } device_printf(sc->dev, "Successfully adopted running firmware\n"); if (sc->tx_boundary == 4096) { device_printf(sc->dev, "Using firmware currently running on NIC" ". For optimal\n"); device_printf(sc->dev, "performance consider loading optimized " "firmware\n"); } sc->fw_name = mxge_fw_unaligned; sc->tx_boundary = 2048; return 0; } /* clear confirmation addr */ confirm = (volatile uint32_t *)sc->cmd; *confirm = 0; wmb(); /* send a reload command to the bootstrap MCP, and wait for the response in the confirmation address. The firmware should write a -1 there to indicate it is alive and well */ dma_low = MXGE_LOWPART_TO_U32(sc->cmd_dma.bus_addr); dma_high = MXGE_HIGHPART_TO_U32(sc->cmd_dma.bus_addr); buf[0] = htobe32(dma_high); /* confirm addr MSW */ buf[1] = htobe32(dma_low); /* confirm addr LSW */ buf[2] = htobe32(0xffffffff); /* confirm data */ /* FIX: All newest firmware should un-protect the bottom of the sram before handoff. However, the very first interfaces do not. 
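   (See the handoff request assembled below: per its inline comments, buf[3]
   holds where the code starts in SRAM, buf[4] the length of the code, buf[5]
   where to copy to, and buf[6] where to jump to.)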
Therefore the handoff copy must skip the first 8 bytes */ /* where the code starts*/ buf[3] = htobe32(MXGE_FW_OFFSET + 8); buf[4] = htobe32(size - 8); /* length of code */ buf[5] = htobe32(8); /* where to copy to */ buf[6] = htobe32(0); /* where to jump to */ submit = (volatile char *)(sc->sram + MXGEFW_BOOT_HANDOFF); mxge_pio_copy(submit, buf, 64); wmb(); DELAY(1000); wmb(); i = 0; while (*confirm != 0xffffffff && i < 20) { DELAY(1000*10); i++; bus_dmamap_sync(sc->cmd_dma.dmat, sc->cmd_dma.map, BUS_DMASYNC_POSTREAD); } if (*confirm != 0xffffffff) { device_printf(sc->dev,"handoff failed (%p = 0x%x)", confirm, *confirm); return ENXIO; } return 0; } static int mxge_update_mac_address(mxge_softc_t *sc) { mxge_cmd_t cmd; uint8_t *addr = sc->mac_addr; int status; cmd.data0 = ((addr[0] << 24) | (addr[1] << 16) | (addr[2] << 8) | addr[3]); cmd.data1 = ((addr[4] << 8) | (addr[5])); status = mxge_send_cmd(sc, MXGEFW_SET_MAC_ADDRESS, &cmd); return status; } static int mxge_change_pause(mxge_softc_t *sc, int pause) { mxge_cmd_t cmd; int status; if (pause) status = mxge_send_cmd(sc, MXGEFW_ENABLE_FLOW_CONTROL, &cmd); else status = mxge_send_cmd(sc, MXGEFW_DISABLE_FLOW_CONTROL, &cmd); if (status) { device_printf(sc->dev, "Failed to set flow control mode\n"); return ENXIO; } sc->pause = pause; return 0; } static void mxge_change_promisc(mxge_softc_t *sc, int promisc) { mxge_cmd_t cmd; int status; if (mxge_always_promisc) promisc = 1; if (promisc) status = mxge_send_cmd(sc, MXGEFW_ENABLE_PROMISC, &cmd); else status = mxge_send_cmd(sc, MXGEFW_DISABLE_PROMISC, &cmd); if (status) { device_printf(sc->dev, "Failed to set promisc mode\n"); } } static void mxge_set_multicast_list(mxge_softc_t *sc) { mxge_cmd_t cmd; struct ifmultiaddr *ifma; struct ifnet *ifp = sc->ifp; int err; /* This firmware is known to not support multicast */ if (!sc->fw_multicast_support) return; /* Disable multicast filtering while we play with the lists*/ err = mxge_send_cmd(sc, MXGEFW_ENABLE_ALLMULTI, &cmd); if (err != 0) { device_printf(sc->dev, "Failed MXGEFW_ENABLE_ALLMULTI," " error status: %d\n", err); return; } if (sc->adopted_rx_filter_bug) return; if (ifp->if_flags & IFF_ALLMULTI) /* request to disable multicast filtering, so quit here */ return; /* Flush all the filters */ err = mxge_send_cmd(sc, MXGEFW_LEAVE_ALL_MULTICAST_GROUPS, &cmd); if (err != 0) { device_printf(sc->dev, "Failed MXGEFW_LEAVE_ALL_MULTICAST_GROUPS" ", error status: %d\n", err); return; } /* Walk the multicast list, and add each address */ if_maddr_rlock(ifp); TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) { if (ifma->ifma_addr->sa_family != AF_LINK) continue; bcopy(LLADDR((struct sockaddr_dl *)ifma->ifma_addr), &cmd.data0, 4); bcopy(LLADDR((struct sockaddr_dl *)ifma->ifma_addr) + 4, &cmd.data1, 2); cmd.data0 = htonl(cmd.data0); cmd.data1 = htonl(cmd.data1); err = mxge_send_cmd(sc, MXGEFW_JOIN_MULTICAST_GROUP, &cmd); if (err != 0) { device_printf(sc->dev, "Failed " "MXGEFW_JOIN_MULTICAST_GROUP, error status:" "%d\t", err); /* abort, leaving multicast filtering off */ if_maddr_runlock(ifp); return; } } if_maddr_runlock(ifp); /* Enable multicast filtering */ err = mxge_send_cmd(sc, MXGEFW_DISABLE_ALLMULTI, &cmd); if (err != 0) { device_printf(sc->dev, "Failed MXGEFW_DISABLE_ALLMULTI" ", error status: %d\n", err); } } static int mxge_max_mtu(mxge_softc_t *sc) { mxge_cmd_t cmd; int status; if (MJUMPAGESIZE - MXGEFW_PAD > MXGEFW_MAX_MTU) return MXGEFW_MAX_MTU - MXGEFW_PAD; /* try to set nbufs to see if it we can use virtually contiguous jumbos */ cmd.data0 = 
0; status = mxge_send_cmd(sc, MXGEFW_CMD_ALWAYS_USE_N_BIG_BUFFERS, &cmd); if (status == 0) return MXGEFW_MAX_MTU - MXGEFW_PAD; /* otherwise, we're limited to MJUMPAGESIZE */ return MJUMPAGESIZE - MXGEFW_PAD; } static int mxge_reset(mxge_softc_t *sc, int interrupts_setup) { struct mxge_slice_state *ss; mxge_rx_done_t *rx_done; volatile uint32_t *irq_claim; mxge_cmd_t cmd; int slice, status; /* try to send a reset command to the card to see if it is alive */ memset(&cmd, 0, sizeof (cmd)); status = mxge_send_cmd(sc, MXGEFW_CMD_RESET, &cmd); if (status != 0) { device_printf(sc->dev, "failed reset\n"); return ENXIO; } mxge_dummy_rdma(sc, 1); /* set the intrq size */ cmd.data0 = sc->rx_ring_size; status = mxge_send_cmd(sc, MXGEFW_CMD_SET_INTRQ_SIZE, &cmd); /* * Even though we already know how many slices are supported * via mxge_slice_probe(), MXGEFW_CMD_GET_MAX_RSS_QUEUES * has magic side effects, and must be called after a reset. * It must be called prior to calling any RSS related cmds, * including assigning an interrupt queue for anything but * slice 0. It must also be called *after* * MXGEFW_CMD_SET_INTRQ_SIZE, since the intrq size is used by * the firmware to compute offsets. */ if (sc->num_slices > 1) { /* ask the maximum number of slices it supports */ status = mxge_send_cmd(sc, MXGEFW_CMD_GET_MAX_RSS_QUEUES, &cmd); if (status != 0) { device_printf(sc->dev, "failed to get number of slices\n"); return status; } /* * MXGEFW_CMD_ENABLE_RSS_QUEUES must be called prior * to setting up the interrupt queue DMA */ cmd.data0 = sc->num_slices; cmd.data1 = MXGEFW_SLICE_INTR_MODE_ONE_PER_SLICE; #ifdef IFNET_BUF_RING cmd.data1 |= MXGEFW_SLICE_ENABLE_MULTIPLE_TX_QUEUES; #endif status = mxge_send_cmd(sc, MXGEFW_CMD_ENABLE_RSS_QUEUES, &cmd); if (status != 0) { device_printf(sc->dev, "failed to set number of slices\n"); return status; } } if (interrupts_setup) { /* Now exchange information about interrupts */ for (slice = 0; slice < sc->num_slices; slice++) { rx_done = &sc->ss[slice].rx_done; memset(rx_done->entry, 0, sc->rx_ring_size); cmd.data0 = MXGE_LOWPART_TO_U32(rx_done->dma.bus_addr); cmd.data1 = MXGE_HIGHPART_TO_U32(rx_done->dma.bus_addr); cmd.data2 = slice; status |= mxge_send_cmd(sc, MXGEFW_CMD_SET_INTRQ_DMA, &cmd); } } status |= mxge_send_cmd(sc, MXGEFW_CMD_GET_INTR_COAL_DELAY_OFFSET, &cmd); sc->intr_coal_delay_ptr = (volatile uint32_t *)(sc->sram + cmd.data0); status |= mxge_send_cmd(sc, MXGEFW_CMD_GET_IRQ_ACK_OFFSET, &cmd); irq_claim = (volatile uint32_t *)(sc->sram + cmd.data0); status |= mxge_send_cmd(sc, MXGEFW_CMD_GET_IRQ_DEASSERT_OFFSET, &cmd); sc->irq_deassert = (volatile uint32_t *)(sc->sram + cmd.data0); if (status != 0) { device_printf(sc->dev, "failed set interrupt parameters\n"); return status; } *sc->intr_coal_delay_ptr = htobe32(sc->intr_coal_delay); /* run a DMA benchmark */ (void) mxge_dma_test(sc, MXGEFW_DMA_TEST); for (slice = 0; slice < sc->num_slices; slice++) { ss = &sc->ss[slice]; ss->irq_claim = irq_claim + (2 * slice); /* reset mcp/driver shared state back to 0 */ ss->rx_done.idx = 0; ss->rx_done.cnt = 0; ss->tx.req = 0; ss->tx.done = 0; ss->tx.pkt_done = 0; ss->tx.queue_active = 0; ss->tx.activate = 0; ss->tx.deactivate = 0; ss->tx.wake = 0; ss->tx.defrag = 0; ss->tx.stall = 0; ss->rx_big.cnt = 0; ss->rx_small.cnt = 0; ss->lc.lro_bad_csum = 0; ss->lc.lro_queued = 0; ss->lc.lro_flushed = 0; if (ss->fw_stats != NULL) { bzero(ss->fw_stats, sizeof *ss->fw_stats); } } sc->rdma_tags_available = 15; status = mxge_update_mac_address(sc); mxge_change_promisc(sc, 
sc->ifp->if_flags & IFF_PROMISC); mxge_change_pause(sc, sc->pause); mxge_set_multicast_list(sc); if (sc->throttle) { cmd.data0 = sc->throttle; if (mxge_send_cmd(sc, MXGEFW_CMD_SET_THROTTLE_FACTOR, &cmd)) { device_printf(sc->dev, "can't enable throttle\n"); } } return status; } static int mxge_change_throttle(SYSCTL_HANDLER_ARGS) { mxge_cmd_t cmd; mxge_softc_t *sc; int err; unsigned int throttle; sc = arg1; throttle = sc->throttle; err = sysctl_handle_int(oidp, &throttle, arg2, req); if (err != 0) { return err; } if (throttle == sc->throttle) return 0; if (throttle < MXGE_MIN_THROTTLE || throttle > MXGE_MAX_THROTTLE) return EINVAL; mtx_lock(&sc->driver_mtx); cmd.data0 = throttle; err = mxge_send_cmd(sc, MXGEFW_CMD_SET_THROTTLE_FACTOR, &cmd); if (err == 0) sc->throttle = throttle; mtx_unlock(&sc->driver_mtx); return err; } static int mxge_change_intr_coal(SYSCTL_HANDLER_ARGS) { mxge_softc_t *sc; unsigned int intr_coal_delay; int err; sc = arg1; intr_coal_delay = sc->intr_coal_delay; err = sysctl_handle_int(oidp, &intr_coal_delay, arg2, req); if (err != 0) { return err; } if (intr_coal_delay == sc->intr_coal_delay) return 0; if (intr_coal_delay == 0 || intr_coal_delay > 1000*1000) return EINVAL; mtx_lock(&sc->driver_mtx); *sc->intr_coal_delay_ptr = htobe32(intr_coal_delay); sc->intr_coal_delay = intr_coal_delay; mtx_unlock(&sc->driver_mtx); return err; } static int mxge_change_flow_control(SYSCTL_HANDLER_ARGS) { mxge_softc_t *sc; unsigned int enabled; int err; sc = arg1; enabled = sc->pause; err = sysctl_handle_int(oidp, &enabled, arg2, req); if (err != 0) { return err; } if (enabled == sc->pause) return 0; mtx_lock(&sc->driver_mtx); err = mxge_change_pause(sc, enabled); mtx_unlock(&sc->driver_mtx); return err; } static int mxge_handle_be32(SYSCTL_HANDLER_ARGS) { int err; if (arg1 == NULL) return EFAULT; arg2 = be32toh(*(int *)arg1); arg1 = NULL; err = sysctl_handle_int(oidp, arg1, arg2, req); return err; } static void mxge_rem_sysctls(mxge_softc_t *sc) { struct mxge_slice_state *ss; int slice; if (sc->slice_sysctl_tree == NULL) return; for (slice = 0; slice < sc->num_slices; slice++) { ss = &sc->ss[slice]; if (ss == NULL || ss->sysctl_tree == NULL) continue; sysctl_ctx_free(&ss->sysctl_ctx); ss->sysctl_tree = NULL; } sysctl_ctx_free(&sc->slice_sysctl_ctx); sc->slice_sysctl_tree = NULL; } static void mxge_add_sysctls(mxge_softc_t *sc) { struct sysctl_ctx_list *ctx; struct sysctl_oid_list *children; mcp_irq_data_t *fw; struct mxge_slice_state *ss; int slice; char slice_num[8]; ctx = device_get_sysctl_ctx(sc->dev); children = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->dev)); fw = sc->ss[0].fw_stats; /* random information */ SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "firmware_version", CTLFLAG_RD, sc->fw_version, 0, "firmware version"); SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "serial_number", CTLFLAG_RD, sc->serial_number_string, 0, "serial number"); SYSCTL_ADD_STRING(ctx, children, OID_AUTO, "product_code", CTLFLAG_RD, sc->product_code_string, 0, "product_code"); SYSCTL_ADD_INT(ctx, children, OID_AUTO, "pcie_link_width", CTLFLAG_RD, &sc->link_width, 0, "tx_boundary"); SYSCTL_ADD_INT(ctx, children, OID_AUTO, "tx_boundary", CTLFLAG_RD, &sc->tx_boundary, 0, "tx_boundary"); SYSCTL_ADD_INT(ctx, children, OID_AUTO, "write_combine", CTLFLAG_RD, &sc->wc, 0, "write combining PIO?"); SYSCTL_ADD_INT(ctx, children, OID_AUTO, "read_dma_MBs", CTLFLAG_RD, &sc->read_dma, 0, "DMA Read speed in MB/s"); SYSCTL_ADD_INT(ctx, children, OID_AUTO, "write_dma_MBs", CTLFLAG_RD, &sc->write_dma, 0, "DMA Write speed in 
MB/s"); SYSCTL_ADD_INT(ctx, children, OID_AUTO, "read_write_dma_MBs", CTLFLAG_RD, &sc->read_write_dma, 0, "DMA concurrent Read/Write speed in MB/s"); SYSCTL_ADD_INT(ctx, children, OID_AUTO, "watchdog_resets", CTLFLAG_RD, &sc->watchdog_resets, 0, "Number of times NIC was reset"); /* performance related tunables */ SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "intr_coal_delay", CTLTYPE_INT|CTLFLAG_RW, sc, 0, mxge_change_intr_coal, "I", "interrupt coalescing delay in usecs"); SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "throttle", CTLTYPE_INT|CTLFLAG_RW, sc, 0, mxge_change_throttle, "I", "transmit throttling"); SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "flow_control_enabled", CTLTYPE_INT|CTLFLAG_RW, sc, 0, mxge_change_flow_control, "I", "interrupt coalescing delay in usecs"); SYSCTL_ADD_INT(ctx, children, OID_AUTO, "deassert_wait", CTLFLAG_RW, &mxge_deassert_wait, 0, "Wait for IRQ line to go low in ihandler"); /* stats block from firmware is in network byte order. Need to swap it */ SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "link_up", CTLTYPE_INT|CTLFLAG_RD, &fw->link_up, 0, mxge_handle_be32, "I", "link up"); SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "rdma_tags_available", CTLTYPE_INT|CTLFLAG_RD, &fw->rdma_tags_available, 0, mxge_handle_be32, "I", "rdma_tags_available"); SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "dropped_bad_crc32", CTLTYPE_INT|CTLFLAG_RD, &fw->dropped_bad_crc32, 0, mxge_handle_be32, "I", "dropped_bad_crc32"); SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "dropped_bad_phy", CTLTYPE_INT|CTLFLAG_RD, &fw->dropped_bad_phy, 0, mxge_handle_be32, "I", "dropped_bad_phy"); SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "dropped_link_error_or_filtered", CTLTYPE_INT|CTLFLAG_RD, &fw->dropped_link_error_or_filtered, 0, mxge_handle_be32, "I", "dropped_link_error_or_filtered"); SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "dropped_link_overflow", CTLTYPE_INT|CTLFLAG_RD, &fw->dropped_link_overflow, 0, mxge_handle_be32, "I", "dropped_link_overflow"); SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "dropped_multicast_filtered", CTLTYPE_INT|CTLFLAG_RD, &fw->dropped_multicast_filtered, 0, mxge_handle_be32, "I", "dropped_multicast_filtered"); SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "dropped_no_big_buffer", CTLTYPE_INT|CTLFLAG_RD, &fw->dropped_no_big_buffer, 0, mxge_handle_be32, "I", "dropped_no_big_buffer"); SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "dropped_no_small_buffer", CTLTYPE_INT|CTLFLAG_RD, &fw->dropped_no_small_buffer, 0, mxge_handle_be32, "I", "dropped_no_small_buffer"); SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "dropped_overrun", CTLTYPE_INT|CTLFLAG_RD, &fw->dropped_overrun, 0, mxge_handle_be32, "I", "dropped_overrun"); SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "dropped_pause", CTLTYPE_INT|CTLFLAG_RD, &fw->dropped_pause, 0, mxge_handle_be32, "I", "dropped_pause"); SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "dropped_runt", CTLTYPE_INT|CTLFLAG_RD, &fw->dropped_runt, 0, mxge_handle_be32, "I", "dropped_runt"); SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "dropped_unicast_filtered", CTLTYPE_INT|CTLFLAG_RD, &fw->dropped_unicast_filtered, 0, mxge_handle_be32, "I", "dropped_unicast_filtered"); /* verbose printing? 
*/ SYSCTL_ADD_INT(ctx, children, OID_AUTO, "verbose", CTLFLAG_RW, &mxge_verbose, 0, "verbose printing"); /* add counters exported for debugging from all slices */ sysctl_ctx_init(&sc->slice_sysctl_ctx); sc->slice_sysctl_tree = SYSCTL_ADD_NODE(&sc->slice_sysctl_ctx, children, OID_AUTO, "slice", CTLFLAG_RD, 0, ""); for (slice = 0; slice < sc->num_slices; slice++) { ss = &sc->ss[slice]; sysctl_ctx_init(&ss->sysctl_ctx); ctx = &ss->sysctl_ctx; children = SYSCTL_CHILDREN(sc->slice_sysctl_tree); sprintf(slice_num, "%d", slice); ss->sysctl_tree = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, slice_num, CTLFLAG_RD, 0, ""); children = SYSCTL_CHILDREN(ss->sysctl_tree); SYSCTL_ADD_INT(ctx, children, OID_AUTO, "rx_small_cnt", CTLFLAG_RD, &ss->rx_small.cnt, 0, "rx_small_cnt"); SYSCTL_ADD_INT(ctx, children, OID_AUTO, "rx_big_cnt", CTLFLAG_RD, &ss->rx_big.cnt, 0, "rx_small_cnt"); SYSCTL_ADD_U64(ctx, children, OID_AUTO, "lro_flushed", CTLFLAG_RD, &ss->lc.lro_flushed, 0, "number of lro merge queues flushed"); SYSCTL_ADD_U64(ctx, children, OID_AUTO, "lro_bad_csum", CTLFLAG_RD, &ss->lc.lro_bad_csum, 0, "number of bad csums preventing LRO"); SYSCTL_ADD_U64(ctx, children, OID_AUTO, "lro_queued", CTLFLAG_RD, &ss->lc.lro_queued, 0, "number of frames appended to lro merge" "queues"); #ifndef IFNET_BUF_RING /* only transmit from slice 0 for now */ if (slice > 0) continue; #endif SYSCTL_ADD_INT(ctx, children, OID_AUTO, "tx_req", CTLFLAG_RD, &ss->tx.req, 0, "tx_req"); SYSCTL_ADD_INT(ctx, children, OID_AUTO, "tx_done", CTLFLAG_RD, &ss->tx.done, 0, "tx_done"); SYSCTL_ADD_INT(ctx, children, OID_AUTO, "tx_pkt_done", CTLFLAG_RD, &ss->tx.pkt_done, 0, "tx_done"); SYSCTL_ADD_INT(ctx, children, OID_AUTO, "tx_stall", CTLFLAG_RD, &ss->tx.stall, 0, "tx_stall"); SYSCTL_ADD_INT(ctx, children, OID_AUTO, "tx_wake", CTLFLAG_RD, &ss->tx.wake, 0, "tx_wake"); SYSCTL_ADD_INT(ctx, children, OID_AUTO, "tx_defrag", CTLFLAG_RD, &ss->tx.defrag, 0, "tx_defrag"); SYSCTL_ADD_INT(ctx, children, OID_AUTO, "tx_queue_active", CTLFLAG_RD, &ss->tx.queue_active, 0, "tx_queue_active"); SYSCTL_ADD_INT(ctx, children, OID_AUTO, "tx_activate", CTLFLAG_RD, &ss->tx.activate, 0, "tx_activate"); SYSCTL_ADD_INT(ctx, children, OID_AUTO, "tx_deactivate", CTLFLAG_RD, &ss->tx.deactivate, 0, "tx_deactivate"); } } /* copy an array of mcp_kreq_ether_send_t's to the mcp. Copy backwards one at a time and handle ring wraps */ static inline void mxge_submit_req_backwards(mxge_tx_ring_t *tx, mcp_kreq_ether_send_t *src, int cnt) { int idx, starting_slot; starting_slot = tx->req; while (cnt > 1) { cnt--; idx = (starting_slot + cnt) & tx->mask; mxge_pio_copy(&tx->lanai[idx], &src[cnt], sizeof(*src)); wmb(); } } /* * copy an array of mcp_kreq_ether_send_t's to the mcp. Copy * at most 32 bytes at a time, so as to avoid involving the software * pio handler in the nic. 
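 * (Each send request is a solid 16-byte block, so the aligned path below
 * copies two requests per mxge_pio_copy and places a wmb() after every
 * 32 bytes written.)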
We re-write the first segment's flags * to mark them valid only after writing the entire chain */ static inline void mxge_submit_req(mxge_tx_ring_t *tx, mcp_kreq_ether_send_t *src, int cnt) { int idx, i; uint32_t *src_ints; volatile uint32_t *dst_ints; mcp_kreq_ether_send_t *srcp; volatile mcp_kreq_ether_send_t *dstp, *dst; uint8_t last_flags; idx = tx->req & tx->mask; last_flags = src->flags; src->flags = 0; wmb(); dst = dstp = &tx->lanai[idx]; srcp = src; if ((idx + cnt) < tx->mask) { for (i = 0; i < (cnt - 1); i += 2) { mxge_pio_copy(dstp, srcp, 2 * sizeof(*src)); wmb(); /* force write every 32 bytes */ srcp += 2; dstp += 2; } } else { /* submit all but the first request, and ensure that it is submitted below */ mxge_submit_req_backwards(tx, src, cnt); i = 0; } if (i < cnt) { /* submit the first request */ mxge_pio_copy(dstp, srcp, sizeof(*src)); wmb(); /* barrier before setting valid flag */ } /* re-write the last 32-bits with the valid flags */ src->flags = last_flags; src_ints = (uint32_t *)src; src_ints+=3; dst_ints = (volatile uint32_t *)dst; dst_ints+=3; *dst_ints = *src_ints; tx->req += cnt; wmb(); } static int mxge_parse_tx(struct mxge_slice_state *ss, struct mbuf *m, struct mxge_pkt_info *pi) { struct ether_vlan_header *eh; uint16_t etype; int tso = m->m_pkthdr.csum_flags & (CSUM_TSO); #if IFCAP_TSO6 && defined(INET6) int nxt; #endif eh = mtod(m, struct ether_vlan_header *); if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) { etype = ntohs(eh->evl_proto); pi->ip_off = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN; } else { etype = ntohs(eh->evl_encap_proto); pi->ip_off = ETHER_HDR_LEN; } switch (etype) { case ETHERTYPE_IP: /* * ensure ip header is in first mbuf, copy it to a * scratch buffer if not */ pi->ip = (struct ip *)(m->m_data + pi->ip_off); pi->ip6 = NULL; if (__predict_false(m->m_len < pi->ip_off + sizeof(*pi->ip))) { m_copydata(m, 0, pi->ip_off + sizeof(*pi->ip), ss->scratch); pi->ip = (struct ip *)(ss->scratch + pi->ip_off); } pi->ip_hlen = pi->ip->ip_hl << 2; if (!tso) return 0; if (__predict_false(m->m_len < pi->ip_off + pi->ip_hlen + sizeof(struct tcphdr))) { m_copydata(m, 0, pi->ip_off + pi->ip_hlen + sizeof(struct tcphdr), ss->scratch); pi->ip = (struct ip *)(ss->scratch + pi->ip_off); } pi->tcp = (struct tcphdr *)((char *)pi->ip + pi->ip_hlen); break; #if IFCAP_TSO6 && defined(INET6) case ETHERTYPE_IPV6: pi->ip6 = (struct ip6_hdr *)(m->m_data + pi->ip_off); if (__predict_false(m->m_len < pi->ip_off + sizeof(*pi->ip6))) { m_copydata(m, 0, pi->ip_off + sizeof(*pi->ip6), ss->scratch); pi->ip6 = (struct ip6_hdr *)(ss->scratch + pi->ip_off); } nxt = 0; pi->ip_hlen = ip6_lasthdr(m, pi->ip_off, IPPROTO_IPV6, &nxt); pi->ip_hlen -= pi->ip_off; if (nxt != IPPROTO_TCP && nxt != IPPROTO_UDP) return EINVAL; if (!tso) return 0; if (pi->ip_off + pi->ip_hlen > ss->sc->max_tso6_hlen) return EINVAL; if (__predict_false(m->m_len < pi->ip_off + pi->ip_hlen + sizeof(struct tcphdr))) { m_copydata(m, 0, pi->ip_off + pi->ip_hlen + sizeof(struct tcphdr), ss->scratch); pi->ip6 = (struct ip6_hdr *)(ss->scratch + pi->ip_off); } pi->tcp = (struct tcphdr *)((char *)pi->ip6 + pi->ip_hlen); break; #endif default: return EINVAL; } return 0; } #if IFCAP_TSO4 static void mxge_encap_tso(struct mxge_slice_state *ss, struct mbuf *m, int busdma_seg_cnt, struct mxge_pkt_info *pi) { mxge_tx_ring_t *tx; mcp_kreq_ether_send_t *req; bus_dma_segment_t *seg; uint32_t low, high_swapped; int len, seglen, cum_len, cum_len_next; int next_is_first, chop, cnt, rdma_count, small; uint16_t pseudo_hdr_offset, cksum_offset, 
mss, sum; uint8_t flags, flags_next; static int once; mss = m->m_pkthdr.tso_segsz; /* negative cum_len signifies to the * send loop that we are still in the * header portion of the TSO packet. */ cksum_offset = pi->ip_off + pi->ip_hlen; cum_len = -(cksum_offset + (pi->tcp->th_off << 2)); /* TSO implies checksum offload on this hardware */ if (__predict_false((m->m_pkthdr.csum_flags & (CSUM_TCP|CSUM_TCP_IPV6)) == 0)) { /* * If packet has full TCP csum, replace it with pseudo hdr * sum that the NIC expects, otherwise the NIC will emit * packets with bad TCP checksums. */ m->m_pkthdr.csum_data = offsetof(struct tcphdr, th_sum); if (pi->ip6) { #if (CSUM_TCP_IPV6 != 0) && defined(INET6) m->m_pkthdr.csum_flags |= CSUM_TCP_IPV6; sum = in6_cksum_pseudo(pi->ip6, m->m_pkthdr.len - cksum_offset, IPPROTO_TCP, 0); #endif } else { #ifdef INET m->m_pkthdr.csum_flags |= CSUM_TCP; sum = in_pseudo(pi->ip->ip_src.s_addr, pi->ip->ip_dst.s_addr, htons(IPPROTO_TCP + (m->m_pkthdr.len - cksum_offset))); #endif } m_copyback(m, offsetof(struct tcphdr, th_sum) + cksum_offset, sizeof(sum), (caddr_t)&sum); } flags = MXGEFW_FLAGS_TSO_HDR | MXGEFW_FLAGS_FIRST; /* for TSO, pseudo_hdr_offset holds mss. * The firmware figures out where to put * the checksum by parsing the header. */ pseudo_hdr_offset = htobe16(mss); if (pi->ip6) { /* * for IPv6 TSO, the "checksum offset" is re-purposed * to store the TCP header len */ cksum_offset = (pi->tcp->th_off << 2); } tx = &ss->tx; req = tx->req_list; seg = tx->seg_list; cnt = 0; rdma_count = 0; /* "rdma_count" is the number of RDMAs belonging to the * current packet BEFORE the current send request. For * non-TSO packets, this is equal to "count". * For TSO packets, rdma_count needs to be reset * to 0 after a segment cut. * * The rdma_count field of the send request is * the number of RDMAs of the packet starting at * that request. For TSO send requests with one ore more cuts * in the middle, this is the number of RDMAs starting * after the last cut in the request. All previous * segments before the last cut implicitly have 1 RDMA. * * Since the number of RDMAs is not known beforehand, * it must be filled-in retroactively - after each * segmentation cut or at the end of the entire packet. 
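 * The retroactive fill appears in the loop below as
 * (req - rdma_count)->rdma_count = ..., which reaches back to the request
 * that started the current run of RDMAs.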
*/ while (busdma_seg_cnt) { /* Break the busdma segment up into pieces*/ low = MXGE_LOWPART_TO_U32(seg->ds_addr); high_swapped = htobe32(MXGE_HIGHPART_TO_U32(seg->ds_addr)); len = seg->ds_len; while (len) { flags_next = flags & ~MXGEFW_FLAGS_FIRST; seglen = len; cum_len_next = cum_len + seglen; (req-rdma_count)->rdma_count = rdma_count + 1; if (__predict_true(cum_len >= 0)) { /* payload */ chop = (cum_len_next > mss); cum_len_next = cum_len_next % mss; next_is_first = (cum_len_next == 0); flags |= chop * MXGEFW_FLAGS_TSO_CHOP; flags_next |= next_is_first * MXGEFW_FLAGS_FIRST; rdma_count |= -(chop | next_is_first); rdma_count += chop & !next_is_first; } else if (cum_len_next >= 0) { /* header ends */ rdma_count = -1; cum_len_next = 0; seglen = -cum_len; small = (mss <= MXGEFW_SEND_SMALL_SIZE); flags_next = MXGEFW_FLAGS_TSO_PLD | MXGEFW_FLAGS_FIRST | (small * MXGEFW_FLAGS_SMALL); } req->addr_high = high_swapped; req->addr_low = htobe32(low); req->pseudo_hdr_offset = pseudo_hdr_offset; req->pad = 0; req->rdma_count = 1; req->length = htobe16(seglen); req->cksum_offset = cksum_offset; req->flags = flags | ((cum_len & 1) * MXGEFW_FLAGS_ALIGN_ODD); low += seglen; len -= seglen; cum_len = cum_len_next; flags = flags_next; req++; cnt++; rdma_count++; if (cksum_offset != 0 && !pi->ip6) { if (__predict_false(cksum_offset > seglen)) cksum_offset -= seglen; else cksum_offset = 0; } if (__predict_false(cnt > tx->max_desc)) goto drop; } busdma_seg_cnt--; seg++; } (req-rdma_count)->rdma_count = rdma_count; do { req--; req->flags |= MXGEFW_FLAGS_TSO_LAST; } while (!(req->flags & (MXGEFW_FLAGS_TSO_CHOP | MXGEFW_FLAGS_FIRST))); tx->info[((cnt - 1) + tx->req) & tx->mask].flag = 1; mxge_submit_req(tx, tx->req_list, cnt); #ifdef IFNET_BUF_RING if ((ss->sc->num_slices > 1) && tx->queue_active == 0) { /* tell the NIC to start polling this slice */ *tx->send_go = 1; tx->queue_active = 1; tx->activate++; wmb(); } #endif return; drop: bus_dmamap_unload(tx->dmat, tx->info[tx->req & tx->mask].map); m_freem(m); ss->oerrors++; if (!once) { printf("tx->max_desc exceeded via TSO!\n"); printf("mss = %d, %ld, %d!\n", mss, (long)seg - (long)tx->seg_list, tx->max_desc); once = 1; } return; } #endif /* IFCAP_TSO4 */ #ifdef MXGE_NEW_VLAN_API /* * We reproduce the software vlan tag insertion from * net/if_vlan.c:vlan_start() here so that we can advertise "hardware" * vlan tag insertion. We need to advertise this in order to have the * vlan interface respect our csum offload flags. */ static struct mbuf * mxge_vlan_tag_insert(struct mbuf *m) { struct ether_vlan_header *evl; M_PREPEND(m, ETHER_VLAN_ENCAP_LEN, M_NOWAIT); if (__predict_false(m == NULL)) return NULL; if (m->m_len < sizeof(*evl)) { m = m_pullup(m, sizeof(*evl)); if (__predict_false(m == NULL)) return NULL; } /* * Transform the Ethernet header into an Ethernet header * with 802.1Q encapsulation. 
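 * M_PREPEND has already made ETHER_VLAN_ENCAP_LEN bytes of room at the front;
 * the bcopy below slides the two MAC addresses into that space, the original
 * type field is left in place as evl_proto, and evl_encap_proto/evl_tag are
 * then written into the gap.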
*/ evl = mtod(m, struct ether_vlan_header *); bcopy((char *)evl + ETHER_VLAN_ENCAP_LEN, (char *)evl, ETHER_HDR_LEN - ETHER_TYPE_LEN); evl->evl_encap_proto = htons(ETHERTYPE_VLAN); evl->evl_tag = htons(m->m_pkthdr.ether_vtag); m->m_flags &= ~M_VLANTAG; return m; } #endif /* MXGE_NEW_VLAN_API */ static void mxge_encap(struct mxge_slice_state *ss, struct mbuf *m) { struct mxge_pkt_info pi = {0,0,0,0}; mxge_softc_t *sc; mcp_kreq_ether_send_t *req; bus_dma_segment_t *seg; struct mbuf *m_tmp; struct ifnet *ifp; mxge_tx_ring_t *tx; int cnt, cum_len, err, i, idx, odd_flag; uint16_t pseudo_hdr_offset; uint8_t flags, cksum_offset; sc = ss->sc; ifp = sc->ifp; tx = &ss->tx; #ifdef MXGE_NEW_VLAN_API if (m->m_flags & M_VLANTAG) { m = mxge_vlan_tag_insert(m); if (__predict_false(m == NULL)) goto drop_without_m; } #endif if (m->m_pkthdr.csum_flags & (CSUM_TSO | CSUM_DELAY_DATA | CSUM_DELAY_DATA_IPV6)) { if (mxge_parse_tx(ss, m, &pi)) goto drop; } /* (try to) map the frame for DMA */ idx = tx->req & tx->mask; err = bus_dmamap_load_mbuf_sg(tx->dmat, tx->info[idx].map, m, tx->seg_list, &cnt, BUS_DMA_NOWAIT); if (__predict_false(err == EFBIG)) { /* Too many segments in the chain. Try to defrag */ m_tmp = m_defrag(m, M_NOWAIT); if (m_tmp == NULL) { goto drop; } ss->tx.defrag++; m = m_tmp; err = bus_dmamap_load_mbuf_sg(tx->dmat, tx->info[idx].map, m, tx->seg_list, &cnt, BUS_DMA_NOWAIT); } if (__predict_false(err != 0)) { device_printf(sc->dev, "bus_dmamap_load_mbuf_sg returned %d" " packet len = %d\n", err, m->m_pkthdr.len); goto drop; } bus_dmamap_sync(tx->dmat, tx->info[idx].map, BUS_DMASYNC_PREWRITE); tx->info[idx].m = m; #if IFCAP_TSO4 /* TSO is different enough, we handle it in another routine */ if (m->m_pkthdr.csum_flags & (CSUM_TSO)) { mxge_encap_tso(ss, m, cnt, &pi); return; } #endif req = tx->req_list; cksum_offset = 0; pseudo_hdr_offset = 0; flags = MXGEFW_FLAGS_NO_TSO; /* checksum offloading? 
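   In the non-TSO path below, cksum_offset marks the start of the transport
   header and pseudo_hdr_offset (cksum_offset + m_pkthdr.csum_data, i.e. the
   location of the transport checksum field); both are handed to the firmware
   in each send request.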
*/ if (m->m_pkthdr.csum_flags & (CSUM_DELAY_DATA | CSUM_DELAY_DATA_IPV6)) { /* ensure ip header is in first mbuf, copy it to a scratch buffer if not */ cksum_offset = pi.ip_off + pi.ip_hlen; pseudo_hdr_offset = cksum_offset + m->m_pkthdr.csum_data; pseudo_hdr_offset = htobe16(pseudo_hdr_offset); req->cksum_offset = cksum_offset; flags |= MXGEFW_FLAGS_CKSUM; odd_flag = MXGEFW_FLAGS_ALIGN_ODD; } else { odd_flag = 0; } if (m->m_pkthdr.len < MXGEFW_SEND_SMALL_SIZE) flags |= MXGEFW_FLAGS_SMALL; /* convert segments into a request list */ cum_len = 0; seg = tx->seg_list; req->flags = MXGEFW_FLAGS_FIRST; for (i = 0; i < cnt; i++) { req->addr_low = htobe32(MXGE_LOWPART_TO_U32(seg->ds_addr)); req->addr_high = htobe32(MXGE_HIGHPART_TO_U32(seg->ds_addr)); req->length = htobe16(seg->ds_len); req->cksum_offset = cksum_offset; if (cksum_offset > seg->ds_len) cksum_offset -= seg->ds_len; else cksum_offset = 0; req->pseudo_hdr_offset = pseudo_hdr_offset; req->pad = 0; /* complete solid 16-byte block */ req->rdma_count = 1; req->flags |= flags | ((cum_len & 1) * odd_flag); cum_len += seg->ds_len; seg++; req++; req->flags = 0; } req--; /* pad runts to 60 bytes */ if (cum_len < 60) { req++; req->addr_low = htobe32(MXGE_LOWPART_TO_U32(sc->zeropad_dma.bus_addr)); req->addr_high = htobe32(MXGE_HIGHPART_TO_U32(sc->zeropad_dma.bus_addr)); req->length = htobe16(60 - cum_len); req->cksum_offset = 0; req->pseudo_hdr_offset = pseudo_hdr_offset; req->pad = 0; /* complete solid 16-byte block */ req->rdma_count = 1; req->flags |= flags | ((cum_len & 1) * odd_flag); cnt++; } tx->req_list[0].rdma_count = cnt; #if 0 /* print what the firmware will see */ for (i = 0; i < cnt; i++) { printf("%d: addr: 0x%x 0x%x len:%d pso%d," "cso:%d, flags:0x%x, rdma:%d\n", i, (int)ntohl(tx->req_list[i].addr_high), (int)ntohl(tx->req_list[i].addr_low), (int)ntohs(tx->req_list[i].length), (int)ntohs(tx->req_list[i].pseudo_hdr_offset), tx->req_list[i].cksum_offset, tx->req_list[i].flags, tx->req_list[i].rdma_count); } printf("--------------\n"); #endif tx->info[((cnt - 1) + tx->req) & tx->mask].flag = 1; mxge_submit_req(tx, tx->req_list, cnt); #ifdef IFNET_BUF_RING if ((ss->sc->num_slices > 1) && tx->queue_active == 0) { /* tell the NIC to start polling this slice */ *tx->send_go = 1; tx->queue_active = 1; tx->activate++; wmb(); } #endif return; drop: m_freem(m); drop_without_m: ss->oerrors++; return; } #ifdef IFNET_BUF_RING static void mxge_qflush(struct ifnet *ifp) { mxge_softc_t *sc = ifp->if_softc; mxge_tx_ring_t *tx; struct mbuf *m; int slice; for (slice = 0; slice < sc->num_slices; slice++) { tx = &sc->ss[slice].tx; mtx_lock(&tx->mtx); while ((m = buf_ring_dequeue_sc(tx->br)) != NULL) m_freem(m); mtx_unlock(&tx->mtx); } if_qflush(ifp); } static inline void mxge_start_locked(struct mxge_slice_state *ss) { mxge_softc_t *sc; struct mbuf *m; struct ifnet *ifp; mxge_tx_ring_t *tx; sc = ss->sc; ifp = sc->ifp; tx = &ss->tx; while ((tx->mask - (tx->req - tx->done)) > tx->max_desc) { m = drbr_dequeue(ifp, tx->br); if (m == NULL) { return; } /* let BPF see it */ BPF_MTAP(ifp, m); /* give it to the nic */ mxge_encap(ss, m); } /* ran out of transmit slots */ if (((ss->if_drv_flags & IFF_DRV_OACTIVE) == 0) && (!drbr_empty(ifp, tx->br))) { ss->if_drv_flags |= IFF_DRV_OACTIVE; tx->stall++; } } static int mxge_transmit_locked(struct mxge_slice_state *ss, struct mbuf *m) { mxge_softc_t *sc; struct ifnet *ifp; mxge_tx_ring_t *tx; int err; sc = ss->sc; ifp = sc->ifp; tx = &ss->tx; if ((ss->if_drv_flags & (IFF_DRV_RUNNING|IFF_DRV_OACTIVE)) != 
IFF_DRV_RUNNING) { err = drbr_enqueue(ifp, tx->br, m); return (err); } if (!drbr_needs_enqueue(ifp, tx->br) && ((tx->mask - (tx->req - tx->done)) > tx->max_desc)) { /* let BPF see it */ BPF_MTAP(ifp, m); /* give it to the nic */ mxge_encap(ss, m); } else if ((err = drbr_enqueue(ifp, tx->br, m)) != 0) { return (err); } if (!drbr_empty(ifp, tx->br)) mxge_start_locked(ss); return (0); } static int mxge_transmit(struct ifnet *ifp, struct mbuf *m) { mxge_softc_t *sc = ifp->if_softc; struct mxge_slice_state *ss; mxge_tx_ring_t *tx; int err = 0; int slice; slice = m->m_pkthdr.flowid; slice &= (sc->num_slices - 1); /* num_slices always power of 2 */ ss = &sc->ss[slice]; tx = &ss->tx; if (mtx_trylock(&tx->mtx)) { err = mxge_transmit_locked(ss, m); mtx_unlock(&tx->mtx); } else { err = drbr_enqueue(ifp, tx->br, m); } return (err); } #else static inline void mxge_start_locked(struct mxge_slice_state *ss) { mxge_softc_t *sc; struct mbuf *m; struct ifnet *ifp; mxge_tx_ring_t *tx; sc = ss->sc; ifp = sc->ifp; tx = &ss->tx; while ((tx->mask - (tx->req - tx->done)) > tx->max_desc) { IFQ_DRV_DEQUEUE(&ifp->if_snd, m); if (m == NULL) { return; } /* let BPF see it */ BPF_MTAP(ifp, m); /* give it to the nic */ mxge_encap(ss, m); } /* ran out of transmit slots */ if ((sc->ifp->if_drv_flags & IFF_DRV_OACTIVE) == 0) { sc->ifp->if_drv_flags |= IFF_DRV_OACTIVE; tx->stall++; } } #endif static void mxge_start(struct ifnet *ifp) { mxge_softc_t *sc = ifp->if_softc; struct mxge_slice_state *ss; /* only use the first slice for now */ ss = &sc->ss[0]; mtx_lock(&ss->tx.mtx); mxge_start_locked(ss); mtx_unlock(&ss->tx.mtx); } /* * copy an array of mcp_kreq_ether_recv_t's to the mcp. Copy * at most 32 bytes at a time, so as to avoid involving the software * pio handler in the nic. We re-write the first segment's low * DMA address to mark it valid only after we write the entire chunk * in a burst */ static inline void mxge_submit_8rx(volatile mcp_kreq_ether_recv_t *dst, mcp_kreq_ether_recv_t *src) { uint32_t low; low = src->addr_low; src->addr_low = 0xffffffff; mxge_pio_copy(dst, src, 4 * sizeof (*src)); wmb(); mxge_pio_copy(dst + 4, src + 4, 4 * sizeof (*src)); wmb(); src->addr_low = low; dst->addr_low = low; wmb(); } static int mxge_get_buf_small(struct mxge_slice_state *ss, bus_dmamap_t map, int idx) { bus_dma_segment_t seg; struct mbuf *m; mxge_rx_ring_t *rx = &ss->rx_small; int cnt, err; m = m_gethdr(M_NOWAIT, MT_DATA); if (m == NULL) { rx->alloc_fail++; err = ENOBUFS; goto done; } m->m_len = MHLEN; err = bus_dmamap_load_mbuf_sg(rx->dmat, map, m, &seg, &cnt, BUS_DMA_NOWAIT); if (err != 0) { m_free(m); goto done; } rx->info[idx].m = m; rx->shadow[idx].addr_low = htobe32(MXGE_LOWPART_TO_U32(seg.ds_addr)); rx->shadow[idx].addr_high = htobe32(MXGE_HIGHPART_TO_U32(seg.ds_addr)); done: if ((idx & 7) == 7) mxge_submit_8rx(&rx->lanai[idx - 7], &rx->shadow[idx - 7]); return err; } static int mxge_get_buf_big(struct mxge_slice_state *ss, bus_dmamap_t map, int idx) { bus_dma_segment_t seg[3]; struct mbuf *m; mxge_rx_ring_t *rx = &ss->rx_big; int cnt, err, i; m = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, rx->cl_size); if (m == NULL) { rx->alloc_fail++; err = ENOBUFS; goto done; } m->m_len = rx->mlen; err = bus_dmamap_load_mbuf_sg(rx->dmat, map, m, seg, &cnt, BUS_DMA_NOWAIT); if (err != 0) { m_free(m); goto done; } rx->info[idx].m = m; rx->shadow[idx].addr_low = htobe32(MXGE_LOWPART_TO_U32(seg->ds_addr)); rx->shadow[idx].addr_high = htobe32(MXGE_HIGHPART_TO_U32(seg->ds_addr)); #if MXGE_VIRT_JUMBOS for (i = 1; i < cnt; i++) { rx->shadow[idx 
+ i].addr_low = htobe32(MXGE_LOWPART_TO_U32(seg[i].ds_addr)); rx->shadow[idx + i].addr_high = htobe32(MXGE_HIGHPART_TO_U32(seg[i].ds_addr)); } #endif done: for (i = 0; i < rx->nbufs; i++) { if ((idx & 7) == 7) { mxge_submit_8rx(&rx->lanai[idx - 7], &rx->shadow[idx - 7]); } idx++; } return err; } #ifdef INET6 static uint16_t mxge_csum_generic(uint16_t *raw, int len) { uint32_t csum; csum = 0; while (len > 0) { csum += *raw; raw++; len -= 2; } csum = (csum >> 16) + (csum & 0xffff); csum = (csum >> 16) + (csum & 0xffff); return (uint16_t)csum; } static inline uint16_t mxge_rx_csum6(void *p, struct mbuf *m, uint32_t csum) { uint32_t partial; int nxt, cksum_offset; struct ip6_hdr *ip6 = p; uint16_t c; nxt = ip6->ip6_nxt; cksum_offset = sizeof (*ip6) + ETHER_HDR_LEN; if (nxt != IPPROTO_TCP && nxt != IPPROTO_UDP) { cksum_offset = ip6_lasthdr(m, ETHER_HDR_LEN, IPPROTO_IPV6, &nxt); if (nxt != IPPROTO_TCP && nxt != IPPROTO_UDP) return (1); } /* * IPv6 headers do not contain a checksum, and hence * do not checksum to zero, so they don't "fall out" * of the partial checksum calculation like IPv4 * headers do. We need to fix the partial checksum by * subtracting the checksum of the IPv6 header. */ partial = mxge_csum_generic((uint16_t *)ip6, cksum_offset - ETHER_HDR_LEN); csum += ~partial; csum += (csum < ~partial); csum = (csum >> 16) + (csum & 0xFFFF); csum = (csum >> 16) + (csum & 0xFFFF); c = in6_cksum_pseudo(ip6, m->m_pkthdr.len - cksum_offset, nxt, csum); c ^= 0xffff; return (c); } #endif /* INET6 */ /* * Myri10GE hardware checksums are not valid if the sender * padded the frame with non-zero padding. This is because * the firmware just does a simple 16-bit 1s complement * checksum across the entire frame, excluding the first 14 * bytes. It is best to simply to check the checksum and * tell the stack about it only if the checksum is good */ static inline uint16_t mxge_rx_csum(struct mbuf *m, int csum) { struct ether_header *eh; #ifdef INET struct ip *ip; #endif #if defined(INET) || defined(INET6) int cap = m->m_pkthdr.rcvif->if_capenable; #endif uint16_t c, etype; eh = mtod(m, struct ether_header *); etype = ntohs(eh->ether_type); switch (etype) { #ifdef INET case ETHERTYPE_IP: if ((cap & IFCAP_RXCSUM) == 0) return (1); ip = (struct ip *)(eh + 1); if (ip->ip_p != IPPROTO_TCP && ip->ip_p != IPPROTO_UDP) return (1); c = in_pseudo(ip->ip_src.s_addr, ip->ip_dst.s_addr, htonl(ntohs(csum) + ntohs(ip->ip_len) - (ip->ip_hl << 2) + ip->ip_p)); c ^= 0xffff; break; #endif #ifdef INET6 case ETHERTYPE_IPV6: if ((cap & IFCAP_RXCSUM_IPV6) == 0) return (1); c = mxge_rx_csum6((eh + 1), m, csum); break; #endif default: c = 1; } return (c); } static void mxge_vlan_tag_remove(struct mbuf *m, uint32_t *csum) { struct ether_vlan_header *evl; struct ether_header *eh; uint32_t partial; evl = mtod(m, struct ether_vlan_header *); eh = mtod(m, struct ether_header *); /* * fix checksum by subtracting ETHER_VLAN_ENCAP_LEN bytes * after what the firmware thought was the end of the ethernet * header. 
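 * The subtraction is done in ones-complement arithmetic below: ~partial is
 * added, the end-around carry is folded back in, and the 32-bit sum is then
 * reduced to 16 bits before the checksum is converted back to network order.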
*/ /* put checksum into host byte order */ *csum = ntohs(*csum); partial = ntohl(*(uint32_t *)(mtod(m, char *) + ETHER_HDR_LEN)); (*csum) += ~partial; (*csum) += ((*csum) < ~partial); (*csum) = ((*csum) >> 16) + ((*csum) & 0xFFFF); (*csum) = ((*csum) >> 16) + ((*csum) & 0xFFFF); /* restore checksum to network byte order; later consumers expect this */ *csum = htons(*csum); /* save the tag */ #ifdef MXGE_NEW_VLAN_API m->m_pkthdr.ether_vtag = ntohs(evl->evl_tag); #else { struct m_tag *mtag; mtag = m_tag_alloc(MTAG_VLAN, MTAG_VLAN_TAG, sizeof(u_int), M_NOWAIT); if (mtag == NULL) return; VLAN_TAG_VALUE(mtag) = ntohs(evl->evl_tag); m_tag_prepend(m, mtag); } #endif m->m_flags |= M_VLANTAG; /* * Remove the 802.1q header by copying the Ethernet * addresses over it and adjusting the beginning of * the data in the mbuf. The encapsulated Ethernet * type field is already in place. */ bcopy((char *)evl, (char *)evl + ETHER_VLAN_ENCAP_LEN, ETHER_HDR_LEN - ETHER_TYPE_LEN); m_adj(m, ETHER_VLAN_ENCAP_LEN); } static inline void mxge_rx_done_big(struct mxge_slice_state *ss, uint32_t len, uint32_t csum, int lro) { mxge_softc_t *sc; struct ifnet *ifp; struct mbuf *m; struct ether_header *eh; mxge_rx_ring_t *rx; bus_dmamap_t old_map; int idx; sc = ss->sc; ifp = sc->ifp; rx = &ss->rx_big; idx = rx->cnt & rx->mask; rx->cnt += rx->nbufs; /* save a pointer to the received mbuf */ m = rx->info[idx].m; /* try to replace the received mbuf */ if (mxge_get_buf_big(ss, rx->extra_map, idx)) { /* drop the frame -- the old mbuf is re-cycled */ if_inc_counter(ifp, IFCOUNTER_IERRORS, 1); return; } /* unmap the received buffer */ old_map = rx->info[idx].map; bus_dmamap_sync(rx->dmat, old_map, BUS_DMASYNC_POSTREAD); bus_dmamap_unload(rx->dmat, old_map); /* swap the bus_dmamap_t's */ rx->info[idx].map = rx->extra_map; rx->extra_map = old_map; /* mcp implicitly skips 1st 2 bytes so that packet is properly * aligned */ m->m_data += MXGEFW_PAD; m->m_pkthdr.rcvif = ifp; m->m_len = m->m_pkthdr.len = len; ss->ipackets++; eh = mtod(m, struct ether_header *); if (eh->ether_type == htons(ETHERTYPE_VLAN)) { mxge_vlan_tag_remove(m, &csum); } /* flowid only valid if RSS hashing is enabled */ if (sc->num_slices > 1) { m->m_pkthdr.flowid = (ss - sc->ss); M_HASHTYPE_SET(m, M_HASHTYPE_OPAQUE); } /* if the checksum is valid, mark it in the mbuf header */ if ((ifp->if_capenable & (IFCAP_RXCSUM_IPV6 | IFCAP_RXCSUM)) && (0 == mxge_rx_csum(m, csum))) { /* Tell the stack that the checksum is good */ m->m_pkthdr.csum_data = 0xffff; m->m_pkthdr.csum_flags = CSUM_PSEUDO_HDR | CSUM_DATA_VALID; #if defined(INET) || defined (INET6) if (lro && (0 == tcp_lro_rx(&ss->lc, m, 0))) return; #endif } /* pass the frame up the stack */ (*ifp->if_input)(ifp, m); } static inline void mxge_rx_done_small(struct mxge_slice_state *ss, uint32_t len, uint32_t csum, int lro) { mxge_softc_t *sc; struct ifnet *ifp; struct ether_header *eh; struct mbuf *m; mxge_rx_ring_t *rx; bus_dmamap_t old_map; int idx; sc = ss->sc; ifp = sc->ifp; rx = &ss->rx_small; idx = rx->cnt & rx->mask; rx->cnt++; /* save a pointer to the received mbuf */ m = rx->info[idx].m; /* try to replace the received mbuf */ if (mxge_get_buf_small(ss, rx->extra_map, idx)) { /* drop the frame -- the old mbuf is re-cycled */ if_inc_counter(ifp, IFCOUNTER_IERRORS, 1); return; } /* unmap the received buffer */ old_map = rx->info[idx].map; bus_dmamap_sync(rx->dmat, old_map, BUS_DMASYNC_POSTREAD); bus_dmamap_unload(rx->dmat, old_map); /* swap the bus_dmamap_t's */ rx->info[idx].map = rx->extra_map; rx->extra_map = 
old_map; /* mcp implicitly skips 1st 2 bytes so that packet is properly * aligned */ m->m_data += MXGEFW_PAD; m->m_pkthdr.rcvif = ifp; m->m_len = m->m_pkthdr.len = len; ss->ipackets++; eh = mtod(m, struct ether_header *); if (eh->ether_type == htons(ETHERTYPE_VLAN)) { mxge_vlan_tag_remove(m, &csum); } /* flowid only valid if RSS hashing is enabled */ if (sc->num_slices > 1) { m->m_pkthdr.flowid = (ss - sc->ss); M_HASHTYPE_SET(m, M_HASHTYPE_OPAQUE); } /* if the checksum is valid, mark it in the mbuf header */ if ((ifp->if_capenable & (IFCAP_RXCSUM_IPV6 | IFCAP_RXCSUM)) && (0 == mxge_rx_csum(m, csum))) { /* Tell the stack that the checksum is good */ m->m_pkthdr.csum_data = 0xffff; m->m_pkthdr.csum_flags = CSUM_PSEUDO_HDR | CSUM_DATA_VALID; #if defined(INET) || defined (INET6) if (lro && (0 == tcp_lro_rx(&ss->lc, m, csum))) return; #endif } /* pass the frame up the stack */ (*ifp->if_input)(ifp, m); } static inline void mxge_clean_rx_done(struct mxge_slice_state *ss) { mxge_rx_done_t *rx_done = &ss->rx_done; int limit = 0; uint16_t length; uint16_t checksum; int lro; lro = ss->sc->ifp->if_capenable & IFCAP_LRO; while (rx_done->entry[rx_done->idx].length != 0) { length = ntohs(rx_done->entry[rx_done->idx].length); rx_done->entry[rx_done->idx].length = 0; checksum = rx_done->entry[rx_done->idx].checksum; if (length <= (MHLEN - MXGEFW_PAD)) mxge_rx_done_small(ss, length, checksum, lro); else mxge_rx_done_big(ss, length, checksum, lro); rx_done->cnt++; rx_done->idx = rx_done->cnt & rx_done->mask; /* limit potential for livelock */ if (__predict_false(++limit > rx_done->mask / 2)) break; } #if defined(INET) || defined (INET6) tcp_lro_flush_all(&ss->lc); #endif } static inline void mxge_tx_done(struct mxge_slice_state *ss, uint32_t mcp_idx) { struct ifnet *ifp; mxge_tx_ring_t *tx; struct mbuf *m; bus_dmamap_t map; int idx; int *flags; tx = &ss->tx; ifp = ss->sc->ifp; while (tx->pkt_done != mcp_idx) { idx = tx->done & tx->mask; tx->done++; m = tx->info[idx].m; /* mbuf and DMA map only attached to the first segment per-mbuf */ if (m != NULL) { ss->obytes += m->m_pkthdr.len; if (m->m_flags & M_MCAST) ss->omcasts++; ss->opackets++; tx->info[idx].m = NULL; map = tx->info[idx].map; bus_dmamap_unload(tx->dmat, map); m_freem(m); } if (tx->info[idx].flag) { tx->info[idx].flag = 0; tx->pkt_done++; } } /* If we have space, clear IFF_OACTIVE to tell the stack that its OK to send packets */ #ifdef IFNET_BUF_RING flags = &ss->if_drv_flags; #else flags = &ifp->if_drv_flags; #endif mtx_lock(&ss->tx.mtx); if ((*flags) & IFF_DRV_OACTIVE && tx->req - tx->done < (tx->mask + 1)/4) { *(flags) &= ~IFF_DRV_OACTIVE; ss->tx.wake++; mxge_start_locked(ss); } #ifdef IFNET_BUF_RING if ((ss->sc->num_slices > 1) && (tx->req == tx->done)) { /* let the NIC stop polling this queue, since there * are no more transmits pending */ if (tx->req == tx->done) { *tx->send_stop = 1; tx->queue_active = 0; tx->deactivate++; wmb(); } } #endif mtx_unlock(&ss->tx.mtx); } static struct mxge_media_type mxge_xfp_media_types[] = { {IFM_10G_CX4, 0x7f, "10GBASE-CX4 (module)"}, {IFM_10G_SR, (1 << 7), "10GBASE-SR"}, {IFM_10G_LR, (1 << 6), "10GBASE-LR"}, {0, (1 << 5), "10GBASE-ER"}, {IFM_10G_LRM, (1 << 4), "10GBASE-LRM"}, {0, (1 << 3), "10GBASE-SW"}, {0, (1 << 2), "10GBASE-LW"}, {0, (1 << 1), "10GBASE-EW"}, {0, (1 << 0), "Reserved"} }; static struct mxge_media_type mxge_sfp_media_types[] = { {IFM_10G_TWINAX, 0, "10GBASE-Twinax"}, {0, (1 << 7), "Reserved"}, {IFM_10G_LRM, (1 << 6), "10GBASE-LRM"}, {IFM_10G_LR, (1 << 5), "10GBASE-LR"}, {IFM_10G_SR, (1 << 
4), "10GBASE-SR"}, {IFM_10G_TWINAX,(1 << 0), "10GBASE-Twinax"} }; static void mxge_media_set(mxge_softc_t *sc, int media_type) { ifmedia_add(&sc->media, IFM_ETHER | IFM_FDX | media_type, 0, NULL); ifmedia_set(&sc->media, IFM_ETHER | IFM_FDX | media_type); sc->current_media = media_type; sc->media.ifm_media = sc->media.ifm_cur->ifm_media; } static void mxge_media_init(mxge_softc_t *sc) { char *ptr; int i; ifmedia_removeall(&sc->media); mxge_media_set(sc, IFM_AUTO); /* * parse the product code to deterimine the interface type * (CX4, XFP, Quad Ribbon Fiber) by looking at the character * after the 3rd dash in the driver's cached copy of the * EEPROM's product code string. */ ptr = sc->product_code_string; if (ptr == NULL) { device_printf(sc->dev, "Missing product code\n"); return; } for (i = 0; i < 3; i++, ptr++) { ptr = strchr(ptr, '-'); if (ptr == NULL) { device_printf(sc->dev, "only %d dashes in PC?!?\n", i); return; } } if (*ptr == 'C' || *(ptr +1) == 'C') { /* -C is CX4 */ sc->connector = MXGE_CX4; mxge_media_set(sc, IFM_10G_CX4); } else if (*ptr == 'Q') { /* -Q is Quad Ribbon Fiber */ sc->connector = MXGE_QRF; device_printf(sc->dev, "Quad Ribbon Fiber Media\n"); /* FreeBSD has no media type for Quad ribbon fiber */ } else if (*ptr == 'R') { /* -R is XFP */ sc->connector = MXGE_XFP; } else if (*ptr == 'S' || *(ptr +1) == 'S') { /* -S or -2S is SFP+ */ sc->connector = MXGE_SFP; } else { device_printf(sc->dev, "Unknown media type: %c\n", *ptr); } } /* * Determine the media type for a NIC. Some XFPs will identify * themselves only when their link is up, so this is initiated via a * link up interrupt. However, this can potentially take up to * several milliseconds, so it is run via the watchdog routine, rather * than in the interrupt handler itself. */ static void mxge_media_probe(mxge_softc_t *sc) { mxge_cmd_t cmd; char *cage_type; struct mxge_media_type *mxge_media_types = NULL; int i, err, ms, mxge_media_type_entries; uint32_t byte; sc->need_media_probe = 0; if (sc->connector == MXGE_XFP) { /* -R is XFP */ mxge_media_types = mxge_xfp_media_types; mxge_media_type_entries = nitems(mxge_xfp_media_types); byte = MXGE_XFP_COMPLIANCE_BYTE; cage_type = "XFP"; } else if (sc->connector == MXGE_SFP) { /* -S or -2S is SFP+ */ mxge_media_types = mxge_sfp_media_types; mxge_media_type_entries = nitems(mxge_sfp_media_types); cage_type = "SFP+"; byte = 3; } else { /* nothing to do; media type cannot change */ return; } /* * At this point we know the NIC has an XFP cage, so now we * try to determine what is in the cage by using the * firmware's XFP I2C commands to read the XFP 10GbE compilance * register. 
We read just one byte, which may take over * a millisecond */ cmd.data0 = 0; /* just fetch 1 byte, not all 256 */ cmd.data1 = byte; err = mxge_send_cmd(sc, MXGEFW_CMD_I2C_READ, &cmd); if (err == MXGEFW_CMD_ERROR_I2C_FAILURE) { device_printf(sc->dev, "failed to read XFP\n"); } if (err == MXGEFW_CMD_ERROR_I2C_ABSENT) { device_printf(sc->dev, "Type R/S with no XFP!?!?\n"); } if (err != MXGEFW_CMD_OK) { return; } /* now we wait for the data to be cached */ cmd.data0 = byte; err = mxge_send_cmd(sc, MXGEFW_CMD_I2C_BYTE, &cmd); for (ms = 0; (err == EBUSY) && (ms < 50); ms++) { DELAY(1000); cmd.data0 = byte; err = mxge_send_cmd(sc, MXGEFW_CMD_I2C_BYTE, &cmd); } if (err != MXGEFW_CMD_OK) { device_printf(sc->dev, "failed to read %s (%d, %dms)\n", cage_type, err, ms); return; } if (cmd.data0 == mxge_media_types[0].bitmask) { if (mxge_verbose) device_printf(sc->dev, "%s:%s\n", cage_type, mxge_media_types[0].name); if (sc->current_media != mxge_media_types[0].flag) { mxge_media_init(sc); mxge_media_set(sc, mxge_media_types[0].flag); } return; } for (i = 1; i < mxge_media_type_entries; i++) { if (cmd.data0 & mxge_media_types[i].bitmask) { if (mxge_verbose) device_printf(sc->dev, "%s:%s\n", cage_type, mxge_media_types[i].name); if (sc->current_media != mxge_media_types[i].flag) { mxge_media_init(sc); mxge_media_set(sc, mxge_media_types[i].flag); } return; } } if (mxge_verbose) device_printf(sc->dev, "%s media 0x%x unknown\n", cage_type, cmd.data0); return; } static void mxge_intr(void *arg) { struct mxge_slice_state *ss = arg; mxge_softc_t *sc = ss->sc; mcp_irq_data_t *stats = ss->fw_stats; mxge_tx_ring_t *tx = &ss->tx; mxge_rx_done_t *rx_done = &ss->rx_done; uint32_t send_done_count; uint8_t valid; #ifndef IFNET_BUF_RING /* an interrupt on a non-zero slice is implicitly valid since MSI-X irqs are not shared */ if (ss != sc->ss) { mxge_clean_rx_done(ss); *ss->irq_claim = be32toh(3); return; } #endif /* make sure the DMA has finished */ if (!stats->valid) { return; } valid = stats->valid; if (sc->legacy_irq) { /* lower legacy IRQ */ *sc->irq_deassert = 0; if (!mxge_deassert_wait) /* don't wait for conf. that irq is low */ stats->valid = 0; } else { stats->valid = 0; } /* loop while waiting for legacy irq deassertion */ do { /* check for transmit completes and receives */ send_done_count = be32toh(stats->send_done_count); while ((send_done_count != tx->pkt_done) || (rx_done->entry[rx_done->idx].length != 0)) { if (send_done_count != tx->pkt_done) mxge_tx_done(ss, (int)send_done_count); mxge_clean_rx_done(ss); send_done_count = be32toh(stats->send_done_count); } if (sc->legacy_irq && mxge_deassert_wait) wmb(); } while (*((volatile uint8_t *) &stats->valid)); /* fw link & error stats meaningful only on the first slice */ if (__predict_false((ss == sc->ss) && stats->stats_updated)) { if (sc->link_state != stats->link_up) { sc->link_state = stats->link_up; if (sc->link_state) { if_link_state_change(sc->ifp, LINK_STATE_UP); if (mxge_verbose) device_printf(sc->dev, "link up\n"); } else { if_link_state_change(sc->ifp, LINK_STATE_DOWN); if (mxge_verbose) device_printf(sc->dev, "link down\n"); } sc->need_media_probe = 1; } if (sc->rdma_tags_available != be32toh(stats->rdma_tags_available)) { sc->rdma_tags_available = be32toh(stats->rdma_tags_available); device_printf(sc->dev, "RDMA timed out! 
%d tags " "left\n", sc->rdma_tags_available); } if (stats->link_down) { sc->down_cnt += stats->link_down; sc->link_state = 0; if_link_state_change(sc->ifp, LINK_STATE_DOWN); } } /* check to see if we have rx token to pass back */ if (valid & 0x1) *ss->irq_claim = be32toh(3); *(ss->irq_claim + 1) = be32toh(3); } static void mxge_init(void *arg) { mxge_softc_t *sc = arg; struct ifnet *ifp = sc->ifp; mtx_lock(&sc->driver_mtx); if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) (void) mxge_open(sc); mtx_unlock(&sc->driver_mtx); } static void mxge_free_slice_mbufs(struct mxge_slice_state *ss) { int i; #if defined(INET) || defined(INET6) tcp_lro_free(&ss->lc); #endif for (i = 0; i <= ss->rx_big.mask; i++) { if (ss->rx_big.info[i].m == NULL) continue; bus_dmamap_unload(ss->rx_big.dmat, ss->rx_big.info[i].map); m_freem(ss->rx_big.info[i].m); ss->rx_big.info[i].m = NULL; } for (i = 0; i <= ss->rx_small.mask; i++) { if (ss->rx_small.info[i].m == NULL) continue; bus_dmamap_unload(ss->rx_small.dmat, ss->rx_small.info[i].map); m_freem(ss->rx_small.info[i].m); ss->rx_small.info[i].m = NULL; } /* transmit ring used only on the first slice */ if (ss->tx.info == NULL) return; for (i = 0; i <= ss->tx.mask; i++) { ss->tx.info[i].flag = 0; if (ss->tx.info[i].m == NULL) continue; bus_dmamap_unload(ss->tx.dmat, ss->tx.info[i].map); m_freem(ss->tx.info[i].m); ss->tx.info[i].m = NULL; } } static void mxge_free_mbufs(mxge_softc_t *sc) { int slice; for (slice = 0; slice < sc->num_slices; slice++) mxge_free_slice_mbufs(&sc->ss[slice]); } static void mxge_free_slice_rings(struct mxge_slice_state *ss) { int i; if (ss->rx_done.entry != NULL) mxge_dma_free(&ss->rx_done.dma); ss->rx_done.entry = NULL; if (ss->tx.req_bytes != NULL) free(ss->tx.req_bytes, M_DEVBUF); ss->tx.req_bytes = NULL; if (ss->tx.seg_list != NULL) free(ss->tx.seg_list, M_DEVBUF); ss->tx.seg_list = NULL; if (ss->rx_small.shadow != NULL) free(ss->rx_small.shadow, M_DEVBUF); ss->rx_small.shadow = NULL; if (ss->rx_big.shadow != NULL) free(ss->rx_big.shadow, M_DEVBUF); ss->rx_big.shadow = NULL; if (ss->tx.info != NULL) { if (ss->tx.dmat != NULL) { for (i = 0; i <= ss->tx.mask; i++) { bus_dmamap_destroy(ss->tx.dmat, ss->tx.info[i].map); } bus_dma_tag_destroy(ss->tx.dmat); } free(ss->tx.info, M_DEVBUF); } ss->tx.info = NULL; if (ss->rx_small.info != NULL) { if (ss->rx_small.dmat != NULL) { for (i = 0; i <= ss->rx_small.mask; i++) { bus_dmamap_destroy(ss->rx_small.dmat, ss->rx_small.info[i].map); } bus_dmamap_destroy(ss->rx_small.dmat, ss->rx_small.extra_map); bus_dma_tag_destroy(ss->rx_small.dmat); } free(ss->rx_small.info, M_DEVBUF); } ss->rx_small.info = NULL; if (ss->rx_big.info != NULL) { if (ss->rx_big.dmat != NULL) { for (i = 0; i <= ss->rx_big.mask; i++) { bus_dmamap_destroy(ss->rx_big.dmat, ss->rx_big.info[i].map); } bus_dmamap_destroy(ss->rx_big.dmat, ss->rx_big.extra_map); bus_dma_tag_destroy(ss->rx_big.dmat); } free(ss->rx_big.info, M_DEVBUF); } ss->rx_big.info = NULL; } static void mxge_free_rings(mxge_softc_t *sc) { int slice; for (slice = 0; slice < sc->num_slices; slice++) mxge_free_slice_rings(&sc->ss[slice]); } static int mxge_alloc_slice_rings(struct mxge_slice_state *ss, int rx_ring_entries, int tx_ring_entries) { mxge_softc_t *sc = ss->sc; size_t bytes; int err, i; /* allocate per-slice receive resources */ ss->rx_small.mask = ss->rx_big.mask = rx_ring_entries - 1; ss->rx_done.mask = (2 * rx_ring_entries) - 1; /* allocate the rx shadow rings */ bytes = rx_ring_entries * sizeof (*ss->rx_small.shadow); ss->rx_small.shadow = malloc(bytes, 
M_DEVBUF, M_ZERO|M_WAITOK); bytes = rx_ring_entries * sizeof (*ss->rx_big.shadow); ss->rx_big.shadow = malloc(bytes, M_DEVBUF, M_ZERO|M_WAITOK); /* allocate the rx host info rings */ bytes = rx_ring_entries * sizeof (*ss->rx_small.info); ss->rx_small.info = malloc(bytes, M_DEVBUF, M_ZERO|M_WAITOK); bytes = rx_ring_entries * sizeof (*ss->rx_big.info); ss->rx_big.info = malloc(bytes, M_DEVBUF, M_ZERO|M_WAITOK); /* allocate the rx busdma resources */ err = bus_dma_tag_create(sc->parent_dmat, /* parent */ 1, /* alignment */ 4096, /* boundary */ BUS_SPACE_MAXADDR, /* low */ BUS_SPACE_MAXADDR, /* high */ NULL, NULL, /* filter */ MHLEN, /* maxsize */ 1, /* num segs */ MHLEN, /* maxsegsize */ BUS_DMA_ALLOCNOW, /* flags */ NULL, NULL, /* lock */ &ss->rx_small.dmat); /* tag */ if (err != 0) { device_printf(sc->dev, "Err %d allocating rx_small dmat\n", err); return err; } err = bus_dma_tag_create(sc->parent_dmat, /* parent */ 1, /* alignment */ #if MXGE_VIRT_JUMBOS 4096, /* boundary */ #else 0, /* boundary */ #endif BUS_SPACE_MAXADDR, /* low */ BUS_SPACE_MAXADDR, /* high */ NULL, NULL, /* filter */ 3*4096, /* maxsize */ #if MXGE_VIRT_JUMBOS 3, /* num segs */ 4096, /* maxsegsize*/ #else 1, /* num segs */ MJUM9BYTES, /* maxsegsize*/ #endif BUS_DMA_ALLOCNOW, /* flags */ NULL, NULL, /* lock */ &ss->rx_big.dmat); /* tag */ if (err != 0) { device_printf(sc->dev, "Err %d allocating rx_big dmat\n", err); return err; } for (i = 0; i <= ss->rx_small.mask; i++) { err = bus_dmamap_create(ss->rx_small.dmat, 0, &ss->rx_small.info[i].map); if (err != 0) { device_printf(sc->dev, "Err %d rx_small dmamap\n", err); return err; } } err = bus_dmamap_create(ss->rx_small.dmat, 0, &ss->rx_small.extra_map); if (err != 0) { device_printf(sc->dev, "Err %d extra rx_small dmamap\n", err); return err; } for (i = 0; i <= ss->rx_big.mask; i++) { err = bus_dmamap_create(ss->rx_big.dmat, 0, &ss->rx_big.info[i].map); if (err != 0) { device_printf(sc->dev, "Err %d rx_big dmamap\n", err); return err; } } err = bus_dmamap_create(ss->rx_big.dmat, 0, &ss->rx_big.extra_map); if (err != 0) { device_printf(sc->dev, "Err %d extra rx_big dmamap\n", err); return err; } /* now allocate TX resources */ #ifndef IFNET_BUF_RING /* only use a single TX ring for now */ if (ss != ss->sc->ss) return 0; #endif ss->tx.mask = tx_ring_entries - 1; ss->tx.max_desc = MIN(MXGE_MAX_SEND_DESC, tx_ring_entries / 4); /* allocate the tx request copy block */ bytes = 8 + sizeof (*ss->tx.req_list) * (ss->tx.max_desc + 4); ss->tx.req_bytes = malloc(bytes, M_DEVBUF, M_WAITOK); /* ensure req_list entries are aligned to 8 bytes */ ss->tx.req_list = (mcp_kreq_ether_send_t *) ((unsigned long)(ss->tx.req_bytes + 7) & ~7UL); /* allocate the tx busdma segment list */ bytes = sizeof (*ss->tx.seg_list) * ss->tx.max_desc; ss->tx.seg_list = (bus_dma_segment_t *) malloc(bytes, M_DEVBUF, M_WAITOK); /* allocate the tx host info ring */ bytes = tx_ring_entries * sizeof (*ss->tx.info); ss->tx.info = malloc(bytes, M_DEVBUF, M_ZERO|M_WAITOK); /* allocate the tx busdma resources */ err = bus_dma_tag_create(sc->parent_dmat, /* parent */ 1, /* alignment */ sc->tx_boundary, /* boundary */ BUS_SPACE_MAXADDR, /* low */ BUS_SPACE_MAXADDR, /* high */ NULL, NULL, /* filter */ 65536 + 256, /* maxsize */ ss->tx.max_desc - 2, /* num segs */ sc->tx_boundary, /* maxsegsz */ BUS_DMA_ALLOCNOW, /* flags */ NULL, NULL, /* lock */ &ss->tx.dmat); /* tag */ if (err != 0) { device_printf(sc->dev, "Err %d allocating tx dmat\n", err); return err; } /* now use these tags to setup dmamaps for each slot in the 
ring */ for (i = 0; i <= ss->tx.mask; i++) { err = bus_dmamap_create(ss->tx.dmat, 0, &ss->tx.info[i].map); if (err != 0) { device_printf(sc->dev, "Err %d tx dmamap\n", err); return err; } } return 0; } static int mxge_alloc_rings(mxge_softc_t *sc) { mxge_cmd_t cmd; int tx_ring_size; int tx_ring_entries, rx_ring_entries; int err, slice; /* get ring sizes */ err = mxge_send_cmd(sc, MXGEFW_CMD_GET_SEND_RING_SIZE, &cmd); tx_ring_size = cmd.data0; if (err != 0) { device_printf(sc->dev, "Cannot determine tx ring sizes\n"); goto abort; } tx_ring_entries = tx_ring_size / sizeof (mcp_kreq_ether_send_t); rx_ring_entries = sc->rx_ring_size / sizeof (mcp_dma_addr_t); IFQ_SET_MAXLEN(&sc->ifp->if_snd, tx_ring_entries - 1); sc->ifp->if_snd.ifq_drv_maxlen = sc->ifp->if_snd.ifq_maxlen; IFQ_SET_READY(&sc->ifp->if_snd); for (slice = 0; slice < sc->num_slices; slice++) { err = mxge_alloc_slice_rings(&sc->ss[slice], rx_ring_entries, tx_ring_entries); if (err != 0) goto abort; } return 0; abort: mxge_free_rings(sc); return err; } static void mxge_choose_params(int mtu, int *big_buf_size, int *cl_size, int *nbufs) { int bufsize = mtu + ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN + MXGEFW_PAD; if (bufsize < MCLBYTES) { /* easy, everything fits in a single buffer */ *big_buf_size = MCLBYTES; *cl_size = MCLBYTES; *nbufs = 1; return; } if (bufsize < MJUMPAGESIZE) { /* still easy, everything still fits in a single buffer */ *big_buf_size = MJUMPAGESIZE; *cl_size = MJUMPAGESIZE; *nbufs = 1; return; } #if MXGE_VIRT_JUMBOS /* now we need to use virtually contiguous buffers */ *cl_size = MJUM9BYTES; *big_buf_size = 4096; *nbufs = mtu / 4096 + 1; /* needs to be a power of two, so round up */ if (*nbufs == 3) *nbufs = 4; #else *cl_size = MJUM9BYTES; *big_buf_size = MJUM9BYTES; *nbufs = 1; #endif } static int mxge_slice_open(struct mxge_slice_state *ss, int nbufs, int cl_size) { mxge_softc_t *sc; mxge_cmd_t cmd; bus_dmamap_t map; int err, i, slice; sc = ss->sc; slice = ss - sc->ss; #if defined(INET) || defined(INET6) (void)tcp_lro_init(&ss->lc); #endif ss->lc.ifp = sc->ifp; /* get the lanai pointers to the send and receive rings */ err = 0; #ifndef IFNET_BUF_RING /* We currently only send from the first slice */ if (slice == 0) { #endif cmd.data0 = slice; err = mxge_send_cmd(sc, MXGEFW_CMD_GET_SEND_OFFSET, &cmd); ss->tx.lanai = (volatile mcp_kreq_ether_send_t *)(sc->sram + cmd.data0); ss->tx.send_go = (volatile uint32_t *) (sc->sram + MXGEFW_ETH_SEND_GO + 64 * slice); ss->tx.send_stop = (volatile uint32_t *) (sc->sram + MXGEFW_ETH_SEND_STOP + 64 * slice); #ifndef IFNET_BUF_RING } #endif cmd.data0 = slice; err |= mxge_send_cmd(sc, MXGEFW_CMD_GET_SMALL_RX_OFFSET, &cmd); ss->rx_small.lanai = (volatile mcp_kreq_ether_recv_t *)(sc->sram + cmd.data0); cmd.data0 = slice; err |= mxge_send_cmd(sc, MXGEFW_CMD_GET_BIG_RX_OFFSET, &cmd); ss->rx_big.lanai = (volatile mcp_kreq_ether_recv_t *)(sc->sram + cmd.data0); if (err != 0) { device_printf(sc->dev, "failed to get ring sizes or locations\n"); return EIO; } /* stock receive rings */ for (i = 0; i <= ss->rx_small.mask; i++) { map = ss->rx_small.info[i].map; err = mxge_get_buf_small(ss, map, i); if (err) { device_printf(sc->dev, "alloced %d/%d smalls\n", i, ss->rx_small.mask + 1); return ENOMEM; } } for (i = 0; i <= ss->rx_big.mask; i++) { ss->rx_big.shadow[i].addr_low = 0xffffffff; ss->rx_big.shadow[i].addr_high = 0xffffffff; } ss->rx_big.nbufs = nbufs; ss->rx_big.cl_size = cl_size; ss->rx_big.mlen = ss->sc->ifp->if_mtu + ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN + MXGEFW_PAD; for (i = 0; i <= 
ss->rx_big.mask; i += ss->rx_big.nbufs) { map = ss->rx_big.info[i].map; err = mxge_get_buf_big(ss, map, i); if (err) { device_printf(sc->dev, "alloced %d/%d bigs\n", i, ss->rx_big.mask + 1); return ENOMEM; } } return 0; } static int mxge_open(mxge_softc_t *sc) { mxge_cmd_t cmd; int err, big_bytes, nbufs, slice, cl_size, i; bus_addr_t bus; volatile uint8_t *itable; struct mxge_slice_state *ss; /* Copy the MAC address in case it was overridden */ bcopy(IF_LLADDR(sc->ifp), sc->mac_addr, ETHER_ADDR_LEN); err = mxge_reset(sc, 1); if (err != 0) { device_printf(sc->dev, "failed to reset\n"); return EIO; } if (sc->num_slices > 1) { /* setup the indirection table */ cmd.data0 = sc->num_slices; err = mxge_send_cmd(sc, MXGEFW_CMD_SET_RSS_TABLE_SIZE, &cmd); err |= mxge_send_cmd(sc, MXGEFW_CMD_GET_RSS_TABLE_OFFSET, &cmd); if (err != 0) { device_printf(sc->dev, "failed to setup rss tables\n"); return err; } /* just enable an identity mapping */ itable = sc->sram + cmd.data0; for (i = 0; i < sc->num_slices; i++) itable[i] = (uint8_t)i; cmd.data0 = 1; cmd.data1 = mxge_rss_hash_type; err = mxge_send_cmd(sc, MXGEFW_CMD_SET_RSS_ENABLE, &cmd); if (err != 0) { device_printf(sc->dev, "failed to enable slices\n"); return err; } } mxge_choose_params(sc->ifp->if_mtu, &big_bytes, &cl_size, &nbufs); cmd.data0 = nbufs; err = mxge_send_cmd(sc, MXGEFW_CMD_ALWAYS_USE_N_BIG_BUFFERS, &cmd); /* error is only meaningful if we're trying to set MXGEFW_CMD_ALWAYS_USE_N_BIG_BUFFERS > 1 */ if (err && nbufs > 1) { device_printf(sc->dev, "Failed to set alway-use-n to %d\n", nbufs); return EIO; } /* Give the firmware the mtu and the big and small buffer sizes. The firmware wants the big buf size to be a power of two. Luckily, FreeBSD's clusters are powers of two */ cmd.data0 = sc->ifp->if_mtu + ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN; err = mxge_send_cmd(sc, MXGEFW_CMD_SET_MTU, &cmd); cmd.data0 = MHLEN - MXGEFW_PAD; err |= mxge_send_cmd(sc, MXGEFW_CMD_SET_SMALL_BUFFER_SIZE, &cmd); cmd.data0 = big_bytes; err |= mxge_send_cmd(sc, MXGEFW_CMD_SET_BIG_BUFFER_SIZE, &cmd); if (err != 0) { device_printf(sc->dev, "failed to setup params\n"); goto abort; } /* Now give him the pointer to the stats block */ for (slice = 0; #ifdef IFNET_BUF_RING slice < sc->num_slices; #else slice < 1; #endif slice++) { ss = &sc->ss[slice]; cmd.data0 = MXGE_LOWPART_TO_U32(ss->fw_stats_dma.bus_addr); cmd.data1 = MXGE_HIGHPART_TO_U32(ss->fw_stats_dma.bus_addr); cmd.data2 = sizeof(struct mcp_irq_data); cmd.data2 |= (slice << 16); err |= mxge_send_cmd(sc, MXGEFW_CMD_SET_STATS_DMA_V2, &cmd); } if (err != 0) { bus = sc->ss->fw_stats_dma.bus_addr; bus += offsetof(struct mcp_irq_data, send_done_count); cmd.data0 = MXGE_LOWPART_TO_U32(bus); cmd.data1 = MXGE_HIGHPART_TO_U32(bus); err = mxge_send_cmd(sc, MXGEFW_CMD_SET_STATS_DMA_OBSOLETE, &cmd); /* Firmware cannot support multicast without STATS_DMA_V2 */ sc->fw_multicast_support = 0; } else { sc->fw_multicast_support = 1; } if (err != 0) { device_printf(sc->dev, "failed to setup params\n"); goto abort; } for (slice = 0; slice < sc->num_slices; slice++) { err = mxge_slice_open(&sc->ss[slice], nbufs, cl_size); if (err != 0) { device_printf(sc->dev, "couldn't open slice %d\n", slice); goto abort; } } /* Finally, start the firmware running */ err = mxge_send_cmd(sc, MXGEFW_CMD_ETHERNET_UP, &cmd); if (err) { device_printf(sc->dev, "Couldn't bring up link\n"); goto abort; } #ifdef IFNET_BUF_RING for (slice = 0; slice < sc->num_slices; slice++) { ss = &sc->ss[slice]; ss->if_drv_flags |= IFF_DRV_RUNNING; ss->if_drv_flags &= 
~IFF_DRV_OACTIVE; } #endif sc->ifp->if_drv_flags |= IFF_DRV_RUNNING; sc->ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; return 0; abort: mxge_free_mbufs(sc); return err; } static int mxge_close(mxge_softc_t *sc, int down) { mxge_cmd_t cmd; int err, old_down_cnt; #ifdef IFNET_BUF_RING struct mxge_slice_state *ss; int slice; #endif #ifdef IFNET_BUF_RING for (slice = 0; slice < sc->num_slices; slice++) { ss = &sc->ss[slice]; ss->if_drv_flags &= ~IFF_DRV_RUNNING; } #endif sc->ifp->if_drv_flags &= ~IFF_DRV_RUNNING; if (!down) { old_down_cnt = sc->down_cnt; wmb(); err = mxge_send_cmd(sc, MXGEFW_CMD_ETHERNET_DOWN, &cmd); if (err) { device_printf(sc->dev, "Couldn't bring down link\n"); } if (old_down_cnt == sc->down_cnt) { /* wait for down irq */ DELAY(10 * sc->intr_coal_delay); } wmb(); if (old_down_cnt == sc->down_cnt) { device_printf(sc->dev, "never got down irq\n"); } } mxge_free_mbufs(sc); return 0; } static void mxge_setup_cfg_space(mxge_softc_t *sc) { device_t dev = sc->dev; int reg; uint16_t lnk, pectl; /* find the PCIe link width and set max read request to 4KB*/ if (pci_find_cap(dev, PCIY_EXPRESS, ®) == 0) { lnk = pci_read_config(dev, reg + 0x12, 2); sc->link_width = (lnk >> 4) & 0x3f; if (sc->pectl == 0) { pectl = pci_read_config(dev, reg + 0x8, 2); pectl = (pectl & ~0x7000) | (5 << 12); pci_write_config(dev, reg + 0x8, pectl, 2); sc->pectl = pectl; } else { /* restore saved pectl after watchdog reset */ pci_write_config(dev, reg + 0x8, sc->pectl, 2); } } /* Enable DMA and Memory space access */ pci_enable_busmaster(dev); } static uint32_t mxge_read_reboot(mxge_softc_t *sc) { device_t dev = sc->dev; uint32_t vs; /* find the vendor specific offset */ if (pci_find_cap(dev, PCIY_VENDOR, &vs) != 0) { device_printf(sc->dev, "could not find vendor specific offset\n"); return (uint32_t)-1; } /* enable read32 mode */ pci_write_config(dev, vs + 0x10, 0x3, 1); /* tell NIC which register to read */ pci_write_config(dev, vs + 0x18, 0xfffffff0, 4); return (pci_read_config(dev, vs + 0x14, 4)); } static void mxge_watchdog_reset(mxge_softc_t *sc) { struct pci_devinfo *dinfo; struct mxge_slice_state *ss; int err, running, s, num_tx_slices = 1; uint32_t reboot; uint16_t cmd; err = ENXIO; device_printf(sc->dev, "Watchdog reset!\n"); /* * check to see if the NIC rebooted. If it did, then all of * PCI config space has been reset, and things like the * busmaster bit will be zero. If this is the case, then we * must restore PCI config space before the NIC can be used * again */ cmd = pci_read_config(sc->dev, PCIR_COMMAND, 2); if (cmd == 0xffff) { /* * maybe the watchdog caught the NIC rebooting; wait * up to 100ms for it to finish. 
If it does not come * back, then give up */ DELAY(1000*100); cmd = pci_read_config(sc->dev, PCIR_COMMAND, 2); if (cmd == 0xffff) { device_printf(sc->dev, "NIC disappeared!\n"); } } if ((cmd & PCIM_CMD_BUSMASTEREN) == 0) { /* print the reboot status */ reboot = mxge_read_reboot(sc); device_printf(sc->dev, "NIC rebooted, status = 0x%x\n", reboot); running = sc->ifp->if_drv_flags & IFF_DRV_RUNNING; if (running) { /* * quiesce NIC so that TX routines will not try to * xmit after restoration of BAR */ /* Mark the link as down */ if (sc->link_state) { sc->link_state = 0; if_link_state_change(sc->ifp, LINK_STATE_DOWN); } #ifdef IFNET_BUF_RING num_tx_slices = sc->num_slices; #endif /* grab all TX locks to ensure no tx */ for (s = 0; s < num_tx_slices; s++) { ss = &sc->ss[s]; mtx_lock(&ss->tx.mtx); } mxge_close(sc, 1); } /* restore PCI configuration space */ dinfo = device_get_ivars(sc->dev); pci_cfg_restore(sc->dev, dinfo); /* and redo any changes we made to our config space */ mxge_setup_cfg_space(sc); /* reload f/w */ err = mxge_load_firmware(sc, 0); if (err) { device_printf(sc->dev, "Unable to re-load f/w\n"); } if (running) { if (!err) err = mxge_open(sc); /* release all TX locks */ for (s = 0; s < num_tx_slices; s++) { ss = &sc->ss[s]; #ifdef IFNET_BUF_RING mxge_start_locked(ss); #endif mtx_unlock(&ss->tx.mtx); } } sc->watchdog_resets++; } else { device_printf(sc->dev, "NIC did not reboot, not resetting\n"); err = 0; } if (err) { device_printf(sc->dev, "watchdog reset failed\n"); } else { if (sc->dying == 2) sc->dying = 0; callout_reset(&sc->co_hdl, mxge_ticks, mxge_tick, sc); } } static void mxge_watchdog_task(void *arg, int pending) { mxge_softc_t *sc = arg; mtx_lock(&sc->driver_mtx); mxge_watchdog_reset(sc); mtx_unlock(&sc->driver_mtx); } static void mxge_warn_stuck(mxge_softc_t *sc, mxge_tx_ring_t *tx, int slice) { tx = &sc->ss[slice].tx; device_printf(sc->dev, "slice %d struck? 
ring state:\n", slice); device_printf(sc->dev, "tx.req=%d tx.done=%d, tx.queue_active=%d\n", tx->req, tx->done, tx->queue_active); device_printf(sc->dev, "tx.activate=%d tx.deactivate=%d\n", tx->activate, tx->deactivate); device_printf(sc->dev, "pkt_done=%d fw=%d\n", tx->pkt_done, be32toh(sc->ss->fw_stats->send_done_count)); } static int mxge_watchdog(mxge_softc_t *sc) { mxge_tx_ring_t *tx; uint32_t rx_pause = be32toh(sc->ss->fw_stats->dropped_pause); int i, err = 0; /* see if we have outstanding transmits, which have been pending for more than mxge_ticks */ for (i = 0; #ifdef IFNET_BUF_RING (i < sc->num_slices) && (err == 0); #else (i < 1) && (err == 0); #endif i++) { tx = &sc->ss[i].tx; if (tx->req != tx->done && tx->watchdog_req != tx->watchdog_done && tx->done == tx->watchdog_done) { /* check for pause blocking before resetting */ if (tx->watchdog_rx_pause == rx_pause) { mxge_warn_stuck(sc, tx, i); taskqueue_enqueue(sc->tq, &sc->watchdog_task); return (ENXIO); } else device_printf(sc->dev, "Flow control blocking " "xmits, check link partner\n"); } tx->watchdog_req = tx->req; tx->watchdog_done = tx->done; tx->watchdog_rx_pause = rx_pause; } if (sc->need_media_probe) mxge_media_probe(sc); return (err); } static uint64_t mxge_get_counter(struct ifnet *ifp, ift_counter cnt) { struct mxge_softc *sc; uint64_t rv; sc = if_getsoftc(ifp); rv = 0; switch (cnt) { case IFCOUNTER_IPACKETS: for (int s = 0; s < sc->num_slices; s++) rv += sc->ss[s].ipackets; return (rv); case IFCOUNTER_OPACKETS: for (int s = 0; s < sc->num_slices; s++) rv += sc->ss[s].opackets; return (rv); case IFCOUNTER_OERRORS: for (int s = 0; s < sc->num_slices; s++) rv += sc->ss[s].oerrors; return (rv); #ifdef IFNET_BUF_RING case IFCOUNTER_OBYTES: for (int s = 0; s < sc->num_slices; s++) rv += sc->ss[s].obytes; return (rv); case IFCOUNTER_OMCASTS: for (int s = 0; s < sc->num_slices; s++) rv += sc->ss[s].omcasts; return (rv); case IFCOUNTER_OQDROPS: for (int s = 0; s < sc->num_slices; s++) rv += sc->ss[s].tx.br->br_drops; return (rv); #endif default: return (if_get_counter_default(ifp, cnt)); } } static void mxge_tick(void *arg) { mxge_softc_t *sc = arg; u_long pkts = 0; int err = 0; int running, ticks; uint16_t cmd; ticks = mxge_ticks; running = sc->ifp->if_drv_flags & IFF_DRV_RUNNING; if (running) { if (!sc->watchdog_countdown) { err = mxge_watchdog(sc); sc->watchdog_countdown = 4; } sc->watchdog_countdown--; } if (pkts == 0) { /* ensure NIC did not suffer h/w fault while idle */ cmd = pci_read_config(sc->dev, PCIR_COMMAND, 2); if ((cmd & PCIM_CMD_BUSMASTEREN) == 0) { sc->dying = 2; taskqueue_enqueue(sc->tq, &sc->watchdog_task); err = ENXIO; } /* look less often if NIC is idle */ ticks *= 4; } if (err == 0) callout_reset(&sc->co_hdl, ticks, mxge_tick, sc); } static int mxge_media_change(struct ifnet *ifp) { return EINVAL; } static int mxge_change_mtu(mxge_softc_t *sc, int mtu) { struct ifnet *ifp = sc->ifp; int real_mtu, old_mtu; int err = 0; real_mtu = mtu + ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN; if ((real_mtu > sc->max_mtu) || real_mtu < 60) return EINVAL; mtx_lock(&sc->driver_mtx); old_mtu = ifp->if_mtu; ifp->if_mtu = mtu; if (ifp->if_drv_flags & IFF_DRV_RUNNING) { mxge_close(sc, 0); err = mxge_open(sc); if (err != 0) { ifp->if_mtu = old_mtu; mxge_close(sc, 0); (void) mxge_open(sc); } } mtx_unlock(&sc->driver_mtx); return err; } static void mxge_media_status(struct ifnet *ifp, struct ifmediareq *ifmr) { mxge_softc_t *sc = ifp->if_softc; if (sc == NULL) return; ifmr->ifm_status = IFM_AVALID; ifmr->ifm_active = IFM_ETHER | 
IFM_FDX; ifmr->ifm_status |= sc->link_state ? IFM_ACTIVE : 0; ifmr->ifm_active |= sc->current_media; } static int mxge_ioctl(struct ifnet *ifp, u_long command, caddr_t data) { mxge_softc_t *sc = ifp->if_softc; struct ifreq *ifr = (struct ifreq *)data; int err, mask; err = 0; switch (command) { case SIOCSIFADDR: case SIOCGIFADDR: err = ether_ioctl(ifp, command, data); break; case SIOCSIFMTU: err = mxge_change_mtu(sc, ifr->ifr_mtu); break; case SIOCSIFFLAGS: mtx_lock(&sc->driver_mtx); if (sc->dying) { mtx_unlock(&sc->driver_mtx); return EINVAL; } if (ifp->if_flags & IFF_UP) { if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) { err = mxge_open(sc); } else { /* take care of promis can allmulti flag chages */ mxge_change_promisc(sc, ifp->if_flags & IFF_PROMISC); mxge_set_multicast_list(sc); } } else { if (ifp->if_drv_flags & IFF_DRV_RUNNING) { mxge_close(sc, 0); } } mtx_unlock(&sc->driver_mtx); break; case SIOCADDMULTI: case SIOCDELMULTI: mtx_lock(&sc->driver_mtx); mxge_set_multicast_list(sc); mtx_unlock(&sc->driver_mtx); break; case SIOCSIFCAP: mtx_lock(&sc->driver_mtx); mask = ifr->ifr_reqcap ^ ifp->if_capenable; if (mask & IFCAP_TXCSUM) { if (IFCAP_TXCSUM & ifp->if_capenable) { ifp->if_capenable &= ~(IFCAP_TXCSUM|IFCAP_TSO4); ifp->if_hwassist &= ~(CSUM_TCP | CSUM_UDP); } else { ifp->if_capenable |= IFCAP_TXCSUM; ifp->if_hwassist |= (CSUM_TCP | CSUM_UDP); } } else if (mask & IFCAP_RXCSUM) { if (IFCAP_RXCSUM & ifp->if_capenable) { ifp->if_capenable &= ~IFCAP_RXCSUM; } else { ifp->if_capenable |= IFCAP_RXCSUM; } } if (mask & IFCAP_TSO4) { if (IFCAP_TSO4 & ifp->if_capenable) { ifp->if_capenable &= ~IFCAP_TSO4; } else if (IFCAP_TXCSUM & ifp->if_capenable) { ifp->if_capenable |= IFCAP_TSO4; ifp->if_hwassist |= CSUM_TSO; } else { printf("mxge requires tx checksum offload" " be enabled to use TSO\n"); err = EINVAL; } } #if IFCAP_TSO6 if (mask & IFCAP_TXCSUM_IPV6) { if (IFCAP_TXCSUM_IPV6 & ifp->if_capenable) { ifp->if_capenable &= ~(IFCAP_TXCSUM_IPV6 | IFCAP_TSO6); ifp->if_hwassist &= ~(CSUM_TCP_IPV6 | CSUM_UDP); } else { ifp->if_capenable |= IFCAP_TXCSUM_IPV6; ifp->if_hwassist |= (CSUM_TCP_IPV6 | CSUM_UDP_IPV6); } } else if (mask & IFCAP_RXCSUM_IPV6) { if (IFCAP_RXCSUM_IPV6 & ifp->if_capenable) { ifp->if_capenable &= ~IFCAP_RXCSUM_IPV6; } else { ifp->if_capenable |= IFCAP_RXCSUM_IPV6; } } if (mask & IFCAP_TSO6) { if (IFCAP_TSO6 & ifp->if_capenable) { ifp->if_capenable &= ~IFCAP_TSO6; } else if (IFCAP_TXCSUM_IPV6 & ifp->if_capenable) { ifp->if_capenable |= IFCAP_TSO6; ifp->if_hwassist |= CSUM_TSO; } else { printf("mxge requires tx checksum offload" " be enabled to use TSO\n"); err = EINVAL; } } #endif /*IFCAP_TSO6 */ if (mask & IFCAP_LRO) ifp->if_capenable ^= IFCAP_LRO; if (mask & IFCAP_VLAN_HWTAGGING) ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING; if (mask & IFCAP_VLAN_HWTSO) ifp->if_capenable ^= IFCAP_VLAN_HWTSO; if (!(ifp->if_capabilities & IFCAP_VLAN_HWTSO) || !(ifp->if_capenable & IFCAP_VLAN_HWTAGGING)) ifp->if_capenable &= ~IFCAP_VLAN_HWTSO; mtx_unlock(&sc->driver_mtx); VLAN_CAPABILITIES(ifp); break; case SIOCGIFMEDIA: mtx_lock(&sc->driver_mtx); mxge_media_probe(sc); mtx_unlock(&sc->driver_mtx); err = ifmedia_ioctl(ifp, (struct ifreq *)data, &sc->media, command); break; default: err = ENOTTY; } return err; } static void mxge_fetch_tunables(mxge_softc_t *sc) { TUNABLE_INT_FETCH("hw.mxge.max_slices", &mxge_max_slices); TUNABLE_INT_FETCH("hw.mxge.flow_control_enabled", &mxge_flow_control); TUNABLE_INT_FETCH("hw.mxge.intr_coal_delay", &mxge_intr_coal_delay); TUNABLE_INT_FETCH("hw.mxge.nvidia_ecrc_enable", 
&mxge_nvidia_ecrc_enable); TUNABLE_INT_FETCH("hw.mxge.force_firmware", &mxge_force_firmware); TUNABLE_INT_FETCH("hw.mxge.deassert_wait", &mxge_deassert_wait); TUNABLE_INT_FETCH("hw.mxge.verbose", &mxge_verbose); TUNABLE_INT_FETCH("hw.mxge.ticks", &mxge_ticks); TUNABLE_INT_FETCH("hw.mxge.always_promisc", &mxge_always_promisc); TUNABLE_INT_FETCH("hw.mxge.rss_hash_type", &mxge_rss_hash_type); TUNABLE_INT_FETCH("hw.mxge.rss_hashtype", &mxge_rss_hash_type); TUNABLE_INT_FETCH("hw.mxge.initial_mtu", &mxge_initial_mtu); TUNABLE_INT_FETCH("hw.mxge.throttle", &mxge_throttle); if (bootverbose) mxge_verbose = 1; if (mxge_intr_coal_delay < 0 || mxge_intr_coal_delay > 10*1000) mxge_intr_coal_delay = 30; if (mxge_ticks == 0) mxge_ticks = hz / 2; sc->pause = mxge_flow_control; if (mxge_rss_hash_type < MXGEFW_RSS_HASH_TYPE_IPV4 || mxge_rss_hash_type > MXGEFW_RSS_HASH_TYPE_MAX) { mxge_rss_hash_type = MXGEFW_RSS_HASH_TYPE_SRC_DST_PORT; } if (mxge_initial_mtu > ETHERMTU_JUMBO || mxge_initial_mtu < ETHER_MIN_LEN) mxge_initial_mtu = ETHERMTU_JUMBO; if (mxge_throttle && mxge_throttle > MXGE_MAX_THROTTLE) mxge_throttle = MXGE_MAX_THROTTLE; if (mxge_throttle && mxge_throttle < MXGE_MIN_THROTTLE) mxge_throttle = MXGE_MIN_THROTTLE; sc->throttle = mxge_throttle; } static void mxge_free_slices(mxge_softc_t *sc) { struct mxge_slice_state *ss; int i; if (sc->ss == NULL) return; for (i = 0; i < sc->num_slices; i++) { ss = &sc->ss[i]; if (ss->fw_stats != NULL) { mxge_dma_free(&ss->fw_stats_dma); ss->fw_stats = NULL; #ifdef IFNET_BUF_RING if (ss->tx.br != NULL) { drbr_free(ss->tx.br, M_DEVBUF); ss->tx.br = NULL; } #endif mtx_destroy(&ss->tx.mtx); } if (ss->rx_done.entry != NULL) { mxge_dma_free(&ss->rx_done.dma); ss->rx_done.entry = NULL; } } free(sc->ss, M_DEVBUF); sc->ss = NULL; } static int mxge_alloc_slices(mxge_softc_t *sc) { mxge_cmd_t cmd; struct mxge_slice_state *ss; size_t bytes; int err, i, max_intr_slots; err = mxge_send_cmd(sc, MXGEFW_CMD_GET_RX_RING_SIZE, &cmd); if (err != 0) { device_printf(sc->dev, "Cannot determine rx ring size\n"); return err; } sc->rx_ring_size = cmd.data0; max_intr_slots = 2 * (sc->rx_ring_size / sizeof (mcp_dma_addr_t)); - bytes = sizeof (*sc->ss) * sc->num_slices; - sc->ss = malloc(bytes, M_DEVBUF, M_NOWAIT | M_ZERO); + sc->ss = mallocarray(sc->num_slices, sizeof(*sc->ss), M_DEVBUF, + M_NOWAIT | M_ZERO); if (sc->ss == NULL) return (ENOMEM); for (i = 0; i < sc->num_slices; i++) { ss = &sc->ss[i]; ss->sc = sc; /* allocate per-slice rx interrupt queues */ bytes = max_intr_slots * sizeof (*ss->rx_done.entry); err = mxge_dma_alloc(sc, &ss->rx_done.dma, bytes, 4096); if (err != 0) goto abort; ss->rx_done.entry = ss->rx_done.dma.addr; bzero(ss->rx_done.entry, bytes); /* * allocate the per-slice firmware stats; stats * (including tx) are used used only on the first * slice for now */ #ifndef IFNET_BUF_RING if (i > 0) continue; #endif bytes = sizeof (*ss->fw_stats); err = mxge_dma_alloc(sc, &ss->fw_stats_dma, sizeof (*ss->fw_stats), 64); if (err != 0) goto abort; ss->fw_stats = (mcp_irq_data_t *)ss->fw_stats_dma.addr; snprintf(ss->tx.mtx_name, sizeof(ss->tx.mtx_name), "%s:tx(%d)", device_get_nameunit(sc->dev), i); mtx_init(&ss->tx.mtx, ss->tx.mtx_name, NULL, MTX_DEF); #ifdef IFNET_BUF_RING ss->tx.br = buf_ring_alloc(2048, M_DEVBUF, M_WAITOK, &ss->tx.mtx); #endif } return (0); abort: mxge_free_slices(sc); return (ENOMEM); } static void mxge_slice_probe(mxge_softc_t *sc) { mxge_cmd_t cmd; char *old_fw; int msix_cnt, status, max_intr_slots; sc->num_slices = 1; /* * don't enable multiple slices if 
they are not enabled, * or if this is not an SMP system */ if (mxge_max_slices == 0 || mxge_max_slices == 1 || mp_ncpus < 2) return; /* see how many MSI-X interrupts are available */ msix_cnt = pci_msix_count(sc->dev); if (msix_cnt < 2) return; /* now load the slice aware firmware see what it supports */ old_fw = sc->fw_name; if (old_fw == mxge_fw_aligned) sc->fw_name = mxge_fw_rss_aligned; else sc->fw_name = mxge_fw_rss_unaligned; status = mxge_load_firmware(sc, 0); if (status != 0) { device_printf(sc->dev, "Falling back to a single slice\n"); return; } /* try to send a reset command to the card to see if it is alive */ memset(&cmd, 0, sizeof (cmd)); status = mxge_send_cmd(sc, MXGEFW_CMD_RESET, &cmd); if (status != 0) { device_printf(sc->dev, "failed reset\n"); goto abort_with_fw; } /* get rx ring size */ status = mxge_send_cmd(sc, MXGEFW_CMD_GET_RX_RING_SIZE, &cmd); if (status != 0) { device_printf(sc->dev, "Cannot determine rx ring size\n"); goto abort_with_fw; } max_intr_slots = 2 * (cmd.data0 / sizeof (mcp_dma_addr_t)); /* tell it the size of the interrupt queues */ cmd.data0 = max_intr_slots * sizeof (struct mcp_slot); status = mxge_send_cmd(sc, MXGEFW_CMD_SET_INTRQ_SIZE, &cmd); if (status != 0) { device_printf(sc->dev, "failed MXGEFW_CMD_SET_INTRQ_SIZE\n"); goto abort_with_fw; } /* ask the maximum number of slices it supports */ status = mxge_send_cmd(sc, MXGEFW_CMD_GET_MAX_RSS_QUEUES, &cmd); if (status != 0) { device_printf(sc->dev, "failed MXGEFW_CMD_GET_MAX_RSS_QUEUES\n"); goto abort_with_fw; } sc->num_slices = cmd.data0; if (sc->num_slices > msix_cnt) sc->num_slices = msix_cnt; if (mxge_max_slices == -1) { /* cap to number of CPUs in system */ if (sc->num_slices > mp_ncpus) sc->num_slices = mp_ncpus; } else { if (sc->num_slices > mxge_max_slices) sc->num_slices = mxge_max_slices; } /* make sure it is a power of two */ while (sc->num_slices & (sc->num_slices - 1)) sc->num_slices--; if (mxge_verbose) device_printf(sc->dev, "using %d slices\n", sc->num_slices); return; abort_with_fw: sc->fw_name = old_fw; (void) mxge_load_firmware(sc, 0); } static int mxge_add_msix_irqs(mxge_softc_t *sc) { size_t bytes; int count, err, i, rid; rid = PCIR_BAR(2); sc->msix_table_res = bus_alloc_resource_any(sc->dev, SYS_RES_MEMORY, &rid, RF_ACTIVE); if (sc->msix_table_res == NULL) { device_printf(sc->dev, "couldn't alloc MSIX table res\n"); return ENXIO; } count = sc->num_slices; err = pci_alloc_msix(sc->dev, &count); if (err != 0) { device_printf(sc->dev, "pci_alloc_msix: failed, wanted %d" "err = %d \n", sc->num_slices, err); goto abort_with_msix_table; } if (count < sc->num_slices) { device_printf(sc->dev, "pci_alloc_msix: need %d, got %d\n", count, sc->num_slices); device_printf(sc->dev, "Try setting hw.mxge.max_slices to %d\n", count); err = ENOSPC; goto abort_with_msix; } - bytes = sizeof (*sc->msix_irq_res) * sc->num_slices; - sc->msix_irq_res = malloc(bytes, M_DEVBUF, M_NOWAIT|M_ZERO); + sc->msix_irq_res = mallocarray(sc->num_slices, + sizeof(*sc->msix_irq_res), M_DEVBUF, M_NOWAIT|M_ZERO); if (sc->msix_irq_res == NULL) { err = ENOMEM; goto abort_with_msix; } for (i = 0; i < sc->num_slices; i++) { rid = i + 1; sc->msix_irq_res[i] = bus_alloc_resource_any(sc->dev, SYS_RES_IRQ, &rid, RF_ACTIVE); if (sc->msix_irq_res[i] == NULL) { device_printf(sc->dev, "couldn't allocate IRQ res" " for message %d\n", i); err = ENXIO; goto abort_with_res; } } - bytes = sizeof (*sc->msix_ih) * sc->num_slices; - sc->msix_ih = malloc(bytes, M_DEVBUF, M_NOWAIT|M_ZERO); + sc->msix_ih = mallocarray(sc->num_slices, 
sizeof(*sc->msix_ih), + M_DEVBUF, M_NOWAIT|M_ZERO); for (i = 0; i < sc->num_slices; i++) { err = bus_setup_intr(sc->dev, sc->msix_irq_res[i], INTR_TYPE_NET | INTR_MPSAFE, #if __FreeBSD_version > 700030 NULL, #endif mxge_intr, &sc->ss[i], &sc->msix_ih[i]); if (err != 0) { device_printf(sc->dev, "couldn't setup intr for " "message %d\n", i); goto abort_with_intr; } bus_describe_intr(sc->dev, sc->msix_irq_res[i], sc->msix_ih[i], "s%d", i); } if (mxge_verbose) { device_printf(sc->dev, "using %d msix IRQs:", sc->num_slices); for (i = 0; i < sc->num_slices; i++) printf(" %jd", rman_get_start(sc->msix_irq_res[i])); printf("\n"); } return (0); abort_with_intr: for (i = 0; i < sc->num_slices; i++) { if (sc->msix_ih[i] != NULL) { bus_teardown_intr(sc->dev, sc->msix_irq_res[i], sc->msix_ih[i]); sc->msix_ih[i] = NULL; } } free(sc->msix_ih, M_DEVBUF); abort_with_res: for (i = 0; i < sc->num_slices; i++) { rid = i + 1; if (sc->msix_irq_res[i] != NULL) bus_release_resource(sc->dev, SYS_RES_IRQ, rid, sc->msix_irq_res[i]); sc->msix_irq_res[i] = NULL; } free(sc->msix_irq_res, M_DEVBUF); abort_with_msix: pci_release_msi(sc->dev); abort_with_msix_table: bus_release_resource(sc->dev, SYS_RES_MEMORY, PCIR_BAR(2), sc->msix_table_res); return err; } static int mxge_add_single_irq(mxge_softc_t *sc) { int count, err, rid; count = pci_msi_count(sc->dev); if (count == 1 && pci_alloc_msi(sc->dev, &count) == 0) { rid = 1; } else { rid = 0; sc->legacy_irq = 1; } sc->irq_res = bus_alloc_resource_any(sc->dev, SYS_RES_IRQ, &rid, RF_SHAREABLE | RF_ACTIVE); if (sc->irq_res == NULL) { device_printf(sc->dev, "could not alloc interrupt\n"); return ENXIO; } if (mxge_verbose) device_printf(sc->dev, "using %s irq %jd\n", sc->legacy_irq ? "INTx" : "MSI", rman_get_start(sc->irq_res)); err = bus_setup_intr(sc->dev, sc->irq_res, INTR_TYPE_NET | INTR_MPSAFE, #if __FreeBSD_version > 700030 NULL, #endif mxge_intr, &sc->ss[0], &sc->ih); if (err != 0) { bus_release_resource(sc->dev, SYS_RES_IRQ, sc->legacy_irq ? 0 : 1, sc->irq_res); if (!sc->legacy_irq) pci_release_msi(sc->dev); } return err; } static void mxge_rem_msix_irqs(mxge_softc_t *sc) { int i, rid; for (i = 0; i < sc->num_slices; i++) { if (sc->msix_ih[i] != NULL) { bus_teardown_intr(sc->dev, sc->msix_irq_res[i], sc->msix_ih[i]); sc->msix_ih[i] = NULL; } } free(sc->msix_ih, M_DEVBUF); for (i = 0; i < sc->num_slices; i++) { rid = i + 1; if (sc->msix_irq_res[i] != NULL) bus_release_resource(sc->dev, SYS_RES_IRQ, rid, sc->msix_irq_res[i]); sc->msix_irq_res[i] = NULL; } free(sc->msix_irq_res, M_DEVBUF); bus_release_resource(sc->dev, SYS_RES_MEMORY, PCIR_BAR(2), sc->msix_table_res); pci_release_msi(sc->dev); return; } static void mxge_rem_single_irq(mxge_softc_t *sc) { bus_teardown_intr(sc->dev, sc->irq_res, sc->ih); bus_release_resource(sc->dev, SYS_RES_IRQ, sc->legacy_irq ? 
0 : 1, sc->irq_res); if (!sc->legacy_irq) pci_release_msi(sc->dev); } static void mxge_rem_irq(mxge_softc_t *sc) { if (sc->num_slices > 1) mxge_rem_msix_irqs(sc); else mxge_rem_single_irq(sc); } static int mxge_add_irq(mxge_softc_t *sc) { int err; if (sc->num_slices > 1) err = mxge_add_msix_irqs(sc); else err = mxge_add_single_irq(sc); if (0 && err == 0 && sc->num_slices > 1) { mxge_rem_msix_irqs(sc); err = mxge_add_msix_irqs(sc); } return err; } static int mxge_attach(device_t dev) { mxge_cmd_t cmd; mxge_softc_t *sc = device_get_softc(dev); struct ifnet *ifp; int err, rid; sc->dev = dev; mxge_fetch_tunables(sc); TASK_INIT(&sc->watchdog_task, 1, mxge_watchdog_task, sc); sc->tq = taskqueue_create("mxge_taskq", M_WAITOK, taskqueue_thread_enqueue, &sc->tq); if (sc->tq == NULL) { err = ENOMEM; goto abort_with_nothing; } err = bus_dma_tag_create(bus_get_dma_tag(dev), /* parent */ 1, /* alignment */ 0, /* boundary */ BUS_SPACE_MAXADDR, /* low */ BUS_SPACE_MAXADDR, /* high */ NULL, NULL, /* filter */ 65536 + 256, /* maxsize */ MXGE_MAX_SEND_DESC, /* num segs */ 65536, /* maxsegsize */ 0, /* flags */ NULL, NULL, /* lock */ &sc->parent_dmat); /* tag */ if (err != 0) { device_printf(sc->dev, "Err %d allocating parent dmat\n", err); goto abort_with_tq; } ifp = sc->ifp = if_alloc(IFT_ETHER); if (ifp == NULL) { device_printf(dev, "can not if_alloc()\n"); err = ENOSPC; goto abort_with_parent_dmat; } if_initname(ifp, device_get_name(dev), device_get_unit(dev)); snprintf(sc->cmd_mtx_name, sizeof(sc->cmd_mtx_name), "%s:cmd", device_get_nameunit(dev)); mtx_init(&sc->cmd_mtx, sc->cmd_mtx_name, NULL, MTX_DEF); snprintf(sc->driver_mtx_name, sizeof(sc->driver_mtx_name), "%s:drv", device_get_nameunit(dev)); mtx_init(&sc->driver_mtx, sc->driver_mtx_name, MTX_NETWORK_LOCK, MTX_DEF); callout_init_mtx(&sc->co_hdl, &sc->driver_mtx, 0); mxge_setup_cfg_space(sc); /* Map the board into the kernel */ rid = PCIR_BARS; sc->mem_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid, RF_ACTIVE); if (sc->mem_res == NULL) { device_printf(dev, "could not map memory\n"); err = ENXIO; goto abort_with_lock; } sc->sram = rman_get_virtual(sc->mem_res); sc->sram_size = 2*1024*1024 - (2*(48*1024)+(32*1024)) - 0x100; if (sc->sram_size > rman_get_size(sc->mem_res)) { device_printf(dev, "impossible memory region size %jd\n", rman_get_size(sc->mem_res)); err = ENXIO; goto abort_with_mem_res; } /* make NULL terminated copy of the EEPROM strings section of lanai SRAM */ bzero(sc->eeprom_strings, MXGE_EEPROM_STRINGS_SIZE); bus_space_read_region_1(rman_get_bustag(sc->mem_res), rman_get_bushandle(sc->mem_res), sc->sram_size - MXGE_EEPROM_STRINGS_SIZE, sc->eeprom_strings, MXGE_EEPROM_STRINGS_SIZE - 2); err = mxge_parse_strings(sc); if (err != 0) goto abort_with_mem_res; /* Enable write combining for efficient use of PCIe bus */ mxge_enable_wc(sc); /* Allocate the out of band dma memory */ err = mxge_dma_alloc(sc, &sc->cmd_dma, sizeof (mxge_cmd_t), 64); if (err != 0) goto abort_with_mem_res; sc->cmd = (mcp_cmd_response_t *) sc->cmd_dma.addr; err = mxge_dma_alloc(sc, &sc->zeropad_dma, 64, 64); if (err != 0) goto abort_with_cmd_dma; err = mxge_dma_alloc(sc, &sc->dmabench_dma, 4096, 4096); if (err != 0) goto abort_with_zeropad_dma; /* select & load the firmware */ err = mxge_select_firmware(sc); if (err != 0) goto abort_with_dmabench; sc->intr_coal_delay = mxge_intr_coal_delay; mxge_slice_probe(sc); err = mxge_alloc_slices(sc); if (err != 0) goto abort_with_dmabench; err = mxge_reset(sc, 0); if (err != 0) goto abort_with_slices; err = 
mxge_alloc_rings(sc); if (err != 0) { device_printf(sc->dev, "failed to allocate rings\n"); goto abort_with_slices; } err = mxge_add_irq(sc); if (err != 0) { device_printf(sc->dev, "failed to add irq\n"); goto abort_with_rings; } ifp->if_baudrate = IF_Gbps(10); ifp->if_capabilities = IFCAP_RXCSUM | IFCAP_TXCSUM | IFCAP_TSO4 | IFCAP_VLAN_MTU | IFCAP_LINKSTATE | IFCAP_TXCSUM_IPV6 | IFCAP_RXCSUM_IPV6; #if defined(INET) || defined(INET6) ifp->if_capabilities |= IFCAP_LRO; #endif #ifdef MXGE_NEW_VLAN_API ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_HWCSUM; /* Only FW 1.4.32 and newer can do TSO over vlans */ if (sc->fw_ver_major == 1 && sc->fw_ver_minor == 4 && sc->fw_ver_tiny >= 32) ifp->if_capabilities |= IFCAP_VLAN_HWTSO; #endif sc->max_mtu = mxge_max_mtu(sc); if (sc->max_mtu >= 9000) ifp->if_capabilities |= IFCAP_JUMBO_MTU; else device_printf(dev, "MTU limited to %d. Install " "latest firmware for 9000 byte jumbo support\n", sc->max_mtu - ETHER_HDR_LEN); ifp->if_hwassist = CSUM_TCP | CSUM_UDP | CSUM_TSO; ifp->if_hwassist |= CSUM_TCP_IPV6 | CSUM_UDP_IPV6; /* check to see if f/w supports TSO for IPv6 */ if (!mxge_send_cmd(sc, MXGEFW_CMD_GET_MAX_TSO6_HDR_SIZE, &cmd)) { if (CSUM_TCP_IPV6) ifp->if_capabilities |= IFCAP_TSO6; sc->max_tso6_hlen = min(cmd.data0, sizeof (sc->ss[0].scratch)); } ifp->if_capenable = ifp->if_capabilities; if (sc->lro_cnt == 0) ifp->if_capenable &= ~IFCAP_LRO; ifp->if_init = mxge_init; ifp->if_softc = sc; ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; ifp->if_ioctl = mxge_ioctl; ifp->if_start = mxge_start; ifp->if_get_counter = mxge_get_counter; /* Initialise the ifmedia structure */ ifmedia_init(&sc->media, 0, mxge_media_change, mxge_media_status); mxge_media_init(sc); mxge_media_probe(sc); sc->dying = 0; ether_ifattach(ifp, sc->mac_addr); /* ether_ifattach sets mtu to ETHERMTU */ if (mxge_initial_mtu != ETHERMTU) mxge_change_mtu(sc, mxge_initial_mtu); mxge_add_sysctls(sc); #ifdef IFNET_BUF_RING ifp->if_transmit = mxge_transmit; ifp->if_qflush = mxge_qflush; #endif taskqueue_start_threads(&sc->tq, 1, PI_NET, "%s taskq", device_get_nameunit(sc->dev)); callout_reset(&sc->co_hdl, mxge_ticks, mxge_tick, sc); return 0; abort_with_rings: mxge_free_rings(sc); abort_with_slices: mxge_free_slices(sc); abort_with_dmabench: mxge_dma_free(&sc->dmabench_dma); abort_with_zeropad_dma: mxge_dma_free(&sc->zeropad_dma); abort_with_cmd_dma: mxge_dma_free(&sc->cmd_dma); abort_with_mem_res: bus_release_resource(dev, SYS_RES_MEMORY, PCIR_BARS, sc->mem_res); abort_with_lock: pci_disable_busmaster(dev); mtx_destroy(&sc->cmd_mtx); mtx_destroy(&sc->driver_mtx); if_free(ifp); abort_with_parent_dmat: bus_dma_tag_destroy(sc->parent_dmat); abort_with_tq: if (sc->tq != NULL) { taskqueue_drain(sc->tq, &sc->watchdog_task); taskqueue_free(sc->tq); sc->tq = NULL; } abort_with_nothing: return err; } static int mxge_detach(device_t dev) { mxge_softc_t *sc = device_get_softc(dev); if (mxge_vlans_active(sc)) { device_printf(sc->dev, "Detach vlans before removing module\n"); return EBUSY; } mtx_lock(&sc->driver_mtx); sc->dying = 1; if (sc->ifp->if_drv_flags & IFF_DRV_RUNNING) mxge_close(sc, 0); mtx_unlock(&sc->driver_mtx); ether_ifdetach(sc->ifp); if (sc->tq != NULL) { taskqueue_drain(sc->tq, &sc->watchdog_task); taskqueue_free(sc->tq); sc->tq = NULL; } callout_drain(&sc->co_hdl); ifmedia_removeall(&sc->media); mxge_dummy_rdma(sc, 0); mxge_rem_sysctls(sc); mxge_rem_irq(sc); mxge_free_rings(sc); mxge_free_slices(sc); mxge_dma_free(&sc->dmabench_dma); mxge_dma_free(&sc->zeropad_dma); 
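/*
 * The allocation hunks in this file (mxge_alloc_slices(), mxge_add_msix_irqs())
 * and in if_ptnet.c below all apply the same conversion: an open-coded
 * "count * size" argument to malloc(9) becomes a mallocarray(9) call, so the
 * multiplication is range-checked inside the allocator instead of by hand.
 * A minimal sketch of the converted form, assuming the stock
 * malloc(9)/mallocarray(9) KPI; "slots" and "nslots" are illustrative names,
 * not fields of this driver:
 *
 *	slots = mallocarray(nslots, sizeof(*slots), M_DEVBUF, M_NOWAIT | M_ZERO);
 *	if (slots == NULL)
 *		return (ENOMEM);
 */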
mxge_dma_free(&sc->cmd_dma); bus_release_resource(dev, SYS_RES_MEMORY, PCIR_BARS, sc->mem_res); pci_disable_busmaster(dev); mtx_destroy(&sc->cmd_mtx); mtx_destroy(&sc->driver_mtx); if_free(sc->ifp); bus_dma_tag_destroy(sc->parent_dmat); return 0; } static int mxge_shutdown(device_t dev) { return 0; } /* This file uses Myri10GE driver indentation. Local Variables: c-file-style:"linux" tab-width:8 End: */ Index: head/sys/dev/netmap/if_ptnet.c =================================================================== --- head/sys/dev/netmap/if_ptnet.c (revision 327948) +++ head/sys/dev/netmap/if_ptnet.c (revision 327949) @@ -1,2276 +1,2276 @@ /*- * Copyright (c) 2016, Vincenzo Maffione * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice unmodified, this list of conditions, and the following * disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * $FreeBSD$ */ /* Driver for ptnet paravirtualized network device. 
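The datapath is a set of netmap rings shared with the hypervisor through a CSB (communication status block), programmed via the PTNET_IO_CSBBAH/PTNET_IO_CSBBAL registers in ptnet_attach(), with per-queue kick registers and MSI-X vectors for notifications (see ptnet_irqs_init()).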
*/ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "opt_inet.h" #include "opt_inet6.h" #include #include #include #include #include #include #ifndef PTNET_CSB_ALLOC #error "No support for on-device CSB" #endif #ifndef INET #error "INET not defined, cannot support offloadings" #endif #if __FreeBSD_version >= 1100000 static uint64_t ptnet_get_counter(if_t, ift_counter); #else typedef struct ifnet *if_t; #define if_getsoftc(_ifp) (_ifp)->if_softc #endif //#define PTNETMAP_STATS //#define DEBUG #ifdef DEBUG #define DBG(x) x #else /* !DEBUG */ #define DBG(x) #endif /* !DEBUG */ extern int ptnet_vnet_hdr; /* Tunable parameter */ struct ptnet_softc; struct ptnet_queue_stats { uint64_t packets; /* if_[io]packets */ uint64_t bytes; /* if_[io]bytes */ uint64_t errors; /* if_[io]errors */ uint64_t iqdrops; /* if_iqdrops */ uint64_t mcasts; /* if_[io]mcasts */ #ifdef PTNETMAP_STATS uint64_t intrs; uint64_t kicks; #endif /* PTNETMAP_STATS */ }; struct ptnet_queue { struct ptnet_softc *sc; struct resource *irq; void *cookie; int kring_id; struct ptnet_ring *ptring; unsigned int kick; struct mtx lock; struct buf_ring *bufring; /* for TX queues */ struct ptnet_queue_stats stats; #ifdef PTNETMAP_STATS struct ptnet_queue_stats last_stats; #endif /* PTNETMAP_STATS */ struct taskqueue *taskq; struct task task; char lock_name[16]; }; #define PTNET_Q_LOCK(_pq) mtx_lock(&(_pq)->lock) #define PTNET_Q_TRYLOCK(_pq) mtx_trylock(&(_pq)->lock) #define PTNET_Q_UNLOCK(_pq) mtx_unlock(&(_pq)->lock) struct ptnet_softc { device_t dev; if_t ifp; struct ifmedia media; struct mtx lock; char lock_name[16]; char hwaddr[ETHER_ADDR_LEN]; /* Mirror of PTFEAT register. */ uint32_t ptfeatures; unsigned int vnet_hdr_len; /* PCI BARs support. 
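iomem maps the I/O register BAR (PTNETMAP_IO_PCI_BAR), msix_mem the MSI-X table BAR (PTNETMAP_MSIX_PCI_BAR).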
*/ struct resource *iomem; struct resource *msix_mem; unsigned int num_rings; unsigned int num_tx_rings; struct ptnet_queue *queues; struct ptnet_queue *rxqueues; struct ptnet_csb *csb; unsigned int min_tx_space; struct netmap_pt_guest_adapter *ptna; struct callout tick; #ifdef PTNETMAP_STATS struct timeval last_ts; #endif /* PTNETMAP_STATS */ }; #define PTNET_CORE_LOCK(_sc) mtx_lock(&(_sc)->lock) #define PTNET_CORE_UNLOCK(_sc) mtx_unlock(&(_sc)->lock) static int ptnet_probe(device_t); static int ptnet_attach(device_t); static int ptnet_detach(device_t); static int ptnet_suspend(device_t); static int ptnet_resume(device_t); static int ptnet_shutdown(device_t); static void ptnet_init(void *opaque); static int ptnet_ioctl(if_t ifp, u_long cmd, caddr_t data); static int ptnet_init_locked(struct ptnet_softc *sc); static int ptnet_stop(struct ptnet_softc *sc); static int ptnet_transmit(if_t ifp, struct mbuf *m); static int ptnet_drain_transmit_queue(struct ptnet_queue *pq, unsigned int budget, bool may_resched); static void ptnet_qflush(if_t ifp); static void ptnet_tx_task(void *context, int pending); static int ptnet_media_change(if_t ifp); static void ptnet_media_status(if_t ifp, struct ifmediareq *ifmr); #ifdef PTNETMAP_STATS static void ptnet_tick(void *opaque); #endif static int ptnet_irqs_init(struct ptnet_softc *sc); static void ptnet_irqs_fini(struct ptnet_softc *sc); static uint32_t ptnet_nm_ptctl(if_t ifp, uint32_t cmd); static int ptnet_nm_config(struct netmap_adapter *na, unsigned *txr, unsigned *txd, unsigned *rxr, unsigned *rxd); static void ptnet_update_vnet_hdr(struct ptnet_softc *sc); static int ptnet_nm_register(struct netmap_adapter *na, int onoff); static int ptnet_nm_txsync(struct netmap_kring *kring, int flags); static int ptnet_nm_rxsync(struct netmap_kring *kring, int flags); static void ptnet_tx_intr(void *opaque); static void ptnet_rx_intr(void *opaque); static unsigned ptnet_rx_discard(struct netmap_kring *kring, unsigned int head); static int ptnet_rx_eof(struct ptnet_queue *pq, unsigned int budget, bool may_resched); static void ptnet_rx_task(void *context, int pending); #ifdef DEVICE_POLLING static poll_handler_t ptnet_poll; #endif static device_method_t ptnet_methods[] = { DEVMETHOD(device_probe, ptnet_probe), DEVMETHOD(device_attach, ptnet_attach), DEVMETHOD(device_detach, ptnet_detach), DEVMETHOD(device_suspend, ptnet_suspend), DEVMETHOD(device_resume, ptnet_resume), DEVMETHOD(device_shutdown, ptnet_shutdown), DEVMETHOD_END }; static driver_t ptnet_driver = { "ptnet", ptnet_methods, sizeof(struct ptnet_softc) }; /* We use (SI_ORDER_MIDDLE+2) here, see DEV_MODULE_ORDERED() invocation. 
*/ static devclass_t ptnet_devclass; DRIVER_MODULE_ORDERED(ptnet, pci, ptnet_driver, ptnet_devclass, NULL, NULL, SI_ORDER_MIDDLE + 2); static int ptnet_probe(device_t dev) { if (pci_get_vendor(dev) != PTNETMAP_PCI_VENDOR_ID || pci_get_device(dev) != PTNETMAP_PCI_NETIF_ID) { return (ENXIO); } device_set_desc(dev, "ptnet network adapter"); return (BUS_PROBE_DEFAULT); } static inline void ptnet_kick(struct ptnet_queue *pq) { #ifdef PTNETMAP_STATS pq->stats.kicks ++; #endif /* PTNETMAP_STATS */ bus_write_4(pq->sc->iomem, pq->kick, 0); } #define PTNET_BUF_RING_SIZE 4096 #define PTNET_RX_BUDGET 512 #define PTNET_RX_BATCH 1 #define PTNET_TX_BUDGET 512 #define PTNET_TX_BATCH 64 #define PTNET_HDR_SIZE sizeof(struct virtio_net_hdr_mrg_rxbuf) #define PTNET_MAX_PKT_SIZE 65536 #define PTNET_CSUM_OFFLOAD (CSUM_TCP | CSUM_UDP | CSUM_SCTP) #define PTNET_CSUM_OFFLOAD_IPV6 (CSUM_TCP_IPV6 | CSUM_UDP_IPV6 |\ CSUM_SCTP_IPV6) #define PTNET_ALL_OFFLOAD (CSUM_TSO | PTNET_CSUM_OFFLOAD |\ PTNET_CSUM_OFFLOAD_IPV6) static int ptnet_attach(device_t dev) { uint32_t ptfeatures = 0; unsigned int num_rx_rings, num_tx_rings; struct netmap_adapter na_arg; unsigned int nifp_offset; struct ptnet_softc *sc; if_t ifp; uint32_t macreg; int err, rid; int i; sc = device_get_softc(dev); sc->dev = dev; /* Setup PCI resources. */ pci_enable_busmaster(dev); rid = PCIR_BAR(PTNETMAP_IO_PCI_BAR); sc->iomem = bus_alloc_resource_any(dev, SYS_RES_IOPORT, &rid, RF_ACTIVE); if (sc->iomem == NULL) { device_printf(dev, "Failed to map I/O BAR\n"); return (ENXIO); } /* Negotiate features with the hypervisor. */ if (ptnet_vnet_hdr) { ptfeatures |= PTNETMAP_F_VNET_HDR; } bus_write_4(sc->iomem, PTNET_IO_PTFEAT, ptfeatures); /* wanted */ ptfeatures = bus_read_4(sc->iomem, PTNET_IO_PTFEAT); /* acked */ sc->ptfeatures = ptfeatures; /* Allocate CSB and carry out CSB allocation protocol (CSBBAH first, * then CSBBAL). */ sc->csb = malloc(sizeof(struct ptnet_csb), M_DEVBUF, M_NOWAIT | M_ZERO); if (sc->csb == NULL) { device_printf(dev, "Failed to allocate CSB\n"); err = ENOMEM; goto err_path; } { /* * We use uint64_t rather than vm_paddr_t since we * need 64 bit addresses even on 32 bit platforms. */ uint64_t paddr = vtophys(sc->csb); bus_write_4(sc->iomem, PTNET_IO_CSBBAH, (paddr >> 32) & 0xffffffff); bus_write_4(sc->iomem, PTNET_IO_CSBBAL, paddr & 0xffffffff); } num_tx_rings = bus_read_4(sc->iomem, PTNET_IO_NUM_TX_RINGS); num_rx_rings = bus_read_4(sc->iomem, PTNET_IO_NUM_RX_RINGS); sc->num_rings = num_tx_rings + num_rx_rings; sc->num_tx_rings = num_tx_rings; /* Allocate and initialize per-queue data structures. */ - sc->queues = malloc(sizeof(struct ptnet_queue) * sc->num_rings, + sc->queues = mallocarray(sc->num_rings, sizeof(struct ptnet_queue), M_DEVBUF, M_NOWAIT | M_ZERO); if (sc->queues == NULL) { err = ENOMEM; goto err_path; } sc->rxqueues = sc->queues + num_tx_rings; for (i = 0; i < sc->num_rings; i++) { struct ptnet_queue *pq = sc->queues + i; pq->sc = sc; pq->kring_id = i; pq->kick = PTNET_IO_KICK_BASE + 4 * i; pq->ptring = sc->csb->rings + i; snprintf(pq->lock_name, sizeof(pq->lock_name), "%s-%d", device_get_nameunit(dev), i); mtx_init(&pq->lock, pq->lock_name, NULL, MTX_DEF); if (i >= num_tx_rings) { /* RX queue: fix kring_id. */ pq->kring_id -= num_tx_rings; } else { /* TX queue: allocate buf_ring. */ pq->bufring = buf_ring_alloc(PTNET_BUF_RING_SIZE, M_DEVBUF, M_NOWAIT, &pq->lock); if (pq->bufring == NULL) { err = ENOMEM; goto err_path; } } } sc->min_tx_space = 64; /* Safe initial value. 
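ptnet_init_locked() recomputes it as PTNET_MAX_PKT_SIZE / NETMAP_BUF_SIZE() + 2 once the netmap memory has been finalized.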
*/ err = ptnet_irqs_init(sc); if (err) { goto err_path; } /* Setup Ethernet interface. */ sc->ifp = ifp = if_alloc(IFT_ETHER); if (ifp == NULL) { device_printf(dev, "Failed to allocate ifnet\n"); err = ENOMEM; goto err_path; } if_initname(ifp, device_get_name(dev), device_get_unit(dev)); ifp->if_baudrate = IF_Gbps(10); ifp->if_softc = sc; ifp->if_flags = IFF_BROADCAST | IFF_MULTICAST | IFF_SIMPLEX; ifp->if_init = ptnet_init; ifp->if_ioctl = ptnet_ioctl; #if __FreeBSD_version >= 1100000 ifp->if_get_counter = ptnet_get_counter; #endif ifp->if_transmit = ptnet_transmit; ifp->if_qflush = ptnet_qflush; ifmedia_init(&sc->media, IFM_IMASK, ptnet_media_change, ptnet_media_status); ifmedia_add(&sc->media, IFM_ETHER | IFM_10G_T | IFM_FDX, 0, NULL); ifmedia_set(&sc->media, IFM_ETHER | IFM_10G_T | IFM_FDX); macreg = bus_read_4(sc->iomem, PTNET_IO_MAC_HI); sc->hwaddr[0] = (macreg >> 8) & 0xff; sc->hwaddr[1] = macreg & 0xff; macreg = bus_read_4(sc->iomem, PTNET_IO_MAC_LO); sc->hwaddr[2] = (macreg >> 24) & 0xff; sc->hwaddr[3] = (macreg >> 16) & 0xff; sc->hwaddr[4] = (macreg >> 8) & 0xff; sc->hwaddr[5] = macreg & 0xff; ether_ifattach(ifp, sc->hwaddr); ifp->if_hdrlen = sizeof(struct ether_vlan_header); ifp->if_capabilities |= IFCAP_JUMBO_MTU | IFCAP_VLAN_MTU; if (sc->ptfeatures & PTNETMAP_F_VNET_HDR) { /* Similarly to what the vtnet driver does, we can emulate * VLAN offloadings by inserting and removing the 802.1Q * header during transmit and receive. We are then able * to do checksum offloading of VLAN frames. */ ifp->if_capabilities |= IFCAP_HWCSUM | IFCAP_HWCSUM_IPV6 | IFCAP_VLAN_HWCSUM | IFCAP_TSO | IFCAP_LRO | IFCAP_VLAN_HWTSO | IFCAP_VLAN_HWTAGGING; } ifp->if_capenable = ifp->if_capabilities; #ifdef DEVICE_POLLING /* Don't enable polling by default. */ ifp->if_capabilities |= IFCAP_POLLING; #endif snprintf(sc->lock_name, sizeof(sc->lock_name), "%s", device_get_nameunit(dev)); mtx_init(&sc->lock, sc->lock_name, "ptnet core lock", MTX_DEF); callout_init_mtx(&sc->tick, &sc->lock, 0); /* Prepare a netmap_adapter struct instance to do netmap_attach(). */ nifp_offset = bus_read_4(sc->iomem, PTNET_IO_NIFP_OFS); memset(&na_arg, 0, sizeof(na_arg)); na_arg.ifp = ifp; na_arg.num_tx_desc = bus_read_4(sc->iomem, PTNET_IO_NUM_TX_SLOTS); na_arg.num_rx_desc = bus_read_4(sc->iomem, PTNET_IO_NUM_RX_SLOTS); na_arg.num_tx_rings = num_tx_rings; na_arg.num_rx_rings = num_rx_rings; na_arg.nm_config = ptnet_nm_config; na_arg.nm_krings_create = ptnet_nm_krings_create; na_arg.nm_krings_delete = ptnet_nm_krings_delete; na_arg.nm_dtor = ptnet_nm_dtor; na_arg.nm_register = ptnet_nm_register; na_arg.nm_txsync = ptnet_nm_txsync; na_arg.nm_rxsync = ptnet_nm_rxsync; netmap_pt_guest_attach(&na_arg, sc->csb, nifp_offset, bus_read_4(sc->iomem, PTNET_IO_HOSTMEMID)); /* Now a netmap adapter for this ifp has been allocated, and it * can be accessed through NA(ifp). We also have to initialize the CSB * pointer. */ sc->ptna = (struct netmap_pt_guest_adapter *)NA(ifp); /* If virtio-net header was negotiated, set the virt_hdr_len field in * the netmap adapter, to inform users that this netmap adapter requires * the application to deal with the headers. 
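When PTNETMAP_F_VNET_HDR has been acked, that header is the virtio-net header (struct virtio_net_hdr_mrg_rxbuf, i.e. PTNET_HDR_SIZE bytes) prepended to each frame.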
*/ ptnet_update_vnet_hdr(sc); device_printf(dev, "%s() completed\n", __func__); return (0); err_path: ptnet_detach(dev); return err; } static int ptnet_detach(device_t dev) { struct ptnet_softc *sc = device_get_softc(dev); int i; #ifdef DEVICE_POLLING if (sc->ifp->if_capenable & IFCAP_POLLING) { ether_poll_deregister(sc->ifp); } #endif callout_drain(&sc->tick); if (sc->queues) { /* Drain taskqueues before calling if_detach. */ for (i = 0; i < sc->num_rings; i++) { struct ptnet_queue *pq = sc->queues + i; if (pq->taskq) { taskqueue_drain(pq->taskq, &pq->task); } } } if (sc->ifp) { ether_ifdetach(sc->ifp); /* Uninitialize netmap adapters for this device. */ netmap_detach(sc->ifp); ifmedia_removeall(&sc->media); if_free(sc->ifp); sc->ifp = NULL; } ptnet_irqs_fini(sc); if (sc->csb) { bus_write_4(sc->iomem, PTNET_IO_CSBBAH, 0); bus_write_4(sc->iomem, PTNET_IO_CSBBAL, 0); free(sc->csb, M_DEVBUF); sc->csb = NULL; } if (sc->queues) { for (i = 0; i < sc->num_rings; i++) { struct ptnet_queue *pq = sc->queues + i; if (mtx_initialized(&pq->lock)) { mtx_destroy(&pq->lock); } if (pq->bufring != NULL) { buf_ring_free(pq->bufring, M_DEVBUF); } } free(sc->queues, M_DEVBUF); sc->queues = NULL; } if (sc->iomem) { bus_release_resource(dev, SYS_RES_IOPORT, PCIR_BAR(PTNETMAP_IO_PCI_BAR), sc->iomem); sc->iomem = NULL; } mtx_destroy(&sc->lock); device_printf(dev, "%s() completed\n", __func__); return (0); } static int ptnet_suspend(device_t dev) { struct ptnet_softc *sc; sc = device_get_softc(dev); (void)sc; return (0); } static int ptnet_resume(device_t dev) { struct ptnet_softc *sc; sc = device_get_softc(dev); (void)sc; return (0); } static int ptnet_shutdown(device_t dev) { /* * Suspend already does all of what we need to * do here; we just never expect to be resumed. */ return (ptnet_suspend(dev)); } static int ptnet_irqs_init(struct ptnet_softc *sc) { int rid = PCIR_BAR(PTNETMAP_MSIX_PCI_BAR); int nvecs = sc->num_rings; device_t dev = sc->dev; int err = ENOSPC; int cpu_cur; int i; if (pci_find_cap(dev, PCIY_MSIX, NULL) != 0) { device_printf(dev, "Could not find MSI-X capability\n"); return (ENXIO); } sc->msix_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid, RF_ACTIVE); if (sc->msix_mem == NULL) { device_printf(dev, "Failed to allocate MSIX PCI BAR\n"); return (ENXIO); } if (pci_msix_count(dev) < nvecs) { device_printf(dev, "Not enough MSI-X vectors\n"); goto err_path; } err = pci_alloc_msix(dev, &nvecs); if (err) { device_printf(dev, "Failed to allocate MSI-X vectors\n"); goto err_path; } for (i = 0; i < nvecs; i++) { struct ptnet_queue *pq = sc->queues + i; rid = i + 1; pq->irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, RF_ACTIVE); if (pq->irq == NULL) { device_printf(dev, "Failed to allocate interrupt " "for queue #%d\n", i); err = ENOSPC; goto err_path; } } cpu_cur = CPU_FIRST(); for (i = 0; i < nvecs; i++) { struct ptnet_queue *pq = sc->queues + i; void (*handler)(void *) = ptnet_tx_intr; if (i >= sc->num_tx_rings) { handler = ptnet_rx_intr; } err = bus_setup_intr(dev, pq->irq, INTR_TYPE_NET | INTR_MPSAFE, NULL /* intr_filter */, handler, pq, &pq->cookie); if (err) { device_printf(dev, "Failed to register intr handler " "for queue #%d\n", i); goto err_path; } bus_describe_intr(dev, pq->irq, pq->cookie, "q%d", i); #if 0 bus_bind_intr(sc->dev, pq->irq, cpu_cur); #endif cpu_cur = CPU_NEXT(cpu_cur); } device_printf(dev, "Allocated %d MSI-X vectors\n", nvecs); cpu_cur = CPU_FIRST(); for (i = 0; i < nvecs; i++) { struct ptnet_queue *pq = sc->queues + i; static void (*handler)(void *context, int 
pending); handler = (i < sc->num_tx_rings) ? ptnet_tx_task : ptnet_rx_task; TASK_INIT(&pq->task, 0, handler, pq); pq->taskq = taskqueue_create_fast("ptnet_queue", M_NOWAIT, taskqueue_thread_enqueue, &pq->taskq); taskqueue_start_threads(&pq->taskq, 1, PI_NET, "%s-pq-%d", device_get_nameunit(sc->dev), cpu_cur); cpu_cur = CPU_NEXT(cpu_cur); } return 0; err_path: ptnet_irqs_fini(sc); return err; } static void ptnet_irqs_fini(struct ptnet_softc *sc) { device_t dev = sc->dev; int i; for (i = 0; i < sc->num_rings; i++) { struct ptnet_queue *pq = sc->queues + i; if (pq->taskq) { taskqueue_free(pq->taskq); pq->taskq = NULL; } if (pq->cookie) { bus_teardown_intr(dev, pq->irq, pq->cookie); pq->cookie = NULL; } if (pq->irq) { bus_release_resource(dev, SYS_RES_IRQ, i + 1, pq->irq); pq->irq = NULL; } } if (sc->msix_mem) { pci_release_msi(dev); bus_release_resource(dev, SYS_RES_MEMORY, PCIR_BAR(PTNETMAP_MSIX_PCI_BAR), sc->msix_mem); sc->msix_mem = NULL; } } static void ptnet_init(void *opaque) { struct ptnet_softc *sc = opaque; PTNET_CORE_LOCK(sc); ptnet_init_locked(sc); PTNET_CORE_UNLOCK(sc); } static int ptnet_ioctl(if_t ifp, u_long cmd, caddr_t data) { struct ptnet_softc *sc = if_getsoftc(ifp); device_t dev = sc->dev; struct ifreq *ifr = (struct ifreq *)data; int mask, err = 0; switch (cmd) { case SIOCSIFFLAGS: device_printf(dev, "SIOCSIFFLAGS %x\n", ifp->if_flags); PTNET_CORE_LOCK(sc); if (ifp->if_flags & IFF_UP) { /* Network stack wants the iff to be up. */ err = ptnet_init_locked(sc); } else { /* Network stack wants the iff to be down. */ err = ptnet_stop(sc); } /* We don't need to do nothing to support IFF_PROMISC, * since that is managed by the backend port. */ PTNET_CORE_UNLOCK(sc); break; case SIOCSIFCAP: device_printf(dev, "SIOCSIFCAP %x %x\n", ifr->ifr_reqcap, ifp->if_capenable); mask = ifr->ifr_reqcap ^ ifp->if_capenable; #ifdef DEVICE_POLLING if (mask & IFCAP_POLLING) { struct ptnet_queue *pq; int i; if (ifr->ifr_reqcap & IFCAP_POLLING) { err = ether_poll_register(ptnet_poll, ifp); if (err) { break; } /* Stop queues and sync with taskqueues. */ ifp->if_drv_flags &= ~IFF_DRV_RUNNING; for (i = 0; i < sc->num_rings; i++) { pq = sc-> queues + i; /* Make sure the worker sees the * IFF_DRV_RUNNING down. */ PTNET_Q_LOCK(pq); pq->ptring->guest_need_kick = 0; PTNET_Q_UNLOCK(pq); /* Wait for rescheduling to finish. */ if (pq->taskq) { taskqueue_drain(pq->taskq, &pq->task); } } ifp->if_drv_flags |= IFF_DRV_RUNNING; } else { err = ether_poll_deregister(ifp); for (i = 0; i < sc->num_rings; i++) { pq = sc-> queues + i; PTNET_Q_LOCK(pq); pq->ptring->guest_need_kick = 1; PTNET_Q_UNLOCK(pq); } } } #endif /* DEVICE_POLLING */ ifp->if_capenable = ifr->ifr_reqcap; break; case SIOCSIFMTU: /* We support any reasonable MTU. */ if (ifr->ifr_mtu < ETHERMIN || ifr->ifr_mtu > PTNET_MAX_PKT_SIZE) { err = EINVAL; } else { PTNET_CORE_LOCK(sc); ifp->if_mtu = ifr->ifr_mtu; PTNET_CORE_UNLOCK(sc); } break; case SIOCSIFMEDIA: case SIOCGIFMEDIA: err = ifmedia_ioctl(ifp, ifr, &sc->media, cmd); break; default: err = ether_ioctl(ifp, cmd, data); break; } return err; } static int ptnet_init_locked(struct ptnet_softc *sc) { if_t ifp = sc->ifp; struct netmap_adapter *na_dr = &sc->ptna->dr.up; struct netmap_adapter *na_nm = &sc->ptna->hwup.up; unsigned int nm_buf_size; int ret; if (ifp->if_drv_flags & IFF_DRV_RUNNING) { return 0; /* nothing to do */ } device_printf(sc->dev, "%s\n", __func__); /* Translate offload capabilities according to if_capenable. 
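if_hwassist is rebuilt from zero so that capabilities cleared via SIOCSIFCAP do not leave stale CSUM/TSO flags behind.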
*/ ifp->if_hwassist = 0; if (ifp->if_capenable & IFCAP_TXCSUM) ifp->if_hwassist |= PTNET_CSUM_OFFLOAD; if (ifp->if_capenable & IFCAP_TXCSUM_IPV6) ifp->if_hwassist |= PTNET_CSUM_OFFLOAD_IPV6; if (ifp->if_capenable & IFCAP_TSO4) ifp->if_hwassist |= CSUM_IP_TSO; if (ifp->if_capenable & IFCAP_TSO6) ifp->if_hwassist |= CSUM_IP6_TSO; /* * Prepare the interface for netmap mode access. */ netmap_update_config(na_dr); ret = netmap_mem_finalize(na_dr->nm_mem, na_dr); if (ret) { device_printf(sc->dev, "netmap_mem_finalize() failed\n"); return ret; } if (sc->ptna->backend_regifs == 0) { ret = ptnet_nm_krings_create(na_nm); if (ret) { device_printf(sc->dev, "ptnet_nm_krings_create() " "failed\n"); goto err_mem_finalize; } ret = netmap_mem_rings_create(na_dr); if (ret) { device_printf(sc->dev, "netmap_mem_rings_create() " "failed\n"); goto err_rings_create; } ret = netmap_mem_get_lut(na_dr->nm_mem, &na_dr->na_lut); if (ret) { device_printf(sc->dev, "netmap_mem_get_lut() " "failed\n"); goto err_get_lut; } } ret = ptnet_nm_register(na_dr, 1 /* on */); if (ret) { goto err_register; } nm_buf_size = NETMAP_BUF_SIZE(na_dr); KASSERT(nm_buf_size > 0, ("Invalid netmap buffer size")); sc->min_tx_space = PTNET_MAX_PKT_SIZE / nm_buf_size + 2; device_printf(sc->dev, "%s: min_tx_space = %u\n", __func__, sc->min_tx_space); #ifdef PTNETMAP_STATS callout_reset(&sc->tick, hz, ptnet_tick, sc); #endif ifp->if_drv_flags |= IFF_DRV_RUNNING; return 0; err_register: memset(&na_dr->na_lut, 0, sizeof(na_dr->na_lut)); err_get_lut: netmap_mem_rings_delete(na_dr); err_rings_create: ptnet_nm_krings_delete(na_nm); err_mem_finalize: netmap_mem_deref(na_dr->nm_mem, na_dr); return ret; } /* To be called under core lock. */ static int ptnet_stop(struct ptnet_softc *sc) { if_t ifp = sc->ifp; struct netmap_adapter *na_dr = &sc->ptna->dr.up; struct netmap_adapter *na_nm = &sc->ptna->hwup.up; int i; device_printf(sc->dev, "%s\n", __func__); if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) { return 0; /* nothing to do */ } /* Clear the driver-ready flag, and synchronize with all the queues, * so that after this loop we are sure nobody is working anymore with * the device. This scheme is taken from the vtnet driver. */ ifp->if_drv_flags &= ~IFF_DRV_RUNNING; callout_stop(&sc->tick); for (i = 0; i < sc->num_rings; i++) { PTNET_Q_LOCK(sc->queues + i); PTNET_Q_UNLOCK(sc->queues + i); } ptnet_nm_register(na_dr, 0 /* off */); if (sc->ptna->backend_regifs == 0) { netmap_mem_rings_delete(na_dr); ptnet_nm_krings_delete(na_nm); } netmap_mem_deref(na_dr->nm_mem, na_dr); return 0; } static void ptnet_qflush(if_t ifp) { struct ptnet_softc *sc = if_getsoftc(ifp); int i; /* Flush all the bufrings and do the interface flush. */ for (i = 0; i < sc->num_rings; i++) { struct ptnet_queue *pq = sc->queues + i; struct mbuf *m; PTNET_Q_LOCK(pq); if (pq->bufring) { while ((m = buf_ring_dequeue_sc(pq->bufring))) { m_freem(m); } } PTNET_Q_UNLOCK(pq); } if_qflush(ifp); } static int ptnet_media_change(if_t ifp) { struct ptnet_softc *sc = if_getsoftc(ifp); struct ifmedia *ifm = &sc->media; if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER) { return EINVAL; } return 0; } #if __FreeBSD_version >= 1100000 static uint64_t ptnet_get_counter(if_t ifp, ift_counter cnt) { struct ptnet_softc *sc = if_getsoftc(ifp); struct ptnet_queue_stats stats[2]; int i; /* Accumulate statistics over the queues. */ memset(stats, 0, sizeof(stats)); for (i = 0; i < sc->num_rings; i++) { struct ptnet_queue *pq = sc->queues + i; int idx = (i < sc->num_tx_rings) ? 
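As a side note, the min_tx_space value computed above is the worst-case number of netmap slots a maximum-sized packet may occupy; the sketch below reproduces the same arithmetic in isolation. The constants are illustrative only, and the +2 headroom presumably covers the virtio-net header slot plus integer-division rounding.

#include <stdio.h>

int
main(void)
{
	unsigned int max_pkt_size = 65536 + 64;	/* hypothetical PTNET_MAX_PKT_SIZE */
	unsigned int nm_buf_size = 2048;	/* hypothetical NETMAP_BUF_SIZE() */
	unsigned int min_tx_space;

	/* Same formula as the driver: slots needed for a full-sized packet. */
	min_tx_space = max_pkt_size / nm_buf_size + 2;
	printf("min_tx_space = %u slots\n", min_tx_space);
	return (0);
}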
0 : 1; stats[idx].packets += pq->stats.packets; stats[idx].bytes += pq->stats.bytes; stats[idx].errors += pq->stats.errors; stats[idx].iqdrops += pq->stats.iqdrops; stats[idx].mcasts += pq->stats.mcasts; } switch (cnt) { case IFCOUNTER_IPACKETS: return (stats[1].packets); case IFCOUNTER_IQDROPS: return (stats[1].iqdrops); case IFCOUNTER_IERRORS: return (stats[1].errors); case IFCOUNTER_OPACKETS: return (stats[0].packets); case IFCOUNTER_OBYTES: return (stats[0].bytes); case IFCOUNTER_OMCASTS: return (stats[0].mcasts); default: return (if_get_counter_default(ifp, cnt)); } } #endif #ifdef PTNETMAP_STATS /* Called under core lock. */ static void ptnet_tick(void *opaque) { struct ptnet_softc *sc = opaque; int i; for (i = 0; i < sc->num_rings; i++) { struct ptnet_queue *pq = sc->queues + i; struct ptnet_queue_stats cur = pq->stats; struct timeval now; unsigned int delta; microtime(&now); delta = now.tv_usec - sc->last_ts.tv_usec + (now.tv_sec - sc->last_ts.tv_sec) * 1000000; delta /= 1000; /* in milliseconds */ if (delta == 0) continue; device_printf(sc->dev, "#%d[%u ms]:pkts %lu, kicks %lu, " "intr %lu\n", i, delta, (cur.packets - pq->last_stats.packets), (cur.kicks - pq->last_stats.kicks), (cur.intrs - pq->last_stats.intrs)); pq->last_stats = cur; } microtime(&sc->last_ts); callout_schedule(&sc->tick, hz); } #endif /* PTNETMAP_STATS */ static void ptnet_media_status(if_t ifp, struct ifmediareq *ifmr) { /* We are always active, as the backend netmap port is * always open in netmap mode. */ ifmr->ifm_status = IFM_AVALID | IFM_ACTIVE; ifmr->ifm_active = IFM_ETHER | IFM_10G_T | IFM_FDX; } static uint32_t ptnet_nm_ptctl(if_t ifp, uint32_t cmd) { struct ptnet_softc *sc = if_getsoftc(ifp); /* * Write a command and read back error status, * with zero meaning success. */ bus_write_4(sc->iomem, PTNET_IO_PTCTL, cmd); return bus_read_4(sc->iomem, PTNET_IO_PTCTL); } static int ptnet_nm_config(struct netmap_adapter *na, unsigned *txr, unsigned *txd, unsigned *rxr, unsigned *rxd) { struct ptnet_softc *sc = if_getsoftc(na->ifp); *txr = bus_read_4(sc->iomem, PTNET_IO_NUM_TX_RINGS); *rxr = bus_read_4(sc->iomem, PTNET_IO_NUM_RX_RINGS); *txd = bus_read_4(sc->iomem, PTNET_IO_NUM_TX_SLOTS); *rxd = bus_read_4(sc->iomem, PTNET_IO_NUM_RX_SLOTS); device_printf(sc->dev, "txr %u, rxr %u, txd %u, rxd %u\n", *txr, *rxr, *txd, *rxd); return 0; } static void ptnet_sync_from_csb(struct ptnet_softc *sc, struct netmap_adapter *na) { int i; /* Sync krings from the host, reading from * CSB. */ for (i = 0; i < sc->num_rings; i++) { struct ptnet_ring *ptring = sc->queues[i].ptring; struct netmap_kring *kring; if (i < na->num_tx_rings) { kring = na->tx_rings + i; } else { kring = na->rx_rings + i - na->num_tx_rings; } kring->rhead = kring->ring->head = ptring->head; kring->rcur = kring->ring->cur = ptring->cur; kring->nr_hwcur = ptring->hwcur; kring->nr_hwtail = kring->rtail = kring->ring->tail = ptring->hwtail; ND("%d,%d: csb {hc %u h %u c %u ht %u}", t, i, ptring->hwcur, ptring->head, ptring->cur, ptring->hwtail); ND("%d,%d: kring {hc %u rh %u rc %u h %u c %u ht %u rt %u t %u}", t, i, kring->nr_hwcur, kring->rhead, kring->rcur, kring->ring->head, kring->ring->cur, kring->nr_hwtail, kring->rtail, kring->ring->tail); } } static void ptnet_update_vnet_hdr(struct ptnet_softc *sc) { unsigned int wanted_hdr_len = ptnet_vnet_hdr ? 
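The per-queue statistics printed by ptnet_tick() rely on a simple timeval delta; the standalone sketch below shows the same microsecond-to-millisecond arithmetic, using gettimeofday() and usleep() in place of the kernel's microtime() and callout period.

#include <stdio.h>
#include <sys/time.h>
#include <unistd.h>

int
main(void)
{
	struct timeval last, now;
	unsigned int delta_ms;

	gettimeofday(&last, NULL);
	usleep(250 * 1000);		/* stand-in for the tick interval */
	gettimeofday(&now, NULL);

	/* Same arithmetic as ptnet_tick(): usec delta plus sec delta, then ms. */
	delta_ms = (now.tv_usec - last.tv_usec +
	    (now.tv_sec - last.tv_sec) * 1000000) / 1000;
	printf("delta = %u ms\n", delta_ms);
	return (0);
}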
PTNET_HDR_SIZE : 0; bus_write_4(sc->iomem, PTNET_IO_VNET_HDR_LEN, wanted_hdr_len); sc->vnet_hdr_len = bus_read_4(sc->iomem, PTNET_IO_VNET_HDR_LEN); sc->ptna->hwup.up.virt_hdr_len = sc->vnet_hdr_len; } static int ptnet_nm_register(struct netmap_adapter *na, int onoff) { /* device-specific */ if_t ifp = na->ifp; struct ptnet_softc *sc = if_getsoftc(ifp); int native = (na == &sc->ptna->hwup.up); struct ptnet_queue *pq; enum txrx t; int ret = 0; int i; if (!onoff) { sc->ptna->backend_regifs--; } /* If this is the last netmap client, guest interrupt enable flags may * be in arbitrary state. Since these flags are going to be used also * by the netdevice driver, we have to make sure to start with * notifications enabled. Also, schedule NAPI to flush pending packets * in the RX rings, since we will not receive further interrupts * until these will be processed. */ if (native && !onoff && na->active_fds == 0) { D("Exit netmap mode, re-enable interrupts"); for (i = 0; i < sc->num_rings; i++) { pq = sc->queues + i; pq->ptring->guest_need_kick = 1; } } if (onoff) { if (sc->ptna->backend_regifs == 0) { /* Initialize notification enable fields in the CSB. */ for (i = 0; i < sc->num_rings; i++) { pq = sc->queues + i; pq->ptring->host_need_kick = 1; pq->ptring->guest_need_kick = (!(ifp->if_capenable & IFCAP_POLLING) && i >= sc->num_tx_rings); } /* Set the virtio-net header length. */ ptnet_update_vnet_hdr(sc); /* Make sure the host adapter passed through is ready * for txsync/rxsync. */ ret = ptnet_nm_ptctl(ifp, PTNETMAP_PTCTL_CREATE); if (ret) { return ret; } } /* Sync from CSB must be done after REGIF PTCTL. Skip this * step only if this is a netmap client and it is not the * first one. */ if ((!native && sc->ptna->backend_regifs == 0) || (native && na->active_fds == 0)) { ptnet_sync_from_csb(sc, na); } /* If not native, don't call nm_set_native_flags, since we don't want * to replace if_transmit method, nor set NAF_NETMAP_ON */ if (native) { for_rx_tx(t) { for (i = 0; i <= nma_get_nrings(na, t); i++) { struct netmap_kring *kring = &NMR(na, t)[i]; if (nm_kring_pending_on(kring)) { kring->nr_mode = NKR_NETMAP_ON; } } } nm_set_native_flags(na); } } else { if (native) { nm_clear_native_flags(na); for_rx_tx(t) { for (i = 0; i <= nma_get_nrings(na, t); i++) { struct netmap_kring *kring = &NMR(na, t)[i]; if (nm_kring_pending_off(kring)) { kring->nr_mode = NKR_NETMAP_OFF; } } } } /* Sync from CSB must be done before UNREGIF PTCTL, on the last * netmap client. 
*/ if (native && na->active_fds == 0) { ptnet_sync_from_csb(sc, na); } if (sc->ptna->backend_regifs == 0) { ret = ptnet_nm_ptctl(ifp, PTNETMAP_PTCTL_DELETE); } } if (onoff) { sc->ptna->backend_regifs++; } return ret; } static int ptnet_nm_txsync(struct netmap_kring *kring, int flags) { struct ptnet_softc *sc = if_getsoftc(kring->na->ifp); struct ptnet_queue *pq = sc->queues + kring->ring_id; bool notify; notify = netmap_pt_guest_txsync(pq->ptring, kring, flags); if (notify) { ptnet_kick(pq); } return 0; } static int ptnet_nm_rxsync(struct netmap_kring *kring, int flags) { struct ptnet_softc *sc = if_getsoftc(kring->na->ifp); struct ptnet_queue *pq = sc->rxqueues + kring->ring_id; bool notify; notify = netmap_pt_guest_rxsync(pq->ptring, kring, flags); if (notify) { ptnet_kick(pq); } return 0; } static void ptnet_tx_intr(void *opaque) { struct ptnet_queue *pq = opaque; struct ptnet_softc *sc = pq->sc; DBG(device_printf(sc->dev, "Tx interrupt #%d\n", pq->kring_id)); #ifdef PTNETMAP_STATS pq->stats.intrs ++; #endif /* PTNETMAP_STATS */ if (netmap_tx_irq(sc->ifp, pq->kring_id) != NM_IRQ_PASS) { return; } /* Schedule the tasqueue to flush process transmissions requests. * However, vtnet, if_em and if_igb just call ptnet_transmit() here, * at least when using MSI-X interrupts. The if_em driver, instead * schedule taskqueue when using legacy interrupts. */ taskqueue_enqueue(pq->taskq, &pq->task); } static void ptnet_rx_intr(void *opaque) { struct ptnet_queue *pq = opaque; struct ptnet_softc *sc = pq->sc; unsigned int unused; DBG(device_printf(sc->dev, "Rx interrupt #%d\n", pq->kring_id)); #ifdef PTNETMAP_STATS pq->stats.intrs ++; #endif /* PTNETMAP_STATS */ if (netmap_rx_irq(sc->ifp, pq->kring_id, &unused) != NM_IRQ_PASS) { return; } /* Like vtnet, if_igb and if_em drivers when using MSI-X interrupts, * receive-side processing is executed directly in the interrupt * service routine. Alternatively, we may schedule the taskqueue. */ ptnet_rx_eof(pq, PTNET_RX_BUDGET, true); } /* The following offloadings-related functions are taken from the vtnet * driver, but the same functionality is required for the ptnet driver. * As a temporary solution, I copied this code from vtnet and I started * to generalize it (taking away driver-specific statistic accounting), * making as little modifications as possible. * In the future we need to share these functions between vtnet and ptnet. */ static int ptnet_tx_offload_ctx(struct mbuf *m, int *etype, int *proto, int *start) { struct ether_vlan_header *evh; int offset; evh = mtod(m, struct ether_vlan_header *); if (evh->evl_encap_proto == htons(ETHERTYPE_VLAN)) { /* BMV: We should handle nested VLAN tags too. */ *etype = ntohs(evh->evl_proto); offset = sizeof(struct ether_vlan_header); } else { *etype = ntohs(evh->evl_encap_proto); offset = sizeof(struct ether_header); } switch (*etype) { #if defined(INET) case ETHERTYPE_IP: { struct ip *ip, iphdr; if (__predict_false(m->m_len < offset + sizeof(struct ip))) { m_copydata(m, offset, sizeof(struct ip), (caddr_t) &iphdr); ip = &iphdr; } else ip = (struct ip *)(m->m_data + offset); *proto = ip->ip_p; *start = offset + (ip->ip_hl << 2); break; } #endif #if defined(INET6) case ETHERTYPE_IPV6: *proto = -1; *start = ip6_lasthdr(m, offset, IPPROTO_IPV6, proto); /* Assert the network stack sent us a valid packet. */ KASSERT(*start > offset, ("%s: mbuf %p start %d offset %d proto %d", __func__, m, *start, offset, *proto)); break; #endif default: /* Here we should increment the tx_csum_bad_ethtype counter. 
*/ return (EINVAL); } return (0); } static int ptnet_tx_offload_tso(if_t ifp, struct mbuf *m, int eth_type, int offset, bool allow_ecn, struct virtio_net_hdr *hdr) { static struct timeval lastecn; static int curecn; struct tcphdr *tcp, tcphdr; if (__predict_false(m->m_len < offset + sizeof(struct tcphdr))) { m_copydata(m, offset, sizeof(struct tcphdr), (caddr_t) &tcphdr); tcp = &tcphdr; } else tcp = (struct tcphdr *)(m->m_data + offset); hdr->hdr_len = offset + (tcp->th_off << 2); hdr->gso_size = m->m_pkthdr.tso_segsz; hdr->gso_type = eth_type == ETHERTYPE_IP ? VIRTIO_NET_HDR_GSO_TCPV4 : VIRTIO_NET_HDR_GSO_TCPV6; if (tcp->th_flags & TH_CWR) { /* * Drop if VIRTIO_NET_F_HOST_ECN was not negotiated. In FreeBSD, * ECN support is not on a per-interface basis, but globally via * the net.inet.tcp.ecn.enable sysctl knob. The default is off. */ if (!allow_ecn) { if (ppsratecheck(&lastecn, &curecn, 1)) if_printf(ifp, "TSO with ECN not negotiated with host\n"); return (ENOTSUP); } hdr->gso_type |= VIRTIO_NET_HDR_GSO_ECN; } /* Here we should increment tx_tso counter. */ return (0); } static struct mbuf * ptnet_tx_offload(if_t ifp, struct mbuf *m, bool allow_ecn, struct virtio_net_hdr *hdr) { int flags, etype, csum_start, proto, error; flags = m->m_pkthdr.csum_flags; error = ptnet_tx_offload_ctx(m, &etype, &proto, &csum_start); if (error) goto drop; if ((etype == ETHERTYPE_IP && flags & PTNET_CSUM_OFFLOAD) || (etype == ETHERTYPE_IPV6 && flags & PTNET_CSUM_OFFLOAD_IPV6)) { /* * We could compare the IP protocol vs the CSUM_ flag too, * but that really should not be necessary. */ hdr->flags |= VIRTIO_NET_HDR_F_NEEDS_CSUM; hdr->csum_start = csum_start; hdr->csum_offset = m->m_pkthdr.csum_data; /* Here we should increment the tx_csum counter. */ } if (flags & CSUM_TSO) { if (__predict_false(proto != IPPROTO_TCP)) { /* Likely failed to correctly parse the mbuf. * Here we should increment the tx_tso_not_tcp * counter. */ goto drop; } KASSERT(hdr->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM, ("%s: mbuf %p TSO without checksum offload %#x", __func__, m, flags)); error = ptnet_tx_offload_tso(ifp, m, etype, csum_start, allow_ecn, hdr); if (error) goto drop; } return (m); drop: m_freem(m); return (NULL); } static void ptnet_vlan_tag_remove(struct mbuf *m) { struct ether_vlan_header *evh; evh = mtod(m, struct ether_vlan_header *); m->m_pkthdr.ether_vtag = ntohs(evh->evl_tag); m->m_flags |= M_VLANTAG; /* Strip the 802.1Q header. */ bcopy((char *) evh, (char *) evh + ETHER_VLAN_ENCAP_LEN, ETHER_HDR_LEN - ETHER_TYPE_LEN); m_adj(m, ETHER_VLAN_ENCAP_LEN); } /* * Use the checksum offset in the VirtIO header to set the * correct CSUM_* flags. */ static int ptnet_rx_csum_by_offset(struct mbuf *m, uint16_t eth_type, int ip_start, struct virtio_net_hdr *hdr) { #if defined(INET) || defined(INET6) int offset = hdr->csum_start + hdr->csum_offset; #endif /* Only do a basic sanity check on the offset. */ switch (eth_type) { #if defined(INET) case ETHERTYPE_IP: if (__predict_false(offset < ip_start + sizeof(struct ip))) return (1); break; #endif #if defined(INET6) case ETHERTYPE_IPV6: if (__predict_false(offset < ip_start + sizeof(struct ip6_hdr))) return (1); break; #endif default: /* Here we should increment the rx_csum_bad_ethtype counter. */ return (1); } /* * Use the offset to determine the appropriate CSUM_* flags. This is * a bit dirty, but we can get by with it since the checksum offsets * happen to be different. We assume the host host does not do IPv4 * header checksum offloading. 
*/ switch (hdr->csum_offset) { case offsetof(struct udphdr, uh_sum): case offsetof(struct tcphdr, th_sum): m->m_pkthdr.csum_flags |= CSUM_DATA_VALID | CSUM_PSEUDO_HDR; m->m_pkthdr.csum_data = 0xFFFF; break; case offsetof(struct sctphdr, checksum): m->m_pkthdr.csum_flags |= CSUM_SCTP_VALID; break; default: /* Here we should increment the rx_csum_bad_offset counter. */ return (1); } return (0); } static int ptnet_rx_csum_by_parse(struct mbuf *m, uint16_t eth_type, int ip_start, struct virtio_net_hdr *hdr) { int offset, proto; switch (eth_type) { #if defined(INET) case ETHERTYPE_IP: { struct ip *ip; if (__predict_false(m->m_len < ip_start + sizeof(struct ip))) return (1); ip = (struct ip *)(m->m_data + ip_start); proto = ip->ip_p; offset = ip_start + (ip->ip_hl << 2); break; } #endif #if defined(INET6) case ETHERTYPE_IPV6: if (__predict_false(m->m_len < ip_start + sizeof(struct ip6_hdr))) return (1); offset = ip6_lasthdr(m, ip_start, IPPROTO_IPV6, &proto); if (__predict_false(offset < 0)) return (1); break; #endif default: /* Here we should increment the rx_csum_bad_ethtype counter. */ return (1); } switch (proto) { case IPPROTO_TCP: if (__predict_false(m->m_len < offset + sizeof(struct tcphdr))) return (1); m->m_pkthdr.csum_flags |= CSUM_DATA_VALID | CSUM_PSEUDO_HDR; m->m_pkthdr.csum_data = 0xFFFF; break; case IPPROTO_UDP: if (__predict_false(m->m_len < offset + sizeof(struct udphdr))) return (1); m->m_pkthdr.csum_flags |= CSUM_DATA_VALID | CSUM_PSEUDO_HDR; m->m_pkthdr.csum_data = 0xFFFF; break; case IPPROTO_SCTP: if (__predict_false(m->m_len < offset + sizeof(struct sctphdr))) return (1); m->m_pkthdr.csum_flags |= CSUM_SCTP_VALID; break; default: /* * For the remaining protocols, FreeBSD does not support * checksum offloading, so the checksum will be recomputed. */ #if 0 if_printf(ifp, "cksum offload of unsupported " "protocol eth_type=%#x proto=%d csum_start=%d " "csum_offset=%d\n", __func__, eth_type, proto, hdr->csum_start, hdr->csum_offset); #endif break; } return (0); } /* * Set the appropriate CSUM_* flags. Unfortunately, the information * provided is not directly useful to us. The VirtIO header gives the * offset of the checksum, which is all Linux needs, but this is not * how FreeBSD does things. We are forced to peek inside the packet * a bit. * * It would be nice if VirtIO gave us the L4 protocol or if FreeBSD * could accept the offsets and let the stack figure it out. */ static int ptnet_rx_csum(struct mbuf *m, struct virtio_net_hdr *hdr) { struct ether_header *eh; struct ether_vlan_header *evh; uint16_t eth_type; int offset, error; eh = mtod(m, struct ether_header *); eth_type = ntohs(eh->ether_type); if (eth_type == ETHERTYPE_VLAN) { /* BMV: We should handle nested VLAN tags too. */ evh = mtod(m, struct ether_vlan_header *); eth_type = ntohs(evh->evl_proto); offset = sizeof(struct ether_vlan_header); } else offset = sizeof(struct ether_header); if (hdr->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) error = ptnet_rx_csum_by_offset(m, eth_type, offset, hdr); else error = ptnet_rx_csum_by_parse(m, eth_type, offset, hdr); return (error); } /* End of offloading-related functions to be shared with vtnet. */ static inline void ptnet_sync_tail(struct ptnet_ring *ptring, struct netmap_kring *kring) { struct netmap_ring *ring = kring->ring; /* Update hwcur and hwtail as known by the host. 
*/ ptnetmap_guest_read_kring_csb(ptring, kring); /* nm_sync_finalize */ ring->tail = kring->rtail = kring->nr_hwtail; } static void ptnet_ring_update(struct ptnet_queue *pq, struct netmap_kring *kring, unsigned int head, unsigned int sync_flags) { struct netmap_ring *ring = kring->ring; struct ptnet_ring *ptring = pq->ptring; /* Some packets have been pushed to the netmap ring. We have * to tell the host to process the new packets, updating cur * and head in the CSB. */ ring->head = ring->cur = head; /* Mimic nm_txsync_prologue/nm_rxsync_prologue. */ kring->rcur = kring->rhead = head; ptnetmap_guest_write_kring_csb(ptring, kring->rcur, kring->rhead); /* Kick the host if needed. */ if (NM_ACCESS_ONCE(ptring->host_need_kick)) { ptring->sync_flags = sync_flags; ptnet_kick(pq); } } #define PTNET_TX_NOSPACE(_h, _k, _min) \ ((((_h) < (_k)->rtail) ? 0 : (_k)->nkr_num_slots) + \ (_k)->rtail - (_h)) < (_min) /* This function may be called by the network stack, or by * by the taskqueue thread. */ static int ptnet_drain_transmit_queue(struct ptnet_queue *pq, unsigned int budget, bool may_resched) { struct ptnet_softc *sc = pq->sc; bool have_vnet_hdr = sc->vnet_hdr_len; struct netmap_adapter *na = &sc->ptna->dr.up; if_t ifp = sc->ifp; unsigned int batch_count = 0; struct ptnet_ring *ptring; struct netmap_kring *kring; struct netmap_ring *ring; struct netmap_slot *slot; unsigned int count = 0; unsigned int minspace; unsigned int head; unsigned int lim; struct mbuf *mhead; struct mbuf *mf; int nmbuf_bytes; uint8_t *nmbuf; if (!PTNET_Q_TRYLOCK(pq)) { /* We failed to acquire the lock, schedule the taskqueue. */ RD(1, "Deferring TX work"); if (may_resched) { taskqueue_enqueue(pq->taskq, &pq->task); } return 0; } if (unlikely(!(ifp->if_drv_flags & IFF_DRV_RUNNING))) { PTNET_Q_UNLOCK(pq); RD(1, "Interface is down"); return ENETDOWN; } ptring = pq->ptring; kring = na->tx_rings + pq->kring_id; ring = kring->ring; lim = kring->nkr_num_slots - 1; head = ring->head; minspace = sc->min_tx_space; while (count < budget) { if (PTNET_TX_NOSPACE(head, kring, minspace)) { /* We ran out of slot, let's see if the host has * freed up some, by reading hwcur and hwtail from * the CSB. */ ptnet_sync_tail(ptring, kring); if (PTNET_TX_NOSPACE(head, kring, minspace)) { /* Still no slots available. Reactivate the * interrupts so that we can be notified * when some free slots are made available by * the host. */ ptring->guest_need_kick = 1; /* Double-check. */ ptnet_sync_tail(ptring, kring); if (likely(PTNET_TX_NOSPACE(head, kring, minspace))) { break; } RD(1, "Found more slots by doublecheck"); /* More slots were freed before reactivating * the interrupts. */ ptring->guest_need_kick = 0; } } mhead = drbr_peek(ifp, pq->bufring); if (!mhead) { break; } /* Initialize transmission state variables. */ slot = ring->slot + head; nmbuf = NMB(na, slot); nmbuf_bytes = 0; /* If needed, prepare the virtio-net header at the beginning * of the first slot. */ if (have_vnet_hdr) { struct virtio_net_hdr *vh = (struct virtio_net_hdr *)nmbuf; /* For performance, we could replace this memset() with * two 8-bytes-wide writes. */ memset(nmbuf, 0, PTNET_HDR_SIZE); if (mhead->m_pkthdr.csum_flags & PTNET_ALL_OFFLOAD) { mhead = ptnet_tx_offload(ifp, mhead, false, vh); if (unlikely(!mhead)) { /* Packet dropped because errors * occurred while preparing the vnet * header. Let's go ahead with the next * packet. 
*/ pq->stats.errors ++; drbr_advance(ifp, pq->bufring); continue; } } ND(1, "%s: [csum_flags %lX] vnet hdr: flags %x " "csum_start %u csum_ofs %u hdr_len = %u " "gso_size %u gso_type %x", __func__, mhead->m_pkthdr.csum_flags, vh->flags, vh->csum_start, vh->csum_offset, vh->hdr_len, vh->gso_size, vh->gso_type); nmbuf += PTNET_HDR_SIZE; nmbuf_bytes += PTNET_HDR_SIZE; } for (mf = mhead; mf; mf = mf->m_next) { uint8_t *mdata = mf->m_data; int mlen = mf->m_len; for (;;) { int copy = NETMAP_BUF_SIZE(na) - nmbuf_bytes; if (mlen < copy) { copy = mlen; } memcpy(nmbuf, mdata, copy); mdata += copy; mlen -= copy; nmbuf += copy; nmbuf_bytes += copy; if (!mlen) { break; } slot->len = nmbuf_bytes; slot->flags = NS_MOREFRAG; head = nm_next(head, lim); KASSERT(head != ring->tail, ("Unexpectedly run out of TX space")); slot = ring->slot + head; nmbuf = NMB(na, slot); nmbuf_bytes = 0; } } /* Complete last slot and update head. */ slot->len = nmbuf_bytes; slot->flags = 0; head = nm_next(head, lim); /* Consume the packet just processed. */ drbr_advance(ifp, pq->bufring); /* Copy the packet to listeners. */ ETHER_BPF_MTAP(ifp, mhead); pq->stats.packets ++; pq->stats.bytes += mhead->m_pkthdr.len; if (mhead->m_flags & M_MCAST) { pq->stats.mcasts ++; } m_freem(mhead); count ++; if (++batch_count == PTNET_TX_BATCH) { ptnet_ring_update(pq, kring, head, NAF_FORCE_RECLAIM); batch_count = 0; } } if (batch_count) { ptnet_ring_update(pq, kring, head, NAF_FORCE_RECLAIM); } if (count >= budget && may_resched) { DBG(RD(1, "out of budget: resched, %d mbufs pending\n", drbr_inuse(ifp, pq->bufring))); taskqueue_enqueue(pq->taskq, &pq->task); } PTNET_Q_UNLOCK(pq); return count; } static int ptnet_transmit(if_t ifp, struct mbuf *m) { struct ptnet_softc *sc = if_getsoftc(ifp); struct ptnet_queue *pq; unsigned int queue_idx; int err; DBG(device_printf(sc->dev, "transmit %p\n", m)); /* Insert 802.1Q header if needed. */ if (m->m_flags & M_VLANTAG) { m = ether_vlanencap(m, m->m_pkthdr.ether_vtag); if (m == NULL) { return ENOBUFS; } m->m_flags &= ~M_VLANTAG; } /* Get the flow-id if available. */ queue_idx = (M_HASHTYPE_GET(m) != M_HASHTYPE_NONE) ? m->m_pkthdr.flowid : curcpu; if (unlikely(queue_idx >= sc->num_tx_rings)) { queue_idx %= sc->num_tx_rings; } pq = sc->queues + queue_idx; err = drbr_enqueue(ifp, pq->bufring, m); if (err) { /* ENOBUFS when the bufring is full */ RD(1, "%s: drbr_enqueue() failed %d\n", __func__, err); pq->stats.errors ++; return err; } if (ifp->if_capenable & IFCAP_POLLING) { /* If polling is on, the transmit queues will be * drained by the poller. */ return 0; } err = ptnet_drain_transmit_queue(pq, PTNET_TX_BUDGET, true); return (err < 0) ? 
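A small illustration of the transmit-queue selection just above: prefer the flow id assigned by the stack, fall back to the current CPU when no hash is present, and reduce modulo the number of TX rings. The helper name and ring count below are hypothetical.

#include <stdio.h>

#define NUM_TX_RINGS	4

static unsigned int
select_tx_queue(int has_flowid, unsigned int flowid, unsigned int curcpu)
{
	/* Use the stack-provided flow id if any, otherwise the current CPU. */
	unsigned int queue_idx = has_flowid ? flowid : curcpu;

	if (queue_idx >= NUM_TX_RINGS)
		queue_idx %= NUM_TX_RINGS;
	return (queue_idx);
}

int
main(void)
{
	printf("flowid 13 -> queue %u\n", select_tx_queue(1, 13, 0));
	printf("no flowid, cpu 6 -> queue %u\n", select_tx_queue(0, 0, 6));
	return (0);
}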
err : 0; } static unsigned int ptnet_rx_discard(struct netmap_kring *kring, unsigned int head) { struct netmap_ring *ring = kring->ring; struct netmap_slot *slot = ring->slot + head; for (;;) { head = nm_next(head, kring->nkr_num_slots - 1); if (!(slot->flags & NS_MOREFRAG) || head == ring->tail) { break; } slot = ring->slot + head; } return head; } static inline struct mbuf * ptnet_rx_slot(struct mbuf *mtail, uint8_t *nmbuf, unsigned int nmbuf_len) { uint8_t *mdata = mtod(mtail, uint8_t *) + mtail->m_len; do { unsigned int copy; if (mtail->m_len == MCLBYTES) { struct mbuf *mf; mf = m_getcl(M_NOWAIT, MT_DATA, 0); if (unlikely(!mf)) { return NULL; } mtail->m_next = mf; mtail = mf; mdata = mtod(mtail, uint8_t *); mtail->m_len = 0; } copy = MCLBYTES - mtail->m_len; if (nmbuf_len < copy) { copy = nmbuf_len; } memcpy(mdata, nmbuf, copy); nmbuf += copy; nmbuf_len -= copy; mdata += copy; mtail->m_len += copy; } while (nmbuf_len); return mtail; } static int ptnet_rx_eof(struct ptnet_queue *pq, unsigned int budget, bool may_resched) { struct ptnet_softc *sc = pq->sc; bool have_vnet_hdr = sc->vnet_hdr_len; struct ptnet_ring *ptring = pq->ptring; struct netmap_adapter *na = &sc->ptna->dr.up; struct netmap_kring *kring = na->rx_rings + pq->kring_id; struct netmap_ring *ring = kring->ring; unsigned int const lim = kring->nkr_num_slots - 1; unsigned int head = ring->head; unsigned int batch_count = 0; if_t ifp = sc->ifp; unsigned int count = 0; PTNET_Q_LOCK(pq); if (unlikely(!(ifp->if_drv_flags & IFF_DRV_RUNNING))) { goto unlock; } kring->nr_kflags &= ~NKR_PENDINTR; while (count < budget) { unsigned int prev_head = head; struct mbuf *mhead, *mtail; struct virtio_net_hdr *vh; struct netmap_slot *slot; unsigned int nmbuf_len; uint8_t *nmbuf; host_sync: if (head == ring->tail) { /* We ran out of slot, let's see if the host has * added some, by reading hwcur and hwtail from * the CSB. */ ptnet_sync_tail(ptring, kring); if (head == ring->tail) { /* Still no slots available. Reactivate * interrupts as they were disabled by the * host thread right before issuing the * last interrupt. */ ptring->guest_need_kick = 1; /* Double-check. */ ptnet_sync_tail(ptring, kring); if (likely(head == ring->tail)) { break; } ptring->guest_need_kick = 0; } } /* Initialize ring state variables, possibly grabbing the * virtio-net header. */ slot = ring->slot + head; nmbuf = NMB(na, slot); nmbuf_len = slot->len; vh = (struct virtio_net_hdr *)nmbuf; if (have_vnet_hdr) { if (unlikely(nmbuf_len < PTNET_HDR_SIZE)) { /* There is no good reason why host should * put the header in multiple netmap slots. * If this is the case, discard. */ RD(1, "Fragmented vnet-hdr: dropping"); head = ptnet_rx_discard(kring, head); pq->stats.iqdrops ++; goto skip; } ND(1, "%s: vnet hdr: flags %x csum_start %u " "csum_ofs %u hdr_len = %u gso_size %u " "gso_type %x", __func__, vh->flags, vh->csum_start, vh->csum_offset, vh->hdr_len, vh->gso_size, vh->gso_type); nmbuf += PTNET_HDR_SIZE; nmbuf_len -= PTNET_HDR_SIZE; } /* Allocate the head of a new mbuf chain. * We use m_getcl() to allocate an mbuf with standard cluster * size (MCLBYTES). In the future we could use m_getjcl() * to choose different sizes. */ mhead = mtail = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR); if (unlikely(mhead == NULL)) { device_printf(sc->dev, "%s: failed to allocate mbuf " "head\n", __func__); pq->stats.errors ++; break; } /* Initialize the mbuf state variables. */ mhead->m_pkthdr.len = nmbuf_len; mtail->m_len = 0; /* Scan all the netmap slots containing the current packet. 
*/ for (;;) { DBG(device_printf(sc->dev, "%s: h %u t %u rcv frag " "len %u, flags %u\n", __func__, head, ring->tail, slot->len, slot->flags)); mtail = ptnet_rx_slot(mtail, nmbuf, nmbuf_len); if (unlikely(!mtail)) { /* Ouch. We ran out of memory while processing * a packet. We have to restore the previous * head position, free the mbuf chain, and * schedule the taskqueue to give the packet * another chance. */ device_printf(sc->dev, "%s: failed to allocate" " mbuf frag, reset head %u --> %u\n", __func__, head, prev_head); head = prev_head; m_freem(mhead); pq->stats.errors ++; if (may_resched) { taskqueue_enqueue(pq->taskq, &pq->task); } goto escape; } /* We have to increment head irrespective of the * NS_MOREFRAG being set or not. */ head = nm_next(head, lim); if (!(slot->flags & NS_MOREFRAG)) { break; } if (unlikely(head == ring->tail)) { /* The very last slot prepared by the host has * the NS_MOREFRAG set. Drop it and continue * the outer cycle (to do the double-check). */ RD(1, "Incomplete packet: dropping"); m_freem(mhead); pq->stats.iqdrops ++; goto host_sync; } slot = ring->slot + head; nmbuf = NMB(na, slot); nmbuf_len = slot->len; mhead->m_pkthdr.len += nmbuf_len; } mhead->m_pkthdr.rcvif = ifp; mhead->m_pkthdr.csum_flags = 0; /* Store the queue idx in the packet header. */ mhead->m_pkthdr.flowid = pq->kring_id; M_HASHTYPE_SET(mhead, M_HASHTYPE_OPAQUE); if (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) { struct ether_header *eh; eh = mtod(mhead, struct ether_header *); if (eh->ether_type == htons(ETHERTYPE_VLAN)) { ptnet_vlan_tag_remove(mhead); /* * With the 802.1Q header removed, update the * checksum starting location accordingly. */ if (vh->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) vh->csum_start -= ETHER_VLAN_ENCAP_LEN; } } if (have_vnet_hdr && (vh->flags & (VIRTIO_NET_HDR_F_NEEDS_CSUM | VIRTIO_NET_HDR_F_DATA_VALID))) { if (unlikely(ptnet_rx_csum(mhead, vh))) { m_freem(mhead); RD(1, "Csum offload error: dropping"); pq->stats.iqdrops ++; goto skip; } } pq->stats.packets ++; pq->stats.bytes += mhead->m_pkthdr.len; PTNET_Q_UNLOCK(pq); (*ifp->if_input)(ifp, mhead); PTNET_Q_LOCK(pq); if (unlikely(!(ifp->if_drv_flags & IFF_DRV_RUNNING))) { /* The interface has gone down while we didn't * have the lock. Stop any processing and exit. */ goto unlock; } skip: count ++; if (++batch_count == PTNET_RX_BATCH) { /* Some packets have been pushed to the network stack. * We need to update the CSB to tell the host about the new * ring->cur and ring->head (RX buffer refill). */ ptnet_ring_update(pq, kring, head, NAF_FORCE_READ); batch_count = 0; } } escape: if (batch_count) { ptnet_ring_update(pq, kring, head, NAF_FORCE_READ); } if (count >= budget && may_resched) { /* If we ran out of budget or the double-check found new * slots to process, schedule the taskqueue. */ DBG(RD(1, "out of budget: resched h %u t %u\n", head, ring->tail)); taskqueue_enqueue(pq->taskq, &pq->task); } unlock: PTNET_Q_UNLOCK(pq); return count; } static void ptnet_rx_task(void *context, int pending) { struct ptnet_queue *pq = context; DBG(RD(1, "%s: pq #%u\n", __func__, pq->kring_id)); ptnet_rx_eof(pq, PTNET_RX_BUDGET, true); } static void ptnet_tx_task(void *context, int pending) { struct ptnet_queue *pq = context; DBG(RD(1, "%s: pq #%u\n", __func__, pq->kring_id)); ptnet_drain_transmit_queue(pq, PTNET_TX_BUDGET, true); } #ifdef DEVICE_POLLING /* We don't need to handle differently POLL_AND_CHECK_STATUS and * POLL_ONLY, since we don't have an Interrupt Status Register. 
*/ static int ptnet_poll(if_t ifp, enum poll_cmd cmd, int budget) { struct ptnet_softc *sc = if_getsoftc(ifp); unsigned int queue_budget; unsigned int count = 0; bool borrow = false; int i; KASSERT(sc->num_rings > 0, ("Found no queues in while polling ptnet")); queue_budget = MAX(budget / sc->num_rings, 1); RD(1, "Per-queue budget is %d", queue_budget); while (budget) { unsigned int rcnt = 0; for (i = 0; i < sc->num_rings; i++) { struct ptnet_queue *pq = sc->queues + i; if (borrow) { queue_budget = MIN(queue_budget, budget); if (queue_budget == 0) { break; } } if (i < sc->num_tx_rings) { rcnt += ptnet_drain_transmit_queue(pq, queue_budget, false); } else { rcnt += ptnet_rx_eof(pq, queue_budget, false); } } if (!rcnt) { /* A scan of the queues gave no result, we can * stop here. */ break; } if (rcnt > budget) { /* This may happen when initial budget < sc->num_rings, * since one packet budget is given to each queue * anyway. Just pretend we didn't eat "so much". */ rcnt = budget; } count += rcnt; budget -= rcnt; borrow = true; } return count; } #endif /* DEVICE_POLLING */ Index: head/sys/dev/nvme/nvme_ns.c =================================================================== --- head/sys/dev/nvme/nvme_ns.c (revision 327948) +++ head/sys/dev/nvme/nvme_ns.c (revision 327949) @@ -1,584 +1,585 @@ /*- * SPDX-License-Identifier: BSD-2-Clause-FreeBSD * * Copyright (C) 2012-2013 Intel Corporation * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
*/ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include "nvme_private.h" static void nvme_bio_child_inbed(struct bio *parent, int bio_error); static void nvme_bio_child_done(void *arg, const struct nvme_completion *cpl); static uint32_t nvme_get_num_segments(uint64_t addr, uint64_t size, uint32_t alignment); static void nvme_free_child_bios(int num_bios, struct bio **child_bios); static struct bio ** nvme_allocate_child_bios(int num_bios); static struct bio ** nvme_construct_child_bios(struct bio *bp, uint32_t alignment, int *num_bios); static int nvme_ns_split_bio(struct nvme_namespace *ns, struct bio *bp, uint32_t alignment); static int nvme_ns_ioctl(struct cdev *cdev, u_long cmd, caddr_t arg, int flag, struct thread *td) { struct nvme_namespace *ns; struct nvme_controller *ctrlr; struct nvme_pt_command *pt; ns = cdev->si_drv1; ctrlr = ns->ctrlr; switch (cmd) { case NVME_IO_TEST: case NVME_BIO_TEST: nvme_ns_test(ns, cmd, arg); break; case NVME_PASSTHROUGH_CMD: pt = (struct nvme_pt_command *)arg; return (nvme_ctrlr_passthrough_cmd(ctrlr, pt, ns->id, 1 /* is_user_buffer */, 0 /* is_admin_cmd */)); case DIOCGMEDIASIZE: *(off_t *)arg = (off_t)nvme_ns_get_size(ns); break; case DIOCGSECTORSIZE: *(u_int *)arg = nvme_ns_get_sector_size(ns); break; default: return (ENOTTY); } return (0); } static int nvme_ns_open(struct cdev *dev __unused, int flags, int fmt __unused, struct thread *td) { int error = 0; if (flags & FWRITE) error = securelevel_gt(td->td_ucred, 0); return (error); } static int nvme_ns_close(struct cdev *dev __unused, int flags, int fmt __unused, struct thread *td) { return (0); } static void nvme_ns_strategy_done(void *arg, const struct nvme_completion *cpl) { struct bio *bp = arg; /* * TODO: add more extensive translation of NVMe status codes * to different bio error codes (i.e. EIO, EINVAL, etc.) 
*/ if (nvme_completion_is_error(cpl)) { bp->bio_error = EIO; bp->bio_flags |= BIO_ERROR; bp->bio_resid = bp->bio_bcount; } else bp->bio_resid = 0; biodone(bp); } static void nvme_ns_strategy(struct bio *bp) { struct nvme_namespace *ns; int err; ns = bp->bio_dev->si_drv1; err = nvme_ns_bio_process(ns, bp, nvme_ns_strategy_done); if (err) { bp->bio_error = err; bp->bio_flags |= BIO_ERROR; bp->bio_resid = bp->bio_bcount; biodone(bp); } } static struct cdevsw nvme_ns_cdevsw = { .d_version = D_VERSION, .d_flags = D_DISK, .d_read = physread, .d_write = physwrite, .d_open = nvme_ns_open, .d_close = nvme_ns_close, .d_strategy = nvme_ns_strategy, .d_ioctl = nvme_ns_ioctl }; uint32_t nvme_ns_get_max_io_xfer_size(struct nvme_namespace *ns) { return ns->ctrlr->max_xfer_size; } uint32_t nvme_ns_get_sector_size(struct nvme_namespace *ns) { return (1 << ns->data.lbaf[ns->data.flbas.format].lbads); } uint64_t nvme_ns_get_num_sectors(struct nvme_namespace *ns) { return (ns->data.nsze); } uint64_t nvme_ns_get_size(struct nvme_namespace *ns) { return (nvme_ns_get_num_sectors(ns) * nvme_ns_get_sector_size(ns)); } uint32_t nvme_ns_get_flags(struct nvme_namespace *ns) { return (ns->flags); } const char * nvme_ns_get_serial_number(struct nvme_namespace *ns) { return ((const char *)ns->ctrlr->cdata.sn); } const char * nvme_ns_get_model_number(struct nvme_namespace *ns) { return ((const char *)ns->ctrlr->cdata.mn); } const struct nvme_namespace_data * nvme_ns_get_data(struct nvme_namespace *ns) { return (&ns->data); } uint32_t nvme_ns_get_stripesize(struct nvme_namespace *ns) { return (ns->stripesize); } static void nvme_ns_bio_done(void *arg, const struct nvme_completion *status) { struct bio *bp = arg; nvme_cb_fn_t bp_cb_fn; bp_cb_fn = bp->bio_driver1; if (bp->bio_driver2) free(bp->bio_driver2, M_NVME); if (nvme_completion_is_error(status)) { bp->bio_flags |= BIO_ERROR; if (bp->bio_error == 0) bp->bio_error = EIO; } if ((bp->bio_flags & BIO_ERROR) == 0) bp->bio_resid = 0; else bp->bio_resid = bp->bio_bcount; bp_cb_fn(bp, status); } static void nvme_bio_child_inbed(struct bio *parent, int bio_error) { struct nvme_completion parent_cpl; int children, inbed; if (bio_error != 0) { parent->bio_flags |= BIO_ERROR; parent->bio_error = bio_error; } /* * atomic_fetchadd will return value before adding 1, so we still * must add 1 to get the updated inbed number. Save bio_children * before incrementing to guard against race conditions when * two children bios complete on different queues. */ children = atomic_load_acq_int(&parent->bio_children); inbed = atomic_fetchadd_int(&parent->bio_inbed, 1) + 1; if (inbed == children) { bzero(&parent_cpl, sizeof(parent_cpl)); if (parent->bio_flags & BIO_ERROR) parent_cpl.status.sc = NVME_SC_DATA_TRANSFER_ERROR; nvme_ns_bio_done(parent, &parent_cpl); } } static void nvme_bio_child_done(void *arg, const struct nvme_completion *cpl) { struct bio *child = arg; struct bio *parent; int bio_error; parent = child->bio_parent; g_destroy_bio(child); bio_error = nvme_completion_is_error(cpl) ? 
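The child-completion bookkeeping in nvme_bio_child_inbed() above can be sketched in userspace with C11 atomics standing in for atomic_fetchadd_int(); the structure below is illustrative only and is not the driver's bio layout.

#include <stdatomic.h>
#include <stdio.h>

struct parent {
	int		children;	/* number of child I/Os issued */
	atomic_int	inbed;		/* children completed so far */
	int		error;
};

static void
child_done(struct parent *p, int child_error)
{
	if (child_error != 0)
		p->error = child_error;
	/* fetch_add returns the old value, so add 1 for the updated count. */
	int inbed = atomic_fetch_add(&p->inbed, 1) + 1;
	if (inbed == p->children)
		printf("parent done, error=%d\n", p->error);
}

int
main(void)
{
	struct parent p = { .children = 3, .inbed = 0, .error = 0 };

	for (int i = 0; i < p.children; i++)
		child_done(&p, 0);
	return (0);
}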
EIO : 0; nvme_bio_child_inbed(parent, bio_error); } static uint32_t nvme_get_num_segments(uint64_t addr, uint64_t size, uint32_t align) { uint32_t num_segs, offset, remainder; if (align == 0) return (1); KASSERT((align & (align - 1)) == 0, ("alignment not power of 2\n")); num_segs = size / align; remainder = size & (align - 1); offset = addr & (align - 1); if (remainder > 0 || offset > 0) num_segs += 1 + (remainder + offset - 1) / align; return (num_segs); } static void nvme_free_child_bios(int num_bios, struct bio **child_bios) { int i; for (i = 0; i < num_bios; i++) { if (child_bios[i] != NULL) g_destroy_bio(child_bios[i]); } free(child_bios, M_NVME); } static struct bio ** nvme_allocate_child_bios(int num_bios) { struct bio **child_bios; int err = 0, i; - child_bios = malloc(num_bios * sizeof(struct bio *), M_NVME, M_NOWAIT); + child_bios = mallocarray(num_bios, sizeof(struct bio *), M_NVME, + M_NOWAIT); if (child_bios == NULL) return (NULL); for (i = 0; i < num_bios; i++) { child_bios[i] = g_new_bio(); if (child_bios[i] == NULL) err = ENOMEM; } if (err == ENOMEM) { nvme_free_child_bios(num_bios, child_bios); return (NULL); } return (child_bios); } static struct bio ** nvme_construct_child_bios(struct bio *bp, uint32_t alignment, int *num_bios) { struct bio **child_bios; struct bio *child; uint64_t cur_offset; caddr_t data; uint32_t rem_bcount; int i; #ifdef NVME_UNMAPPED_BIO_SUPPORT struct vm_page **ma; uint32_t ma_offset; #endif *num_bios = nvme_get_num_segments(bp->bio_offset, bp->bio_bcount, alignment); child_bios = nvme_allocate_child_bios(*num_bios); if (child_bios == NULL) return (NULL); bp->bio_children = *num_bios; bp->bio_inbed = 0; cur_offset = bp->bio_offset; rem_bcount = bp->bio_bcount; data = bp->bio_data; #ifdef NVME_UNMAPPED_BIO_SUPPORT ma_offset = bp->bio_ma_offset; ma = bp->bio_ma; #endif for (i = 0; i < *num_bios; i++) { child = child_bios[i]; child->bio_parent = bp; child->bio_cmd = bp->bio_cmd; child->bio_offset = cur_offset; child->bio_bcount = min(rem_bcount, alignment - (cur_offset & (alignment - 1))); child->bio_flags = bp->bio_flags; #ifdef NVME_UNMAPPED_BIO_SUPPORT if (bp->bio_flags & BIO_UNMAPPED) { child->bio_ma_offset = ma_offset; child->bio_ma = ma; child->bio_ma_n = nvme_get_num_segments(child->bio_ma_offset, child->bio_bcount, PAGE_SIZE); ma_offset = (ma_offset + child->bio_bcount) & PAGE_MASK; ma += child->bio_ma_n; if (ma_offset != 0) ma -= 1; } else #endif { child->bio_data = data; data += child->bio_bcount; } cur_offset += child->bio_bcount; rem_bcount -= child->bio_bcount; } return (child_bios); } static int nvme_ns_split_bio(struct nvme_namespace *ns, struct bio *bp, uint32_t alignment) { struct bio *child; struct bio **child_bios; int err, i, num_bios; child_bios = nvme_construct_child_bios(bp, alignment, &num_bios); if (child_bios == NULL) return (ENOMEM); for (i = 0; i < num_bios; i++) { child = child_bios[i]; err = nvme_ns_bio_process(ns, child, nvme_bio_child_done); if (err != 0) { nvme_bio_child_inbed(bp, err); g_destroy_bio(child); } } free(child_bios, M_NVME); return (0); } int nvme_ns_bio_process(struct nvme_namespace *ns, struct bio *bp, nvme_cb_fn_t cb_fn) { struct nvme_dsm_range *dsm_range; uint32_t num_bios; int err; bp->bio_driver1 = cb_fn; if (ns->stripesize > 0 && (bp->bio_cmd == BIO_READ || bp->bio_cmd == BIO_WRITE)) { num_bios = nvme_get_num_segments(bp->bio_offset, bp->bio_bcount, ns->stripesize); if (num_bios > 1) return (nvme_ns_split_bio(ns, bp, ns->stripesize)); } switch (bp->bio_cmd) { case BIO_READ: err = 
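The hunk above is the substance of this change for nvme_ns.c: a hand-rolled num_bios * sizeof(...) multiplication is replaced by mallocarray(9), which fails instead of silently overflowing. A minimal userspace equivalent of that guard (normally one would simply use calloc() or reallocarray()) might look like this:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

static void *
xmallocarray(size_t nmemb, size_t size)
{
	/* Refuse requests whose byte count would not fit in size_t. */
	if (size != 0 && nmemb > SIZE_MAX / size)
		return (NULL);
	return (malloc(nmemb * size));
}

int
main(void)
{
	void *ok = xmallocarray(16, sizeof(void *));
	void *bad = xmallocarray(SIZE_MAX / 2, 4);	/* would overflow */

	printf("ok=%p bad=%p\n", ok, bad);
	free(ok);
	return (0);
}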
nvme_ns_cmd_read_bio(ns, bp, nvme_ns_bio_done, bp); break; case BIO_WRITE: err = nvme_ns_cmd_write_bio(ns, bp, nvme_ns_bio_done, bp); break; case BIO_FLUSH: err = nvme_ns_cmd_flush(ns, nvme_ns_bio_done, bp); break; case BIO_DELETE: dsm_range = malloc(sizeof(struct nvme_dsm_range), M_NVME, M_ZERO | M_WAITOK); dsm_range->length = bp->bio_bcount/nvme_ns_get_sector_size(ns); dsm_range->starting_lba = bp->bio_offset/nvme_ns_get_sector_size(ns); bp->bio_driver2 = dsm_range; err = nvme_ns_cmd_deallocate(ns, dsm_range, 1, nvme_ns_bio_done, bp); if (err != 0) free(dsm_range, M_NVME); break; default: err = EIO; break; } return (err); } int nvme_ns_construct(struct nvme_namespace *ns, uint32_t id, struct nvme_controller *ctrlr) { struct nvme_completion_poll_status status; int unit; ns->ctrlr = ctrlr; ns->id = id; ns->stripesize = 0; if (pci_get_devid(ctrlr->dev) == 0x09538086 && ctrlr->cdata.vs[3] != 0) ns->stripesize = (1 << ctrlr->cdata.vs[3]) * ctrlr->min_page_size; /* * Namespaces are reconstructed after a controller reset, so check * to make sure we only call mtx_init once on each mtx. * * TODO: Move this somewhere where it gets called at controller * construction time, which is not invoked as part of each * controller reset. */ if (!mtx_initialized(&ns->lock)) mtx_init(&ns->lock, "nvme ns lock", NULL, MTX_DEF); status.done = FALSE; nvme_ctrlr_cmd_identify_namespace(ctrlr, id, &ns->data, nvme_completion_poll_cb, &status); while (status.done == FALSE) DELAY(5); if (nvme_completion_is_error(&status.cpl)) { nvme_printf(ctrlr, "nvme_identify_namespace failed\n"); return (ENXIO); } /* * If the size of is zero, chances are this isn't a valid * namespace (eg one that's not been configured yet). The * standard says the entire id will be zeros, so this is a * cheap way to test for that. */ if (ns->data.nsze == 0) return (ENXIO); /* * Note: format is a 0-based value, so > is appropriate here, * not >=. */ if (ns->data.flbas.format > ns->data.nlbaf) { printf("lba format %d exceeds number supported (%d)\n", ns->data.flbas.format, ns->data.nlbaf+1); return (ENXIO); } if (ctrlr->cdata.oncs.dsm) ns->flags |= NVME_NS_DEALLOCATE_SUPPORTED; if (ctrlr->cdata.vwc.present) ns->flags |= NVME_NS_FLUSH_SUPPORTED; /* * cdev may have already been created, if we are reconstructing the * namespace after a controller-level reset. */ if (ns->cdev != NULL) return (0); /* * Namespace IDs start at 1, so we need to subtract 1 to create a * correct unit number. */ unit = device_get_unit(ctrlr->dev) * NVME_MAX_NAMESPACES + ns->id - 1; /* * MAKEDEV_ETERNAL was added in r210923, for cdevs that will never * be destroyed. This avoids refcounting on the cdev object. * That should be OK case here, as long as we're not supporting PCIe * surprise removal nor namespace deletion. 
*/ #ifdef MAKEDEV_ETERNAL_KLD ns->cdev = make_dev_credf(MAKEDEV_ETERNAL_KLD, &nvme_ns_cdevsw, unit, NULL, UID_ROOT, GID_WHEEL, 0600, "nvme%dns%d", device_get_unit(ctrlr->dev), ns->id); #else ns->cdev = make_dev_credf(0, &nvme_ns_cdevsw, unit, NULL, UID_ROOT, GID_WHEEL, 0600, "nvme%dns%d", device_get_unit(ctrlr->dev), ns->id); #endif #ifdef NVME_UNMAPPED_BIO_SUPPORT ns->cdev->si_flags |= SI_UNMAPPED; #endif if (ns->cdev != NULL) ns->cdev->si_drv1 = ns; return (0); } void nvme_ns_destruct(struct nvme_namespace *ns) { if (ns->cdev != NULL) destroy_dev(ns->cdev); } Index: head/sys/dev/pst/pst-iop.c =================================================================== --- head/sys/dev/pst/pst-iop.c (revision 327948) +++ head/sys/dev/pst/pst-iop.c (revision 327949) @@ -1,504 +1,504 @@ /*- * SPDX-License-Identifier: BSD-3-Clause * * Copyright (c) 2001,2002,2003 Søren Schmidt * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer, * without modification, immediately at the beginning of the file. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. The name of the author may not be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "dev/pst/pst-iop.h" struct iop_request { struct i2o_single_reply *reply; u_int32_t mfa; }; /* local vars */ MALLOC_DEFINE(M_PSTIOP, "PSTIOP", "Promise SuperTrak IOP driver"); int iop_init(struct iop_softc *sc) { int mfa, timeout = 10000; while ((mfa = sc->reg->iqueue) == 0xffffffff && --timeout) DELAY(1000); if (!timeout) { printf("pstiop: no free mfa\n"); return 0; } iop_free_mfa(sc, mfa); sc->reg->oqueue_intr_mask = 0xffffffff; if (!iop_reset(sc)) { printf("pstiop: no reset response\n"); return 0; } if (!iop_init_outqueue(sc)) { printf("pstiop: init outbound queue failed\n"); return 0; } /* register iop_attach to be run when interrupts are enabled */ if (!(sc->iop_delayed_attach = (struct intr_config_hook *) malloc(sizeof(struct intr_config_hook), M_PSTIOP, M_NOWAIT | M_ZERO))) { printf("pstiop: malloc of delayed attach hook failed\n"); return 0; } sc->iop_delayed_attach->ich_func = iop_attach; sc->iop_delayed_attach->ich_arg = sc; if (config_intrhook_establish(sc->iop_delayed_attach)) { printf("pstiop: config_intrhook_establish failed\n"); free(sc->iop_delayed_attach, M_PSTIOP); } return 1; } void iop_attach(void *arg) { struct iop_softc *sc; int i; sc = arg; if (sc->iop_delayed_attach) { config_intrhook_disestablish(sc->iop_delayed_attach); free(sc->iop_delayed_attach, M_PSTIOP); sc->iop_delayed_attach = NULL; } if (!iop_get_lct(sc)) { printf("pstiop: get LCT failed\n"); return; } /* figure out what devices are here and config as needed */ for (i = 0; sc->lct[i].entry_size == I2O_LCT_ENTRYSIZE; i++) { #ifdef PSTDEBUG struct i2o_get_param_reply *reply; printf("pstiop: LCT entry %d ", i); printf("class=%04x ", sc->lct[i].class); printf("sub=%04x ", sc->lct[i].sub_class); printf("localtid=%04x ", sc->lct[i].local_tid); printf("usertid=%04x ", sc->lct[i].user_tid); printf("parentid=%04x\n", sc->lct[i].parent_tid); if ((reply = iop_get_util_params(sc, sc->lct[i].local_tid, I2O_PARAMS_OPERATION_FIELD_GET, I2O_UTIL_DEVICE_IDENTITY_GROUP_NO))) { struct i2o_device_identity *ident = (struct i2o_device_identity *)reply->result; printf("pstiop: vendor=<%.16s> product=<%.16s>\n", ident->vendor, ident->product); printf("pstiop: description=<%.16s> revision=<%.8s>\n", ident->description, ident->revision); contigfree(reply, PAGE_SIZE, M_PSTIOP); } #endif if (sc->lct[i].user_tid != I2O_TID_NONE && sc->lct[i].user_tid != I2O_TID_HOST) continue; switch (sc->lct[i].class) { case I2O_CLASS_DDM: if (sc->lct[i].sub_class == I2O_SUBCLASS_ISM) sc->ism = sc->lct[i].local_tid; break; case I2O_CLASS_RANDOM_BLOCK_STORAGE: pst_add_raid(sc, &sc->lct[i]); break; } } /* setup and enable interrupts */ bus_setup_intr(sc->dev, sc->r_irq, INTR_TYPE_BIO|INTR_ENTROPY|INTR_MPSAFE, NULL, iop_intr, sc, &sc->handle); sc->reg->oqueue_intr_mask = 0x0; } void iop_intr(void *data) { struct iop_softc *sc = (struct iop_softc *)data; struct i2o_single_reply *reply; u_int32_t mfa; /* we might get more than one finished request pr interrupt */ mtx_lock(&sc->mtx); while (1) { if ((mfa = sc->reg->oqueue) == 0xffffffff) if ((mfa = sc->reg->oqueue) == 0xffffffff) break; reply = (struct i2o_single_reply *)(sc->obase + (mfa - sc->phys_obase)); /* if this is an event register reply, shout! 
*/ if (reply->function == I2O_UTIL_EVENT_REGISTER) { struct i2o_util_event_reply_message *event = (struct i2o_util_event_reply_message *)reply; printf("pstiop: EVENT!! idx=%08x data=%08x\n", event->event_mask, event->event_data[0]); break; } /* if reply is a failurenotice we need to free the original mfa */ if (reply->message_flags & I2O_MESSAGE_FLAGS_FAIL) iop_free_mfa(sc,((struct i2o_fault_reply *)(reply))->preserved_mfa); /* reply->initiator_context points to the service routine */ ((void (*)(struct iop_softc *, u_int32_t, struct i2o_single_reply *)) (reply->initiator_context))(sc, mfa, reply); } mtx_unlock(&sc->mtx); } int iop_reset(struct iop_softc *sc) { struct i2o_exec_iop_reset_message *msg; int mfa, timeout = 5000; volatile u_int32_t reply = 0; mfa = iop_get_mfa(sc); msg = (struct i2o_exec_iop_reset_message *)(sc->ibase + mfa); bzero(msg, sizeof(struct i2o_exec_iop_reset_message)); msg->version_offset = 0x1; msg->message_flags = 0x0; msg->message_size = sizeof(struct i2o_exec_iop_reset_message) >> 2; msg->target_address = I2O_TID_IOP; msg->initiator_address = I2O_TID_HOST; msg->function = I2O_EXEC_IOP_RESET; msg->status_word_low_addr = vtophys(&reply); msg->status_word_high_addr = 0; sc->reg->iqueue = mfa; while (--timeout && !reply) DELAY(1000); /* wait for iqueue ready */ timeout = 10000; while ((mfa = sc->reg->iqueue) == 0xffffffff && --timeout) DELAY(1000); iop_free_mfa(sc, mfa); return reply; } int iop_init_outqueue(struct iop_softc *sc) { struct i2o_exec_init_outqueue_message *msg; int i, mfa, timeout = 5000; volatile u_int32_t reply = 0; if (!(sc->obase = contigmalloc(I2O_IOP_OUTBOUND_FRAME_COUNT * I2O_IOP_OUTBOUND_FRAME_SIZE, M_PSTIOP, M_NOWAIT, 0x00010000, 0xFFFFFFFF, PAGE_SIZE, 0))) { printf("pstiop: contigmalloc of outqueue buffers failed!\n"); return 0; } sc->phys_obase = vtophys(sc->obase); mfa = iop_get_mfa(sc); msg = (struct i2o_exec_init_outqueue_message *)(sc->ibase + mfa); bzero(msg, sizeof(struct i2o_exec_init_outqueue_message)); msg->version_offset = 0x61; msg->message_flags = 0x0; msg->message_size = sizeof(struct i2o_exec_init_outqueue_message) >> 2; msg->target_address = I2O_TID_IOP; msg->initiator_address = I2O_TID_HOST; msg->function = I2O_EXEC_OUTBOUND_INIT; msg->host_pagesize = PAGE_SIZE; msg->init_code = 0x00; /* SOS XXX should be 0x80 == OS */ msg->queue_framesize = I2O_IOP_OUTBOUND_FRAME_SIZE / sizeof(u_int32_t); msg->sgl[0].flags = I2O_SGL_SIMPLE | I2O_SGL_END | I2O_SGL_EOB; msg->sgl[0].count = sizeof(reply); msg->sgl[0].phys_addr[0] = vtophys(&reply); msg->sgl[1].flags = I2O_SGL_END | I2O_SGL_EOB; msg->sgl[1].count = 1; msg->sgl[1].phys_addr[0] = 0; sc->reg->iqueue = mfa; /* wait for init to complete */ while (--timeout && reply != I2O_EXEC_OUTBOUND_INIT_COMPLETE) DELAY(1000); if (!timeout) { printf("pstiop: timeout waiting for init-complete response\n"); iop_free_mfa(sc, mfa); return 0; } /* now init our oqueue bufs */ for (i = 0; i < I2O_IOP_OUTBOUND_FRAME_COUNT; i++) { sc->reg->oqueue = sc->phys_obase + (i * I2O_IOP_OUTBOUND_FRAME_SIZE); DELAY(1000); } return 1; } int iop_get_lct(struct iop_softc *sc) { struct i2o_exec_get_lct_message *msg; struct i2o_get_lct_reply *reply; int mfa; #define ALLOCSIZE (PAGE_SIZE + (256 * sizeof(struct i2o_lct_entry))) if (!(reply = contigmalloc(ALLOCSIZE, M_PSTIOP, M_NOWAIT | M_ZERO, 0x00010000, 0xFFFFFFFF, PAGE_SIZE, 0))) return 0; mfa = iop_get_mfa(sc); msg = (struct i2o_exec_get_lct_message *)(sc->ibase + mfa); bzero(msg, sizeof(struct i2o_exec_get_lct_message)); msg->version_offset = 0x61; msg->message_flags = 
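Several paths in this file wait on the IOP by polling with a bounded DELAY() loop (iop_reset(), iop_init_outqueue(), iop_get_mfa()). The userspace sketch below shows the same bounded-poll-with-timeout idiom, with usleep() and a simulated ready check standing in for DELAY(1000) and the hardware status word.

#include <stdio.h>
#include <unistd.h>

static int
device_ready(int iter)
{
	return (iter >= 3);	/* pretend the device responds on the 4th poll */
}

int
main(void)
{
	int timeout = 5000;	/* iterations, roughly 1 ms each */
	int iter = 0;

	while (--timeout && !device_ready(iter)) {
		usleep(1000);
		iter++;
	}
	if (!timeout) {
		printf("timeout waiting for device\n");
		return (1);
	}
	printf("device ready after %d polls\n", iter + 1);
	return (0);
}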
0x0; msg->message_size = sizeof(struct i2o_exec_get_lct_message) >> 2; msg->target_address = I2O_TID_IOP; msg->initiator_address = I2O_TID_HOST; msg->function = I2O_EXEC_LCT_NOTIFY; msg->class = I2O_CLASS_MATCH_ANYCLASS; msg->last_change_id = 0; msg->sgl.flags = I2O_SGL_SIMPLE | I2O_SGL_END | I2O_SGL_EOB; msg->sgl.count = ALLOCSIZE; msg->sgl.phys_addr[0] = vtophys(reply); if (iop_queue_wait_msg(sc, mfa, (struct i2o_basic_message *)msg)) { contigfree(reply, ALLOCSIZE, M_PSTIOP); return 0; } - if (!(sc->lct = malloc(reply->table_size * sizeof(struct i2o_lct_entry), + if (!(sc->lct = mallocarray(reply->table_size, sizeof(struct i2o_lct_entry), M_PSTIOP, M_NOWAIT | M_ZERO))) { contigfree(reply, ALLOCSIZE, M_PSTIOP); return 0; } bcopy(&reply->entry[0], sc->lct, reply->table_size * sizeof(struct i2o_lct_entry)); sc->lct_count = reply->table_size; contigfree(reply, ALLOCSIZE, M_PSTIOP); return 1; } struct i2o_get_param_reply * iop_get_util_params(struct iop_softc *sc, int target, int operation, int group) { struct i2o_util_get_param_message *msg; struct i2o_get_param_operation *param; struct i2o_get_param_reply *reply; int mfa; if (!(param = contigmalloc(PAGE_SIZE, M_PSTIOP, M_NOWAIT | M_ZERO, 0x00010000, 0xFFFFFFFF, PAGE_SIZE, 0))) return NULL; if (!(reply = contigmalloc(PAGE_SIZE, M_PSTIOP, M_NOWAIT | M_ZERO, 0x00010000, 0xFFFFFFFF, PAGE_SIZE, 0))) return NULL; mfa = iop_get_mfa(sc); msg = (struct i2o_util_get_param_message *)(sc->ibase + mfa); bzero(msg, sizeof(struct i2o_util_get_param_message)); msg->version_offset = 0x51; msg->message_flags = 0x0; msg->message_size = sizeof(struct i2o_util_get_param_message) >> 2; msg->target_address = target; msg->initiator_address = I2O_TID_HOST; msg->function = I2O_UTIL_PARAMS_GET; msg->operation_flags = 0; param->operation_count = 1; param->operation[0].operation = operation; param->operation[0].group = group; param->operation[0].field_count = 0xffff; msg->sgl[0].flags = I2O_SGL_SIMPLE | I2O_SGL_DIR | I2O_SGL_EOB; msg->sgl[0].count = sizeof(struct i2o_get_param_operation); msg->sgl[0].phys_addr[0] = vtophys(param); msg->sgl[1].flags = I2O_SGL_SIMPLE | I2O_SGL_END | I2O_SGL_EOB; msg->sgl[1].count = PAGE_SIZE; msg->sgl[1].phys_addr[0] = vtophys(reply); if (iop_queue_wait_msg(sc, mfa, (struct i2o_basic_message *)msg) || reply->error_info_size) { contigfree(reply, PAGE_SIZE, M_PSTIOP); reply = NULL; } contigfree(param, PAGE_SIZE, M_PSTIOP); return reply; } u_int32_t iop_get_mfa(struct iop_softc *sc) { u_int32_t mfa; int timeout = 10000; while ((mfa = sc->reg->iqueue) == 0xffffffff && timeout) { DELAY(1000); timeout--; } if (!timeout) printf("pstiop: no free mfa\n"); return mfa; } void iop_free_mfa(struct iop_softc *sc, int mfa) { struct i2o_basic_message *msg = (struct i2o_basic_message *)(sc->ibase+mfa); bzero(msg, sizeof(struct i2o_basic_message)); msg->version = 0x01; msg->message_flags = 0x0; msg->message_size = sizeof(struct i2o_basic_message) >> 2; msg->target_address = I2O_TID_IOP; msg->initiator_address = I2O_TID_HOST; msg->function = I2O_UTIL_NOP; sc->reg->iqueue = mfa; } static void iop_done(struct iop_softc *sc, u_int32_t mfa, struct i2o_single_reply *reply) { struct iop_request *request = (struct iop_request *)reply->transaction_context; request->reply = reply; request->mfa = mfa; wakeup(request); } int iop_queue_wait_msg(struct iop_softc *sc, int mfa, struct i2o_basic_message *msg) { struct i2o_single_reply *reply; struct iop_request request; u_int32_t out_mfa; int status, timeout = 10000; mtx_lock(&sc->mtx); if (!(sc->reg->oqueue_intr_mask & 
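The msleep()/wakeup() handshake between iop_queue_wait_msg() and iop_done() can be approximated in userspace with a mutex, a condition variable and an absolute deadline; the sketch below (compile with -lpthread) is illustrative only, and the request layout is hypothetical.

#include <pthread.h>
#include <stdio.h>
#include <time.h>

struct request {
	pthread_mutex_t	lock;
	pthread_cond_t	cv;
	int		done;
	int		status;
};

static void *
completion_thread(void *arg)
{
	struct request *rq = arg;

	pthread_mutex_lock(&rq->lock);
	rq->status = 0;			/* success */
	rq->done = 1;
	pthread_cond_signal(&rq->cv);	/* analogous to wakeup(request) */
	pthread_mutex_unlock(&rq->lock);
	return (NULL);
}

int
main(void)
{
	struct request rq = { PTHREAD_MUTEX_INITIALIZER,
	    PTHREAD_COND_INITIALIZER, 0, -1 };
	struct timespec deadline;
	pthread_t tid;
	int error = 0;

	pthread_create(&tid, NULL, completion_thread, &rq);

	clock_gettime(CLOCK_REALTIME, &deadline);
	deadline.tv_sec += 10;		/* analogous to the 10 * hz timeout */

	pthread_mutex_lock(&rq.lock);
	while (!rq.done && error == 0)
		error = pthread_cond_timedwait(&rq.cv, &rq.lock, &deadline);
	pthread_mutex_unlock(&rq.lock);

	if (error != 0)
		printf("timed out waiting for completion\n");
	else
		printf("request completed, status %d\n", rq.status);
	pthread_join(tid, NULL);
	return (0);
}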
0x08)) { msg->transaction_context = (u_int32_t)&request; msg->initiator_context = (u_int32_t)iop_done; sc->reg->iqueue = mfa; if (msleep(&request, &sc->mtx, PRIBIO, "pstwt", 10 * hz)) { printf("pstiop: timeout waiting for message response\n"); iop_free_mfa(sc, mfa); mtx_unlock(&sc->mtx); return -1; } status = request.reply->status; sc->reg->oqueue = request.mfa; } else { sc->reg->iqueue = mfa; while (--timeout && ((out_mfa = sc->reg->oqueue) == 0xffffffff)) DELAY(1000); if (!timeout) { printf("pstiop: timeout waiting for message response\n"); iop_free_mfa(sc, mfa); mtx_unlock(&sc->mtx); return -1; } reply = (struct i2o_single_reply *)(sc->obase+(out_mfa-sc->phys_obase)); status = reply->status; sc->reg->oqueue = out_mfa; } mtx_unlock(&sc->mtx); return status; } int iop_create_sgl(struct i2o_basic_message *msg, caddr_t data, int count, int dir) { struct i2o_sgl *sgl = (struct i2o_sgl *)((int32_t *)msg + msg->offset); u_int32_t sgl_count, sgl_phys; int i = 0; if (((uintptr_t)data & 3) || (count & 3)) { printf("pstiop: non aligned DMA transfer attempted\n"); return 0; } if (!count) { printf("pstiop: zero length DMA transfer attempted\n"); return 0; } sgl_count = min(count, (PAGE_SIZE - ((uintptr_t)data & PAGE_MASK))); sgl_phys = vtophys(data); sgl->flags = dir | I2O_SGL_PAGELIST | I2O_SGL_EOB | I2O_SGL_END; sgl->count = count; data += sgl_count; count -= sgl_count; while (count) { sgl->phys_addr[i] = sgl_phys; sgl_phys = vtophys(data); data += min(count, PAGE_SIZE); count -= min(count, PAGE_SIZE); if (++i >= I2O_SGL_MAX_SEGS) { printf("pstiop: too many segments in SGL\n"); return 0; } } sgl->phys_addr[i] = sgl_phys; msg->message_size += i; return 1; } Index: head/sys/dev/ral/rt2560.c =================================================================== --- head/sys/dev/ral/rt2560.c (revision 327948) +++ head/sys/dev/ral/rt2560.c (revision 327949) @@ -1,2767 +1,2767 @@ /* $FreeBSD$ */ /*- * Copyright (c) 2005, 2006 * Damien Bergamini * * Permission to use, copy, modify, and distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ #include __FBSDID("$FreeBSD$"); /*- * Ralink Technology RT2560 chipset driver * http://www.ralinktech.com/ */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #define RT2560_RSSI(sc, rssi) \ ((rssi) > (RT2560_NOISE_FLOOR + (sc)->rssi_corr) ? \ ((rssi) - RT2560_NOISE_FLOOR - (sc)->rssi_corr) : 0) #define RAL_DEBUG #ifdef RAL_DEBUG #define DPRINTF(sc, fmt, ...) do { \ if (sc->sc_debug > 0) \ printf(fmt, __VA_ARGS__); \ } while (0) #define DPRINTFN(sc, n, fmt, ...) do { \ if (sc->sc_debug >= (n)) \ printf(fmt, __VA_ARGS__); \ } while (0) #else #define DPRINTF(sc, fmt, ...) 
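/*
 * The malloc(count * size) calls converted to mallocarray(count, size) in
 * this diff rely on the allocator itself rejecting a count * size product
 * that would overflow (see malloc(9)).  A minimal sketch of such a guard,
 * with illustrative parameter names only -- whether the real helper fails
 * the request or panics on overflow is defined by its implementation, not
 * by this sketch:
 *
 *	if (size != 0 && nmemb > SIZE_MAX / size)
 *		return (NULL);
 *	return (malloc(nmemb * size, type, flags));
 */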
#define DPRINTFN(sc, n, fmt, ...) #endif static struct ieee80211vap *rt2560_vap_create(struct ieee80211com *, const char [IFNAMSIZ], int, enum ieee80211_opmode, int, const uint8_t [IEEE80211_ADDR_LEN], const uint8_t [IEEE80211_ADDR_LEN]); static void rt2560_vap_delete(struct ieee80211vap *); static void rt2560_dma_map_addr(void *, bus_dma_segment_t *, int, int); static int rt2560_alloc_tx_ring(struct rt2560_softc *, struct rt2560_tx_ring *, int); static void rt2560_reset_tx_ring(struct rt2560_softc *, struct rt2560_tx_ring *); static void rt2560_free_tx_ring(struct rt2560_softc *, struct rt2560_tx_ring *); static int rt2560_alloc_rx_ring(struct rt2560_softc *, struct rt2560_rx_ring *, int); static void rt2560_reset_rx_ring(struct rt2560_softc *, struct rt2560_rx_ring *); static void rt2560_free_rx_ring(struct rt2560_softc *, struct rt2560_rx_ring *); static int rt2560_newstate(struct ieee80211vap *, enum ieee80211_state, int); static uint16_t rt2560_eeprom_read(struct rt2560_softc *, uint8_t); static void rt2560_encryption_intr(struct rt2560_softc *); static void rt2560_tx_intr(struct rt2560_softc *); static void rt2560_prio_intr(struct rt2560_softc *); static void rt2560_decryption_intr(struct rt2560_softc *); static void rt2560_rx_intr(struct rt2560_softc *); static void rt2560_beacon_update(struct ieee80211vap *, int item); static void rt2560_beacon_expire(struct rt2560_softc *); static void rt2560_wakeup_expire(struct rt2560_softc *); static void rt2560_scan_start(struct ieee80211com *); static void rt2560_scan_end(struct ieee80211com *); static void rt2560_getradiocaps(struct ieee80211com *, int, int *, struct ieee80211_channel[]); static void rt2560_set_channel(struct ieee80211com *); static void rt2560_setup_tx_desc(struct rt2560_softc *, struct rt2560_tx_desc *, uint32_t, int, int, int, bus_addr_t); static int rt2560_tx_bcn(struct rt2560_softc *, struct mbuf *, struct ieee80211_node *); static int rt2560_tx_mgt(struct rt2560_softc *, struct mbuf *, struct ieee80211_node *); static int rt2560_tx_data(struct rt2560_softc *, struct mbuf *, struct ieee80211_node *); static int rt2560_transmit(struct ieee80211com *, struct mbuf *); static void rt2560_start(struct rt2560_softc *); static void rt2560_watchdog(void *); static void rt2560_parent(struct ieee80211com *); static void rt2560_bbp_write(struct rt2560_softc *, uint8_t, uint8_t); static uint8_t rt2560_bbp_read(struct rt2560_softc *, uint8_t); static void rt2560_rf_write(struct rt2560_softc *, uint8_t, uint32_t); static void rt2560_set_chan(struct rt2560_softc *, struct ieee80211_channel *); #if 0 static void rt2560_disable_rf_tune(struct rt2560_softc *); #endif static void rt2560_enable_tsf_sync(struct rt2560_softc *); static void rt2560_enable_tsf(struct rt2560_softc *); static void rt2560_update_plcp(struct rt2560_softc *); static void rt2560_update_slot(struct ieee80211com *); static void rt2560_set_basicrates(struct rt2560_softc *, const struct ieee80211_rateset *); static void rt2560_update_led(struct rt2560_softc *, int, int); static void rt2560_set_bssid(struct rt2560_softc *, const uint8_t *); static void rt2560_set_macaddr(struct rt2560_softc *, const uint8_t *); static void rt2560_get_macaddr(struct rt2560_softc *, uint8_t *); static void rt2560_update_promisc(struct ieee80211com *); static const char *rt2560_get_rf(int); static void rt2560_read_config(struct rt2560_softc *); static int rt2560_bbp_init(struct rt2560_softc *); static void rt2560_set_txantenna(struct rt2560_softc *, int); static void 
rt2560_set_rxantenna(struct rt2560_softc *, int); static void rt2560_init_locked(struct rt2560_softc *); static void rt2560_init(void *); static void rt2560_stop_locked(struct rt2560_softc *); static int rt2560_raw_xmit(struct ieee80211_node *, struct mbuf *, const struct ieee80211_bpf_params *); static const struct { uint32_t reg; uint32_t val; } rt2560_def_mac[] = { RT2560_DEF_MAC }; static const struct { uint8_t reg; uint8_t val; } rt2560_def_bbp[] = { RT2560_DEF_BBP }; static const uint32_t rt2560_rf2522_r2[] = RT2560_RF2522_R2; static const uint32_t rt2560_rf2523_r2[] = RT2560_RF2523_R2; static const uint32_t rt2560_rf2524_r2[] = RT2560_RF2524_R2; static const uint32_t rt2560_rf2525_r2[] = RT2560_RF2525_R2; static const uint32_t rt2560_rf2525_hi_r2[] = RT2560_RF2525_HI_R2; static const uint32_t rt2560_rf2525e_r2[] = RT2560_RF2525E_R2; static const uint32_t rt2560_rf2526_r2[] = RT2560_RF2526_R2; static const uint32_t rt2560_rf2526_hi_r2[] = RT2560_RF2526_HI_R2; static const uint8_t rt2560_chan_2ghz[] = { 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14 }; static const uint8_t rt2560_chan_5ghz[] = { 36, 40, 44, 48, 52, 56, 60, 64, 100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140, 149, 153, 157, 161 }; static const struct { uint8_t chan; uint32_t r1, r2, r4; } rt2560_rf5222[] = { RT2560_RF5222 }; int rt2560_attach(device_t dev, int id) { struct rt2560_softc *sc = device_get_softc(dev); struct ieee80211com *ic = &sc->sc_ic; int error; sc->sc_dev = dev; mtx_init(&sc->sc_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK, MTX_DEF | MTX_RECURSE); callout_init_mtx(&sc->watchdog_ch, &sc->sc_mtx, 0); mbufq_init(&sc->sc_snd, ifqmaxlen); /* retrieve RT2560 rev. no */ sc->asic_rev = RAL_READ(sc, RT2560_CSR0); /* retrieve RF rev. no and various other things from EEPROM */ rt2560_read_config(sc); device_printf(dev, "MAC/BBP RT2560 (rev 0x%02x), RF %s\n", sc->asic_rev, rt2560_get_rf(sc->rf_rev)); /* * Allocate Tx and Rx rings. 
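* Five rings in all: data (txq), ATIM, priority (prioq, used for management
* and raw frames), beacon, and receive.  If any allocation fails, the rings
* already built are torn down again through the fail1..fail5 labels below.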
*/ error = rt2560_alloc_tx_ring(sc, &sc->txq, RT2560_TX_RING_COUNT); if (error != 0) { device_printf(sc->sc_dev, "could not allocate Tx ring\n"); goto fail1; } error = rt2560_alloc_tx_ring(sc, &sc->atimq, RT2560_ATIM_RING_COUNT); if (error != 0) { device_printf(sc->sc_dev, "could not allocate ATIM ring\n"); goto fail2; } error = rt2560_alloc_tx_ring(sc, &sc->prioq, RT2560_PRIO_RING_COUNT); if (error != 0) { device_printf(sc->sc_dev, "could not allocate Prio ring\n"); goto fail3; } error = rt2560_alloc_tx_ring(sc, &sc->bcnq, RT2560_BEACON_RING_COUNT); if (error != 0) { device_printf(sc->sc_dev, "could not allocate Beacon ring\n"); goto fail4; } error = rt2560_alloc_rx_ring(sc, &sc->rxq, RT2560_RX_RING_COUNT); if (error != 0) { device_printf(sc->sc_dev, "could not allocate Rx ring\n"); goto fail5; } /* retrieve MAC address */ rt2560_get_macaddr(sc, ic->ic_macaddr); ic->ic_softc = sc; ic->ic_name = device_get_nameunit(dev); ic->ic_opmode = IEEE80211_M_STA; ic->ic_phytype = IEEE80211_T_OFDM; /* not only, but not used */ /* set device capabilities */ ic->ic_caps = IEEE80211_C_STA /* station mode */ | IEEE80211_C_IBSS /* ibss, nee adhoc, mode */ | IEEE80211_C_HOSTAP /* hostap mode */ | IEEE80211_C_MONITOR /* monitor mode */ | IEEE80211_C_AHDEMO /* adhoc demo mode */ | IEEE80211_C_WDS /* 4-address traffic works */ | IEEE80211_C_MBSS /* mesh point link mode */ | IEEE80211_C_SHPREAMBLE /* short preamble supported */ | IEEE80211_C_SHSLOT /* short slot time supported */ | IEEE80211_C_WPA /* capable of WPA1+WPA2 */ | IEEE80211_C_BGSCAN /* capable of bg scanning */ #ifdef notyet | IEEE80211_C_TXFRAG /* handle tx frags */ #endif ; rt2560_getradiocaps(ic, IEEE80211_CHAN_MAX, &ic->ic_nchans, ic->ic_channels); ieee80211_ifattach(ic); ic->ic_raw_xmit = rt2560_raw_xmit; ic->ic_updateslot = rt2560_update_slot; ic->ic_update_promisc = rt2560_update_promisc; ic->ic_scan_start = rt2560_scan_start; ic->ic_scan_end = rt2560_scan_end; ic->ic_getradiocaps = rt2560_getradiocaps; ic->ic_set_channel = rt2560_set_channel; ic->ic_vap_create = rt2560_vap_create; ic->ic_vap_delete = rt2560_vap_delete; ic->ic_parent = rt2560_parent; ic->ic_transmit = rt2560_transmit; ieee80211_radiotap_attach(ic, &sc->sc_txtap.wt_ihdr, sizeof(sc->sc_txtap), RT2560_TX_RADIOTAP_PRESENT, &sc->sc_rxtap.wr_ihdr, sizeof(sc->sc_rxtap), RT2560_RX_RADIOTAP_PRESENT); /* * Add a few sysctl knobs. 
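* "debug" (RAL_DEBUG builds only) sets the verbosity used by the DPRINTF and
* DPRINTFN macros; "txantenna" and "rxantenna" pin the antenna selection,
* with 0 meaning automatic/diversity.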
*/ #ifdef RAL_DEBUG SYSCTL_ADD_INT(device_get_sysctl_ctx(dev), SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "debug", CTLFLAG_RW, &sc->sc_debug, 0, "debug msgs"); #endif SYSCTL_ADD_INT(device_get_sysctl_ctx(dev), SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "txantenna", CTLFLAG_RW, &sc->tx_ant, 0, "tx antenna (0=auto)"); SYSCTL_ADD_INT(device_get_sysctl_ctx(dev), SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "rxantenna", CTLFLAG_RW, &sc->rx_ant, 0, "rx antenna (0=auto)"); if (bootverbose) ieee80211_announce(ic); return 0; fail5: rt2560_free_tx_ring(sc, &sc->bcnq); fail4: rt2560_free_tx_ring(sc, &sc->prioq); fail3: rt2560_free_tx_ring(sc, &sc->atimq); fail2: rt2560_free_tx_ring(sc, &sc->txq); fail1: mtx_destroy(&sc->sc_mtx); return ENXIO; } int rt2560_detach(void *xsc) { struct rt2560_softc *sc = xsc; struct ieee80211com *ic = &sc->sc_ic; rt2560_stop(sc); ieee80211_ifdetach(ic); mbufq_drain(&sc->sc_snd); rt2560_free_tx_ring(sc, &sc->txq); rt2560_free_tx_ring(sc, &sc->atimq); rt2560_free_tx_ring(sc, &sc->prioq); rt2560_free_tx_ring(sc, &sc->bcnq); rt2560_free_rx_ring(sc, &sc->rxq); mtx_destroy(&sc->sc_mtx); return 0; } static struct ieee80211vap * rt2560_vap_create(struct ieee80211com *ic, const char name[IFNAMSIZ], int unit, enum ieee80211_opmode opmode, int flags, const uint8_t bssid[IEEE80211_ADDR_LEN], const uint8_t mac[IEEE80211_ADDR_LEN]) { struct rt2560_softc *sc = ic->ic_softc; struct rt2560_vap *rvp; struct ieee80211vap *vap; switch (opmode) { case IEEE80211_M_STA: case IEEE80211_M_IBSS: case IEEE80211_M_AHDEMO: case IEEE80211_M_MONITOR: case IEEE80211_M_HOSTAP: case IEEE80211_M_MBSS: /* XXXRP: TBD */ if (!TAILQ_EMPTY(&ic->ic_vaps)) { device_printf(sc->sc_dev, "only 1 vap supported\n"); return NULL; } if (opmode == IEEE80211_M_STA) flags |= IEEE80211_CLONE_NOBEACONS; break; case IEEE80211_M_WDS: if (TAILQ_EMPTY(&ic->ic_vaps) || ic->ic_opmode != IEEE80211_M_HOSTAP) { device_printf(sc->sc_dev, "wds only supported in ap mode\n"); return NULL; } /* * Silently remove any request for a unique * bssid; WDS vap's always share the local * mac address. 
*/ flags &= ~IEEE80211_CLONE_BSSID; break; default: device_printf(sc->sc_dev, "unknown opmode %d\n", opmode); return NULL; } rvp = malloc(sizeof(struct rt2560_vap), M_80211_VAP, M_WAITOK | M_ZERO); vap = &rvp->ral_vap; ieee80211_vap_setup(ic, vap, name, unit, opmode, flags, bssid); /* override state transition machine */ rvp->ral_newstate = vap->iv_newstate; vap->iv_newstate = rt2560_newstate; vap->iv_update_beacon = rt2560_beacon_update; ieee80211_ratectl_init(vap); /* complete setup */ ieee80211_vap_attach(vap, ieee80211_media_change, ieee80211_media_status, mac); if (TAILQ_FIRST(&ic->ic_vaps) == vap) ic->ic_opmode = opmode; return vap; } static void rt2560_vap_delete(struct ieee80211vap *vap) { struct rt2560_vap *rvp = RT2560_VAP(vap); ieee80211_ratectl_deinit(vap); ieee80211_vap_detach(vap); free(rvp, M_80211_VAP); } void rt2560_resume(void *xsc) { struct rt2560_softc *sc = xsc; if (sc->sc_ic.ic_nrunning > 0) rt2560_init(sc); } static void rt2560_dma_map_addr(void *arg, bus_dma_segment_t *segs, int nseg, int error) { if (error != 0) return; KASSERT(nseg == 1, ("too many DMA segments, %d should be 1", nseg)); *(bus_addr_t *)arg = segs[0].ds_addr; } static int rt2560_alloc_tx_ring(struct rt2560_softc *sc, struct rt2560_tx_ring *ring, int count) { int i, error; ring->count = count; ring->queued = 0; ring->cur = ring->next = 0; ring->cur_encrypt = ring->next_encrypt = 0; error = bus_dma_tag_create(bus_get_dma_tag(sc->sc_dev), 4, 0, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, count * RT2560_TX_DESC_SIZE, 1, count * RT2560_TX_DESC_SIZE, 0, NULL, NULL, &ring->desc_dmat); if (error != 0) { device_printf(sc->sc_dev, "could not create desc DMA tag\n"); goto fail; } error = bus_dmamem_alloc(ring->desc_dmat, (void **)&ring->desc, BUS_DMA_NOWAIT | BUS_DMA_ZERO, &ring->desc_map); if (error != 0) { device_printf(sc->sc_dev, "could not allocate DMA memory\n"); goto fail; } error = bus_dmamap_load(ring->desc_dmat, ring->desc_map, ring->desc, count * RT2560_TX_DESC_SIZE, rt2560_dma_map_addr, &ring->physaddr, 0); if (error != 0) { device_printf(sc->sc_dev, "could not load desc DMA map\n"); goto fail; } - ring->data = malloc(count * sizeof (struct rt2560_tx_data), M_DEVBUF, + ring->data = mallocarray(count, sizeof(struct rt2560_tx_data), M_DEVBUF, M_NOWAIT | M_ZERO); if (ring->data == NULL) { device_printf(sc->sc_dev, "could not allocate soft data\n"); error = ENOMEM; goto fail; } error = bus_dma_tag_create(bus_get_dma_tag(sc->sc_dev), 1, 0, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, MCLBYTES, RT2560_MAX_SCATTER, MCLBYTES, 0, NULL, NULL, &ring->data_dmat); if (error != 0) { device_printf(sc->sc_dev, "could not create data DMA tag\n"); goto fail; } for (i = 0; i < count; i++) { error = bus_dmamap_create(ring->data_dmat, 0, &ring->data[i].map); if (error != 0) { device_printf(sc->sc_dev, "could not create DMA map\n"); goto fail; } } return 0; fail: rt2560_free_tx_ring(sc, ring); return error; } static void rt2560_reset_tx_ring(struct rt2560_softc *sc, struct rt2560_tx_ring *ring) { struct rt2560_tx_desc *desc; struct rt2560_tx_data *data; int i; for (i = 0; i < ring->count; i++) { desc = &ring->desc[i]; data = &ring->data[i]; if (data->m != NULL) { bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_POSTWRITE); bus_dmamap_unload(ring->data_dmat, data->map); m_freem(data->m); data->m = NULL; } if (data->ni != NULL) { ieee80211_free_node(data->ni); data->ni = NULL; } desc->flags = 0; } bus_dmamap_sync(ring->desc_dmat, ring->desc_map, BUS_DMASYNC_PREWRITE); ring->queued = 0; 
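/* Rewind every producer/consumer index as well, so the reused ring starts again at slot 0. */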
ring->cur = ring->next = 0; ring->cur_encrypt = ring->next_encrypt = 0; } static void rt2560_free_tx_ring(struct rt2560_softc *sc, struct rt2560_tx_ring *ring) { struct rt2560_tx_data *data; int i; if (ring->desc != NULL) { bus_dmamap_sync(ring->desc_dmat, ring->desc_map, BUS_DMASYNC_POSTWRITE); bus_dmamap_unload(ring->desc_dmat, ring->desc_map); bus_dmamem_free(ring->desc_dmat, ring->desc, ring->desc_map); } if (ring->desc_dmat != NULL) bus_dma_tag_destroy(ring->desc_dmat); if (ring->data != NULL) { for (i = 0; i < ring->count; i++) { data = &ring->data[i]; if (data->m != NULL) { bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_POSTWRITE); bus_dmamap_unload(ring->data_dmat, data->map); m_freem(data->m); } if (data->ni != NULL) ieee80211_free_node(data->ni); if (data->map != NULL) bus_dmamap_destroy(ring->data_dmat, data->map); } free(ring->data, M_DEVBUF); } if (ring->data_dmat != NULL) bus_dma_tag_destroy(ring->data_dmat); } static int rt2560_alloc_rx_ring(struct rt2560_softc *sc, struct rt2560_rx_ring *ring, int count) { struct rt2560_rx_desc *desc; struct rt2560_rx_data *data; bus_addr_t physaddr; int i, error; ring->count = count; ring->cur = ring->next = 0; ring->cur_decrypt = 0; error = bus_dma_tag_create(bus_get_dma_tag(sc->sc_dev), 4, 0, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, count * RT2560_RX_DESC_SIZE, 1, count * RT2560_RX_DESC_SIZE, 0, NULL, NULL, &ring->desc_dmat); if (error != 0) { device_printf(sc->sc_dev, "could not create desc DMA tag\n"); goto fail; } error = bus_dmamem_alloc(ring->desc_dmat, (void **)&ring->desc, BUS_DMA_NOWAIT | BUS_DMA_ZERO, &ring->desc_map); if (error != 0) { device_printf(sc->sc_dev, "could not allocate DMA memory\n"); goto fail; } error = bus_dmamap_load(ring->desc_dmat, ring->desc_map, ring->desc, count * RT2560_RX_DESC_SIZE, rt2560_dma_map_addr, &ring->physaddr, 0); if (error != 0) { device_printf(sc->sc_dev, "could not load desc DMA map\n"); goto fail; } - ring->data = malloc(count * sizeof (struct rt2560_rx_data), M_DEVBUF, - M_NOWAIT | M_ZERO); + ring->data = mallocarray(count, sizeof (struct rt2560_rx_data), + M_DEVBUF, M_NOWAIT | M_ZERO); if (ring->data == NULL) { device_printf(sc->sc_dev, "could not allocate soft data\n"); error = ENOMEM; goto fail; } /* * Pre-allocate Rx buffers and populate Rx ring. 
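* Every slot gets its own DMA map and an mbuf cluster loaded up front, and
* each descriptor starts out owned by the hardware (RT2560_RX_BUSY); the
* receive path later swaps in a fresh cluster per completed frame and falls
* back to reusing the old one if that allocation fails.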
*/ error = bus_dma_tag_create(bus_get_dma_tag(sc->sc_dev), 1, 0, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, MCLBYTES, 1, MCLBYTES, 0, NULL, NULL, &ring->data_dmat); if (error != 0) { device_printf(sc->sc_dev, "could not create data DMA tag\n"); goto fail; } for (i = 0; i < count; i++) { desc = &sc->rxq.desc[i]; data = &sc->rxq.data[i]; error = bus_dmamap_create(ring->data_dmat, 0, &data->map); if (error != 0) { device_printf(sc->sc_dev, "could not create DMA map\n"); goto fail; } data->m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR); if (data->m == NULL) { device_printf(sc->sc_dev, "could not allocate rx mbuf\n"); error = ENOMEM; goto fail; } error = bus_dmamap_load(ring->data_dmat, data->map, mtod(data->m, void *), MCLBYTES, rt2560_dma_map_addr, &physaddr, 0); if (error != 0) { device_printf(sc->sc_dev, "could not load rx buf DMA map"); goto fail; } desc->flags = htole32(RT2560_RX_BUSY); desc->physaddr = htole32(physaddr); } bus_dmamap_sync(ring->desc_dmat, ring->desc_map, BUS_DMASYNC_PREWRITE); return 0; fail: rt2560_free_rx_ring(sc, ring); return error; } static void rt2560_reset_rx_ring(struct rt2560_softc *sc, struct rt2560_rx_ring *ring) { int i; for (i = 0; i < ring->count; i++) { ring->desc[i].flags = htole32(RT2560_RX_BUSY); ring->data[i].drop = 0; } bus_dmamap_sync(ring->desc_dmat, ring->desc_map, BUS_DMASYNC_PREWRITE); ring->cur = ring->next = 0; ring->cur_decrypt = 0; } static void rt2560_free_rx_ring(struct rt2560_softc *sc, struct rt2560_rx_ring *ring) { struct rt2560_rx_data *data; int i; if (ring->desc != NULL) { bus_dmamap_sync(ring->desc_dmat, ring->desc_map, BUS_DMASYNC_POSTWRITE); bus_dmamap_unload(ring->desc_dmat, ring->desc_map); bus_dmamem_free(ring->desc_dmat, ring->desc, ring->desc_map); } if (ring->desc_dmat != NULL) bus_dma_tag_destroy(ring->desc_dmat); if (ring->data != NULL) { for (i = 0; i < ring->count; i++) { data = &ring->data[i]; if (data->m != NULL) { bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_POSTREAD); bus_dmamap_unload(ring->data_dmat, data->map); m_freem(data->m); } if (data->map != NULL) bus_dmamap_destroy(ring->data_dmat, data->map); } free(ring->data, M_DEVBUF); } if (ring->data_dmat != NULL) bus_dma_tag_destroy(ring->data_dmat); } static int rt2560_newstate(struct ieee80211vap *vap, enum ieee80211_state nstate, int arg) { struct rt2560_vap *rvp = RT2560_VAP(vap); struct rt2560_softc *sc = vap->iv_ic->ic_softc; int error; if (nstate == IEEE80211_S_INIT && vap->iv_state == IEEE80211_S_RUN) { /* abort TSF synchronization */ RAL_WRITE(sc, RT2560_CSR14, 0); /* turn association led off */ rt2560_update_led(sc, 0, 0); } error = rvp->ral_newstate(vap, nstate, arg); if (error == 0 && nstate == IEEE80211_S_RUN) { struct ieee80211_node *ni = vap->iv_bss; struct mbuf *m; if (vap->iv_opmode != IEEE80211_M_MONITOR) { rt2560_update_plcp(sc); rt2560_set_basicrates(sc, &ni->ni_rates); rt2560_set_bssid(sc, ni->ni_bssid); } if (vap->iv_opmode == IEEE80211_M_HOSTAP || vap->iv_opmode == IEEE80211_M_IBSS || vap->iv_opmode == IEEE80211_M_MBSS) { m = ieee80211_beacon_alloc(ni); if (m == NULL) { device_printf(sc->sc_dev, "could not allocate beacon\n"); return ENOBUFS; } ieee80211_ref_node(ni); error = rt2560_tx_bcn(sc, m, ni); if (error != 0) return error; } /* turn association led on */ rt2560_update_led(sc, 1, 0); if (vap->iv_opmode != IEEE80211_M_MONITOR) rt2560_enable_tsf_sync(sc); else rt2560_enable_tsf(sc); } return error; } /* * Read 16 bits at address 'addr' from the serial EEPROM (either 93C46 or * 93C66). 
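* The access is bit-banged through CSR21: a start bit and the READ opcode
* (10) are clocked in, followed by a 6-bit (93C46) or 8-bit (93C66) address,
* after which the 16 data bits are clocked out MSB first.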
*/ static uint16_t rt2560_eeprom_read(struct rt2560_softc *sc, uint8_t addr) { uint32_t tmp; uint16_t val; int n; /* clock C once before the first command */ RT2560_EEPROM_CTL(sc, 0); RT2560_EEPROM_CTL(sc, RT2560_S); RT2560_EEPROM_CTL(sc, RT2560_S | RT2560_C); RT2560_EEPROM_CTL(sc, RT2560_S); /* write start bit (1) */ RT2560_EEPROM_CTL(sc, RT2560_S | RT2560_D); RT2560_EEPROM_CTL(sc, RT2560_S | RT2560_D | RT2560_C); /* write READ opcode (10) */ RT2560_EEPROM_CTL(sc, RT2560_S | RT2560_D); RT2560_EEPROM_CTL(sc, RT2560_S | RT2560_D | RT2560_C); RT2560_EEPROM_CTL(sc, RT2560_S); RT2560_EEPROM_CTL(sc, RT2560_S | RT2560_C); /* write address (A5-A0 or A7-A0) */ n = (RAL_READ(sc, RT2560_CSR21) & RT2560_93C46) ? 5 : 7; for (; n >= 0; n--) { RT2560_EEPROM_CTL(sc, RT2560_S | (((addr >> n) & 1) << RT2560_SHIFT_D)); RT2560_EEPROM_CTL(sc, RT2560_S | (((addr >> n) & 1) << RT2560_SHIFT_D) | RT2560_C); } RT2560_EEPROM_CTL(sc, RT2560_S); /* read data Q15-Q0 */ val = 0; for (n = 15; n >= 0; n--) { RT2560_EEPROM_CTL(sc, RT2560_S | RT2560_C); tmp = RAL_READ(sc, RT2560_CSR21); val |= ((tmp & RT2560_Q) >> RT2560_SHIFT_Q) << n; RT2560_EEPROM_CTL(sc, RT2560_S); } RT2560_EEPROM_CTL(sc, 0); /* clear Chip Select and clock C */ RT2560_EEPROM_CTL(sc, RT2560_S); RT2560_EEPROM_CTL(sc, 0); RT2560_EEPROM_CTL(sc, RT2560_C); return val; } /* * Some frames were processed by the hardware cipher engine and are ready for * transmission. */ static void rt2560_encryption_intr(struct rt2560_softc *sc) { struct rt2560_tx_desc *desc; int hw; /* retrieve last descriptor index processed by cipher engine */ hw = RAL_READ(sc, RT2560_SECCSR1) - sc->txq.physaddr; hw /= RT2560_TX_DESC_SIZE; bus_dmamap_sync(sc->txq.desc_dmat, sc->txq.desc_map, BUS_DMASYNC_POSTREAD); while (sc->txq.next_encrypt != hw) { if (sc->txq.next_encrypt == sc->txq.cur_encrypt) { printf("hw encrypt %d, cur_encrypt %d\n", hw, sc->txq.cur_encrypt); break; } desc = &sc->txq.desc[sc->txq.next_encrypt]; if ((le32toh(desc->flags) & RT2560_TX_BUSY) || (le32toh(desc->flags) & RT2560_TX_CIPHER_BUSY)) break; /* for TKIP, swap eiv field to fix a bug in ASIC */ if ((le32toh(desc->flags) & RT2560_TX_CIPHER_MASK) == RT2560_TX_CIPHER_TKIP) desc->eiv = bswap32(desc->eiv); /* mark the frame ready for transmission */ desc->flags |= htole32(RT2560_TX_VALID); desc->flags |= htole32(RT2560_TX_BUSY); DPRINTFN(sc, 15, "encryption done idx=%u\n", sc->txq.next_encrypt); sc->txq.next_encrypt = (sc->txq.next_encrypt + 1) % RT2560_TX_RING_COUNT; } bus_dmamap_sync(sc->txq.desc_dmat, sc->txq.desc_map, BUS_DMASYNC_PREWRITE); /* kick Tx */ RAL_WRITE(sc, RT2560_TXCSR0, RT2560_KICK_TX); } static void rt2560_tx_intr(struct rt2560_softc *sc) { struct ieee80211_ratectl_tx_status *txs = &sc->sc_txs; struct rt2560_tx_desc *desc; struct rt2560_tx_data *data; struct mbuf *m; struct ieee80211_node *ni; uint32_t flags; int status; bus_dmamap_sync(sc->txq.desc_dmat, sc->txq.desc_map, BUS_DMASYNC_POSTREAD); txs->flags = IEEE80211_RATECTL_STATUS_LONG_RETRY; for (;;) { desc = &sc->txq.desc[sc->txq.next]; data = &sc->txq.data[sc->txq.next]; flags = le32toh(desc->flags); if ((flags & RT2560_TX_BUSY) || (flags & RT2560_TX_CIPHER_BUSY) || !(flags & RT2560_TX_VALID)) break; m = data->m; ni = data->ni; switch (flags & RT2560_TX_RESULT_MASK) { case RT2560_TX_SUCCESS: txs->status = IEEE80211_RATECTL_TX_SUCCESS; txs->long_retries = 0; DPRINTFN(sc, 10, "%s\n", "data frame sent successfully"); if (data->rix != IEEE80211_FIXED_RATE_NONE) ieee80211_ratectl_tx_complete(ni, txs); status = 0; break; case RT2560_TX_SUCCESS_RETRY: 
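/* Sent, but only after one or more hardware retries; report the retry count so the rate-control module can adapt. */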
txs->status = IEEE80211_RATECTL_TX_SUCCESS; txs->long_retries = RT2560_TX_RETRYCNT(flags); DPRINTFN(sc, 9, "data frame sent after %u retries\n", txs->long_retries); if (data->rix != IEEE80211_FIXED_RATE_NONE) ieee80211_ratectl_tx_complete(ni, txs); status = 0; break; case RT2560_TX_FAIL_RETRY: txs->status = IEEE80211_RATECTL_TX_FAIL_LONG; txs->long_retries = RT2560_TX_RETRYCNT(flags); DPRINTFN(sc, 9, "data frame failed after %d retries\n", txs->long_retries); if (data->rix != IEEE80211_FIXED_RATE_NONE) ieee80211_ratectl_tx_complete(ni, txs); status = 1; break; case RT2560_TX_FAIL_INVALID: case RT2560_TX_FAIL_OTHER: default: device_printf(sc->sc_dev, "sending data frame failed " "0x%08x\n", flags); status = 1; } bus_dmamap_sync(sc->txq.data_dmat, data->map, BUS_DMASYNC_POSTWRITE); bus_dmamap_unload(sc->txq.data_dmat, data->map); ieee80211_tx_complete(ni, m, status); data->ni = NULL; data->m = NULL; /* descriptor is no longer valid */ desc->flags &= ~htole32(RT2560_TX_VALID); DPRINTFN(sc, 15, "tx done idx=%u\n", sc->txq.next); sc->txq.queued--; sc->txq.next = (sc->txq.next + 1) % RT2560_TX_RING_COUNT; } bus_dmamap_sync(sc->txq.desc_dmat, sc->txq.desc_map, BUS_DMASYNC_PREWRITE); if (sc->prioq.queued == 0 && sc->txq.queued == 0) sc->sc_tx_timer = 0; if (sc->txq.queued < RT2560_TX_RING_COUNT - 1) rt2560_start(sc); } static void rt2560_prio_intr(struct rt2560_softc *sc) { struct rt2560_tx_desc *desc; struct rt2560_tx_data *data; struct ieee80211_node *ni; struct mbuf *m; int flags; bus_dmamap_sync(sc->prioq.desc_dmat, sc->prioq.desc_map, BUS_DMASYNC_POSTREAD); for (;;) { desc = &sc->prioq.desc[sc->prioq.next]; data = &sc->prioq.data[sc->prioq.next]; flags = le32toh(desc->flags); if ((flags & RT2560_TX_BUSY) || (flags & RT2560_TX_VALID) == 0) break; switch (flags & RT2560_TX_RESULT_MASK) { case RT2560_TX_SUCCESS: DPRINTFN(sc, 10, "%s\n", "mgt frame sent successfully"); break; case RT2560_TX_SUCCESS_RETRY: DPRINTFN(sc, 9, "mgt frame sent after %u retries\n", (flags >> 5) & 0x7); break; case RT2560_TX_FAIL_RETRY: DPRINTFN(sc, 9, "%s\n", "sending mgt frame failed (too many retries)"); break; case RT2560_TX_FAIL_INVALID: case RT2560_TX_FAIL_OTHER: default: device_printf(sc->sc_dev, "sending mgt frame failed " "0x%08x\n", flags); break; } bus_dmamap_sync(sc->prioq.data_dmat, data->map, BUS_DMASYNC_POSTWRITE); bus_dmamap_unload(sc->prioq.data_dmat, data->map); m = data->m; data->m = NULL; ni = data->ni; data->ni = NULL; /* descriptor is no longer valid */ desc->flags &= ~htole32(RT2560_TX_VALID); DPRINTFN(sc, 15, "prio done idx=%u\n", sc->prioq.next); sc->prioq.queued--; sc->prioq.next = (sc->prioq.next + 1) % RT2560_PRIO_RING_COUNT; if (m->m_flags & M_TXCB) ieee80211_process_callback(ni, m, (flags & RT2560_TX_RESULT_MASK) &~ (RT2560_TX_SUCCESS | RT2560_TX_SUCCESS_RETRY)); m_freem(m); ieee80211_free_node(ni); } bus_dmamap_sync(sc->prioq.desc_dmat, sc->prioq.desc_map, BUS_DMASYNC_PREWRITE); if (sc->prioq.queued == 0 && sc->txq.queued == 0) sc->sc_tx_timer = 0; if (sc->prioq.queued < RT2560_PRIO_RING_COUNT) rt2560_start(sc); } /* * Some frames were processed by the hardware cipher engine and are ready for * handoff to the IEEE802.11 layer.
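* The ring slot is refilled (or its old cluster reused if that allocation
* fails) before the frame is handed to ieee80211_input(), which is called
* with the driver lock dropped; RT2560_F_INPUT_RUNNING marks that window.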
*/ static void rt2560_decryption_intr(struct rt2560_softc *sc) { struct ieee80211com *ic = &sc->sc_ic; struct rt2560_rx_desc *desc; struct rt2560_rx_data *data; bus_addr_t physaddr; struct ieee80211_frame *wh; struct ieee80211_node *ni; struct mbuf *mnew, *m; int hw, error; int8_t rssi, nf; /* retrieve last descriptor index processed by cipher engine */ hw = RAL_READ(sc, RT2560_SECCSR0) - sc->rxq.physaddr; hw /= RT2560_RX_DESC_SIZE; bus_dmamap_sync(sc->rxq.desc_dmat, sc->rxq.desc_map, BUS_DMASYNC_POSTREAD); for (; sc->rxq.cur_decrypt != hw;) { desc = &sc->rxq.desc[sc->rxq.cur_decrypt]; data = &sc->rxq.data[sc->rxq.cur_decrypt]; if ((le32toh(desc->flags) & RT2560_RX_BUSY) || (le32toh(desc->flags) & RT2560_RX_CIPHER_BUSY)) break; if (data->drop) { counter_u64_add(ic->ic_ierrors, 1); goto skip; } if ((le32toh(desc->flags) & RT2560_RX_CIPHER_MASK) != 0 && (le32toh(desc->flags) & RT2560_RX_ICV_ERROR)) { counter_u64_add(ic->ic_ierrors, 1); goto skip; } /* * Try to allocate a new mbuf for this ring element and load it * before processing the current mbuf. If the ring element * cannot be loaded, drop the received packet and reuse the old * mbuf. In the unlikely case that the old mbuf can't be * reloaded either, explicitly panic. */ mnew = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR); if (mnew == NULL) { counter_u64_add(ic->ic_ierrors, 1); goto skip; } bus_dmamap_sync(sc->rxq.data_dmat, data->map, BUS_DMASYNC_POSTREAD); bus_dmamap_unload(sc->rxq.data_dmat, data->map); error = bus_dmamap_load(sc->rxq.data_dmat, data->map, mtod(mnew, void *), MCLBYTES, rt2560_dma_map_addr, &physaddr, 0); if (error != 0) { m_freem(mnew); /* try to reload the old mbuf */ error = bus_dmamap_load(sc->rxq.data_dmat, data->map, mtod(data->m, void *), MCLBYTES, rt2560_dma_map_addr, &physaddr, 0); if (error != 0) { /* very unlikely that it will fail... */ panic("%s: could not load old rx mbuf", device_get_name(sc->sc_dev)); } counter_u64_add(ic->ic_ierrors, 1); goto skip; } /* * New mbuf successfully loaded, update Rx ring and continue * processing. */ m = data->m; data->m = mnew; desc->physaddr = htole32(physaddr); /* finalize mbuf */ m->m_pkthdr.len = m->m_len = (le32toh(desc->flags) >> 16) & 0xfff; rssi = RT2560_RSSI(sc, desc->rssi); nf = RT2560_NOISE_FLOOR; if (ieee80211_radiotap_active(ic)) { struct rt2560_rx_radiotap_header *tap = &sc->sc_rxtap; uint32_t tsf_lo, tsf_hi; /* get timestamp (low and high 32 bits) */ tsf_hi = RAL_READ(sc, RT2560_CSR17); tsf_lo = RAL_READ(sc, RT2560_CSR16); tap->wr_tsf = htole64(((uint64_t)tsf_hi << 32) | tsf_lo); tap->wr_flags = 0; tap->wr_rate = ieee80211_plcp2rate(desc->rate, (desc->flags & htole32(RT2560_RX_OFDM)) ? IEEE80211_T_OFDM : IEEE80211_T_CCK); tap->wr_antenna = sc->rx_ant; tap->wr_antsignal = nf + rssi; tap->wr_antnoise = nf; } sc->sc_flags |= RT2560_F_INPUT_RUNNING; RAL_UNLOCK(sc); wh = mtod(m, struct ieee80211_frame *); ni = ieee80211_find_rxnode(ic, (struct ieee80211_frame_min *)wh); if (ni != NULL) { (void) ieee80211_input(ni, m, rssi, nf); ieee80211_free_node(ni); } else (void) ieee80211_input_all(ic, m, rssi, nf); RAL_LOCK(sc); sc->sc_flags &= ~RT2560_F_INPUT_RUNNING; skip: desc->flags = htole32(RT2560_RX_BUSY); DPRINTFN(sc, 15, "decryption done idx=%u\n", sc->rxq.cur_decrypt); sc->rxq.cur_decrypt = (sc->rxq.cur_decrypt + 1) % RT2560_RX_RING_COUNT; } bus_dmamap_sync(sc->rxq.desc_dmat, sc->rxq.desc_map, BUS_DMASYNC_PREWRITE); } /* * Some frames were received. Pass them to the hardware cipher engine before * sending them to the 802.11 layer. 
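* This handler only checks the PHY/CRC status and the frame length, then
* marks the descriptor RT2560_RX_CIPHER_BUSY and kicks SECCSR0; the actual
* handoff to net80211 happens in rt2560_decryption_intr() above once the
* cipher engine is done with the frame.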
*/ static void rt2560_rx_intr(struct rt2560_softc *sc) { struct rt2560_rx_desc *desc; struct rt2560_rx_data *data; bus_dmamap_sync(sc->rxq.desc_dmat, sc->rxq.desc_map, BUS_DMASYNC_POSTREAD); for (;;) { desc = &sc->rxq.desc[sc->rxq.cur]; data = &sc->rxq.data[sc->rxq.cur]; if ((le32toh(desc->flags) & RT2560_RX_BUSY) || (le32toh(desc->flags) & RT2560_RX_CIPHER_BUSY)) break; data->drop = 0; if ((le32toh(desc->flags) & RT2560_RX_PHY_ERROR) || (le32toh(desc->flags) & RT2560_RX_CRC_ERROR)) { /* * This should not happen since we did not request * to receive those frames when we filled RXCSR0. */ DPRINTFN(sc, 5, "PHY or CRC error flags 0x%08x\n", le32toh(desc->flags)); data->drop = 1; } if (((le32toh(desc->flags) >> 16) & 0xfff) > MCLBYTES) { DPRINTFN(sc, 5, "%s\n", "bad length"); data->drop = 1; } /* mark the frame for decryption */ desc->flags |= htole32(RT2560_RX_CIPHER_BUSY); DPRINTFN(sc, 15, "rx done idx=%u\n", sc->rxq.cur); sc->rxq.cur = (sc->rxq.cur + 1) % RT2560_RX_RING_COUNT; } bus_dmamap_sync(sc->rxq.desc_dmat, sc->rxq.desc_map, BUS_DMASYNC_PREWRITE); /* kick decrypt */ RAL_WRITE(sc, RT2560_SECCSR0, RT2560_KICK_DECRYPT); } static void rt2560_beacon_update(struct ieee80211vap *vap, int item) { struct ieee80211_beacon_offsets *bo = &vap->iv_bcn_off; setbit(bo->bo_flags, item); } /* * This function is called periodically in IBSS mode when a new beacon must be * sent out. */ static void rt2560_beacon_expire(struct rt2560_softc *sc) { struct ieee80211com *ic = &sc->sc_ic; struct rt2560_tx_data *data; if (ic->ic_opmode != IEEE80211_M_IBSS && ic->ic_opmode != IEEE80211_M_HOSTAP && ic->ic_opmode != IEEE80211_M_MBSS) return; data = &sc->bcnq.data[sc->bcnq.next]; /* * Don't send beacon if bsschan isn't set */ if (data->ni == NULL) return; bus_dmamap_sync(sc->bcnq.data_dmat, data->map, BUS_DMASYNC_POSTWRITE); bus_dmamap_unload(sc->bcnq.data_dmat, data->map); /* XXX 1 =>'s mcast frames which means all PS sta's will wakeup! */ ieee80211_beacon_update(data->ni, data->m, 1); rt2560_tx_bcn(sc, data->m, data->ni); DPRINTFN(sc, 15, "%s", "beacon expired\n"); sc->bcnq.next = (sc->bcnq.next + 1) % RT2560_BEACON_RING_COUNT; } /* ARGSUSED */ static void rt2560_wakeup_expire(struct rt2560_softc *sc) { DPRINTFN(sc, 2, "%s", "wakeup expired\n"); } void rt2560_intr(void *arg) { struct rt2560_softc *sc = arg; uint32_t r; RAL_LOCK(sc); /* disable interrupts */ RAL_WRITE(sc, RT2560_CSR8, 0xffffffff); /* don't re-enable interrupts if we're shutting down */ if (!(sc->sc_flags & RT2560_F_RUNNING)) { RAL_UNLOCK(sc); return; } r = RAL_READ(sc, RT2560_CSR7); RAL_WRITE(sc, RT2560_CSR7, r); if (r & RT2560_BEACON_EXPIRE) rt2560_beacon_expire(sc); if (r & RT2560_WAKEUP_EXPIRE) rt2560_wakeup_expire(sc); if (r & RT2560_ENCRYPTION_DONE) rt2560_encryption_intr(sc); if (r & RT2560_TX_DONE) rt2560_tx_intr(sc); if (r & RT2560_PRIO_DONE) rt2560_prio_intr(sc); if (r & RT2560_DECRYPTION_DONE) rt2560_decryption_intr(sc); if (r & RT2560_RX_DONE) { rt2560_rx_intr(sc); rt2560_encryption_intr(sc); } /* re-enable interrupts */ RAL_WRITE(sc, RT2560_CSR8, RT2560_INTR_MASK); RAL_UNLOCK(sc); } #define RAL_SIFS 10 /* us */ #define RT2560_TXRX_TURNAROUND 10 /* us */ static uint8_t rt2560_plcp_signal(int rate) { switch (rate) { /* OFDM rates (cf IEEE Std 802.11a-1999, pp. 
14 Table 80) */ case 12: return 0xb; case 18: return 0xf; case 24: return 0xa; case 36: return 0xe; case 48: return 0x9; case 72: return 0xd; case 96: return 0x8; case 108: return 0xc; /* CCK rates (NB: not IEEE std, device-specific) */ case 2: return 0x0; case 4: return 0x1; case 11: return 0x2; case 22: return 0x3; } return 0xff; /* XXX unsupported/unknown rate */ } static void rt2560_setup_tx_desc(struct rt2560_softc *sc, struct rt2560_tx_desc *desc, uint32_t flags, int len, int rate, int encrypt, bus_addr_t physaddr) { struct ieee80211com *ic = &sc->sc_ic; uint16_t plcp_length; int remainder; desc->flags = htole32(flags); desc->flags |= htole32(len << 16); desc->physaddr = htole32(physaddr); desc->wme = htole16( RT2560_AIFSN(2) | RT2560_LOGCWMIN(3) | RT2560_LOGCWMAX(8)); /* setup PLCP fields */ desc->plcp_signal = rt2560_plcp_signal(rate); desc->plcp_service = 4; len += IEEE80211_CRC_LEN; if (ieee80211_rate2phytype(ic->ic_rt, rate) == IEEE80211_T_OFDM) { desc->flags |= htole32(RT2560_TX_OFDM); plcp_length = len & 0xfff; desc->plcp_length_hi = plcp_length >> 6; desc->plcp_length_lo = plcp_length & 0x3f; } else { plcp_length = howmany(16 * len, rate); if (rate == 22) { remainder = (16 * len) % 22; if (remainder != 0 && remainder < 7) desc->plcp_service |= RT2560_PLCP_LENGEXT; } desc->plcp_length_hi = plcp_length >> 8; desc->plcp_length_lo = plcp_length & 0xff; if (rate != 2 && (ic->ic_flags & IEEE80211_F_SHPREAMBLE)) desc->plcp_signal |= 0x08; } if (!encrypt) desc->flags |= htole32(RT2560_TX_VALID); desc->flags |= encrypt ? htole32(RT2560_TX_CIPHER_BUSY) : htole32(RT2560_TX_BUSY); } static int rt2560_tx_bcn(struct rt2560_softc *sc, struct mbuf *m0, struct ieee80211_node *ni) { struct ieee80211vap *vap = ni->ni_vap; struct rt2560_tx_desc *desc; struct rt2560_tx_data *data; bus_dma_segment_t segs[RT2560_MAX_SCATTER]; int nsegs, rate, error; desc = &sc->bcnq.desc[sc->bcnq.cur]; data = &sc->bcnq.data[sc->bcnq.cur]; /* XXX maybe a separate beacon rate? 
*/ rate = vap->iv_txparms[ieee80211_chan2mode(ni->ni_chan)].mgmtrate; error = bus_dmamap_load_mbuf_sg(sc->bcnq.data_dmat, data->map, m0, segs, &nsegs, BUS_DMA_NOWAIT); if (error != 0) { device_printf(sc->sc_dev, "could not map mbuf (error %d)\n", error); m_freem(m0); return error; } if (ieee80211_radiotap_active_vap(vap)) { struct rt2560_tx_radiotap_header *tap = &sc->sc_txtap; tap->wt_flags = 0; tap->wt_rate = rate; tap->wt_antenna = sc->tx_ant; ieee80211_radiotap_tx(vap, m0); } data->m = m0; data->ni = ni; rt2560_setup_tx_desc(sc, desc, RT2560_TX_IFS_NEWBACKOFF | RT2560_TX_TIMESTAMP, m0->m_pkthdr.len, rate, 0, segs->ds_addr); DPRINTFN(sc, 10, "sending beacon frame len=%u idx=%u rate=%u\n", m0->m_pkthdr.len, sc->bcnq.cur, rate); bus_dmamap_sync(sc->bcnq.data_dmat, data->map, BUS_DMASYNC_PREWRITE); bus_dmamap_sync(sc->bcnq.desc_dmat, sc->bcnq.desc_map, BUS_DMASYNC_PREWRITE); sc->bcnq.cur = (sc->bcnq.cur + 1) % RT2560_BEACON_RING_COUNT; return 0; } static int rt2560_tx_mgt(struct rt2560_softc *sc, struct mbuf *m0, struct ieee80211_node *ni) { struct ieee80211vap *vap = ni->ni_vap; struct ieee80211com *ic = ni->ni_ic; struct rt2560_tx_desc *desc; struct rt2560_tx_data *data; struct ieee80211_frame *wh; struct ieee80211_key *k; bus_dma_segment_t segs[RT2560_MAX_SCATTER]; uint16_t dur; uint32_t flags = 0; int nsegs, rate, error; desc = &sc->prioq.desc[sc->prioq.cur]; data = &sc->prioq.data[sc->prioq.cur]; rate = ni->ni_txparms->mgmtrate; wh = mtod(m0, struct ieee80211_frame *); if (wh->i_fc[1] & IEEE80211_FC1_PROTECTED) { k = ieee80211_crypto_encap(ni, m0); if (k == NULL) { m_freem(m0); return ENOBUFS; } } error = bus_dmamap_load_mbuf_sg(sc->prioq.data_dmat, data->map, m0, segs, &nsegs, 0); if (error != 0) { device_printf(sc->sc_dev, "could not map mbuf (error %d)\n", error); m_freem(m0); return error; } if (ieee80211_radiotap_active_vap(vap)) { struct rt2560_tx_radiotap_header *tap = &sc->sc_txtap; tap->wt_flags = 0; tap->wt_rate = rate; tap->wt_antenna = sc->tx_ant; ieee80211_radiotap_tx(vap, m0); } data->m = m0; data->ni = ni; /* management frames are not taken into account for amrr */ data->rix = IEEE80211_FIXED_RATE_NONE; wh = mtod(m0, struct ieee80211_frame *); if (!IEEE80211_IS_MULTICAST(wh->i_addr1)) { flags |= RT2560_TX_ACK; dur = ieee80211_ack_duration(ic->ic_rt, rate, ic->ic_flags & IEEE80211_F_SHPREAMBLE); *(uint16_t *)wh->i_dur = htole16(dur); /* tell hardware to add timestamp for probe responses */ if ((wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK) == IEEE80211_FC0_TYPE_MGT && (wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK) == IEEE80211_FC0_SUBTYPE_PROBE_RESP) flags |= RT2560_TX_TIMESTAMP; } rt2560_setup_tx_desc(sc, desc, flags, m0->m_pkthdr.len, rate, 0, segs->ds_addr); bus_dmamap_sync(sc->prioq.data_dmat, data->map, BUS_DMASYNC_PREWRITE); bus_dmamap_sync(sc->prioq.desc_dmat, sc->prioq.desc_map, BUS_DMASYNC_PREWRITE); DPRINTFN(sc, 10, "sending mgt frame len=%u idx=%u rate=%u\n", m0->m_pkthdr.len, sc->prioq.cur, rate); /* kick prio */ sc->prioq.queued++; sc->prioq.cur = (sc->prioq.cur + 1) % RT2560_PRIO_RING_COUNT; RAL_WRITE(sc, RT2560_TXCSR0, RT2560_KICK_PRIO); return 0; } static int rt2560_sendprot(struct rt2560_softc *sc, const struct mbuf *m, struct ieee80211_node *ni, int prot, int rate) { struct ieee80211com *ic = ni->ni_ic; const struct ieee80211_frame *wh; struct rt2560_tx_desc *desc; struct rt2560_tx_data *data; struct mbuf *mprot; int protrate, ackrate, pktlen, flags, isshort, error; uint16_t dur; bus_dma_segment_t segs[RT2560_MAX_SCATTER]; int nsegs; KASSERT(prot == 
IEEE80211_PROT_RTSCTS || prot == IEEE80211_PROT_CTSONLY, ("protection %d", prot)); wh = mtod(m, const struct ieee80211_frame *); pktlen = m->m_pkthdr.len + IEEE80211_CRC_LEN; protrate = ieee80211_ctl_rate(ic->ic_rt, rate); ackrate = ieee80211_ack_rate(ic->ic_rt, rate); isshort = (ic->ic_flags & IEEE80211_F_SHPREAMBLE) != 0; dur = ieee80211_compute_duration(ic->ic_rt, pktlen, rate, isshort) + ieee80211_ack_duration(ic->ic_rt, rate, isshort); flags = RT2560_TX_MORE_FRAG; if (prot == IEEE80211_PROT_RTSCTS) { /* NB: CTS is the same size as an ACK */ dur += ieee80211_ack_duration(ic->ic_rt, rate, isshort); flags |= RT2560_TX_ACK; mprot = ieee80211_alloc_rts(ic, wh->i_addr1, wh->i_addr2, dur); } else { mprot = ieee80211_alloc_cts(ic, ni->ni_vap->iv_myaddr, dur); } if (mprot == NULL) { /* XXX stat + msg */ return ENOBUFS; } desc = &sc->txq.desc[sc->txq.cur_encrypt]; data = &sc->txq.data[sc->txq.cur_encrypt]; error = bus_dmamap_load_mbuf_sg(sc->txq.data_dmat, data->map, mprot, segs, &nsegs, 0); if (error != 0) { device_printf(sc->sc_dev, "could not map mbuf (error %d)\n", error); m_freem(mprot); return error; } data->m = mprot; data->ni = ieee80211_ref_node(ni); /* ctl frames are not taken into account for amrr */ data->rix = IEEE80211_FIXED_RATE_NONE; rt2560_setup_tx_desc(sc, desc, flags, mprot->m_pkthdr.len, protrate, 1, segs->ds_addr); bus_dmamap_sync(sc->txq.data_dmat, data->map, BUS_DMASYNC_PREWRITE); sc->txq.queued++; sc->txq.cur_encrypt = (sc->txq.cur_encrypt + 1) % RT2560_TX_RING_COUNT; return 0; } static int rt2560_tx_raw(struct rt2560_softc *sc, struct mbuf *m0, struct ieee80211_node *ni, const struct ieee80211_bpf_params *params) { struct ieee80211vap *vap = ni->ni_vap; struct ieee80211com *ic = ni->ni_ic; struct rt2560_tx_desc *desc; struct rt2560_tx_data *data; bus_dma_segment_t segs[RT2560_MAX_SCATTER]; uint32_t flags; int nsegs, rate, error; desc = &sc->prioq.desc[sc->prioq.cur]; data = &sc->prioq.data[sc->prioq.cur]; rate = params->ibp_rate0; if (!ieee80211_isratevalid(ic->ic_rt, rate)) { /* XXX fall back to mcast/mgmt rate? */ m_freem(m0); return EINVAL; } flags = 0; if ((params->ibp_flags & IEEE80211_BPF_NOACK) == 0) flags |= RT2560_TX_ACK; if (params->ibp_flags & (IEEE80211_BPF_RTS|IEEE80211_BPF_CTS)) { error = rt2560_sendprot(sc, m0, ni, params->ibp_flags & IEEE80211_BPF_RTS ? 
IEEE80211_PROT_RTSCTS : IEEE80211_PROT_CTSONLY, rate); if (error) { m_freem(m0); return error; } flags |= RT2560_TX_LONG_RETRY | RT2560_TX_IFS_SIFS; } error = bus_dmamap_load_mbuf_sg(sc->prioq.data_dmat, data->map, m0, segs, &nsegs, 0); if (error != 0) { device_printf(sc->sc_dev, "could not map mbuf (error %d)\n", error); m_freem(m0); return error; } if (ieee80211_radiotap_active_vap(vap)) { struct rt2560_tx_radiotap_header *tap = &sc->sc_txtap; tap->wt_flags = 0; tap->wt_rate = rate; tap->wt_antenna = sc->tx_ant; ieee80211_radiotap_tx(ni->ni_vap, m0); } data->m = m0; data->ni = ni; /* XXX need to setup descriptor ourself */ rt2560_setup_tx_desc(sc, desc, flags, m0->m_pkthdr.len, rate, (params->ibp_flags & IEEE80211_BPF_CRYPTO) != 0, segs->ds_addr); bus_dmamap_sync(sc->prioq.data_dmat, data->map, BUS_DMASYNC_PREWRITE); bus_dmamap_sync(sc->prioq.desc_dmat, sc->prioq.desc_map, BUS_DMASYNC_PREWRITE); DPRINTFN(sc, 10, "sending raw frame len=%u idx=%u rate=%u\n", m0->m_pkthdr.len, sc->prioq.cur, rate); /* kick prio */ sc->prioq.queued++; sc->prioq.cur = (sc->prioq.cur + 1) % RT2560_PRIO_RING_COUNT; RAL_WRITE(sc, RT2560_TXCSR0, RT2560_KICK_PRIO); return 0; } static int rt2560_tx_data(struct rt2560_softc *sc, struct mbuf *m0, struct ieee80211_node *ni) { struct ieee80211vap *vap = ni->ni_vap; struct ieee80211com *ic = ni->ni_ic; struct rt2560_tx_desc *desc; struct rt2560_tx_data *data; struct ieee80211_frame *wh; const struct ieee80211_txparam *tp = ni->ni_txparms; struct ieee80211_key *k; struct mbuf *mnew; bus_dma_segment_t segs[RT2560_MAX_SCATTER]; uint16_t dur; uint32_t flags; int nsegs, rate, error; wh = mtod(m0, struct ieee80211_frame *); if (m0->m_flags & M_EAPOL) { rate = tp->mgmtrate; } else if (IEEE80211_IS_MULTICAST(wh->i_addr1)) { rate = tp->mcastrate; } else if (tp->ucastrate != IEEE80211_FIXED_RATE_NONE) { rate = tp->ucastrate; } else { (void) ieee80211_ratectl_rate(ni, NULL, 0); rate = ni->ni_txrate; } if (wh->i_fc[1] & IEEE80211_FC1_PROTECTED) { k = ieee80211_crypto_encap(ni, m0); if (k == NULL) { m_freem(m0); return ENOBUFS; } /* packet header may have moved, reset our local pointer */ wh = mtod(m0, struct ieee80211_frame *); } flags = 0; if (!IEEE80211_IS_MULTICAST(wh->i_addr1)) { int prot = IEEE80211_PROT_NONE; if (m0->m_pkthdr.len + IEEE80211_CRC_LEN > vap->iv_rtsthreshold) prot = IEEE80211_PROT_RTSCTS; else if ((ic->ic_flags & IEEE80211_F_USEPROT) && ieee80211_rate2phytype(ic->ic_rt, rate) == IEEE80211_T_OFDM) prot = ic->ic_protmode; if (prot != IEEE80211_PROT_NONE) { error = rt2560_sendprot(sc, m0, ni, prot, rate); if (error) { m_freem(m0); return error; } flags |= RT2560_TX_LONG_RETRY | RT2560_TX_IFS_SIFS; } } data = &sc->txq.data[sc->txq.cur_encrypt]; desc = &sc->txq.desc[sc->txq.cur_encrypt]; error = bus_dmamap_load_mbuf_sg(sc->txq.data_dmat, data->map, m0, segs, &nsegs, 0); if (error != 0 && error != EFBIG) { device_printf(sc->sc_dev, "could not map mbuf (error %d)\n", error); m_freem(m0); return error; } if (error != 0) { mnew = m_defrag(m0, M_NOWAIT); if (mnew == NULL) { device_printf(sc->sc_dev, "could not defragment mbuf\n"); m_freem(m0); return ENOBUFS; } m0 = mnew; error = bus_dmamap_load_mbuf_sg(sc->txq.data_dmat, data->map, m0, segs, &nsegs, 0); if (error != 0) { device_printf(sc->sc_dev, "could not map mbuf (error %d)\n", error); m_freem(m0); return error; } /* packet header may have moved, reset our local pointer */ wh = mtod(m0, struct ieee80211_frame *); } if (ieee80211_radiotap_active_vap(vap)) { struct rt2560_tx_radiotap_header *tap = &sc->sc_txtap; 
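/* Tag the outgoing frame with the chosen rate and antenna for any attached radiotap (bpf) listeners before it is queued. */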
tap->wt_flags = 0; tap->wt_rate = rate; tap->wt_antenna = sc->tx_ant; ieee80211_radiotap_tx(vap, m0); } data->m = m0; data->ni = ni; /* remember link conditions for rate adaptation algorithm */ if (tp->ucastrate == IEEE80211_FIXED_RATE_NONE) { data->rix = ni->ni_txrate; /* XXX probably need last rssi value and not avg */ data->rssi = ic->ic_node_getrssi(ni); } else data->rix = IEEE80211_FIXED_RATE_NONE; if (!IEEE80211_IS_MULTICAST(wh->i_addr1)) { flags |= RT2560_TX_ACK; dur = ieee80211_ack_duration(ic->ic_rt, rate, ic->ic_flags & IEEE80211_F_SHPREAMBLE); *(uint16_t *)wh->i_dur = htole16(dur); } rt2560_setup_tx_desc(sc, desc, flags, m0->m_pkthdr.len, rate, 1, segs->ds_addr); bus_dmamap_sync(sc->txq.data_dmat, data->map, BUS_DMASYNC_PREWRITE); bus_dmamap_sync(sc->txq.desc_dmat, sc->txq.desc_map, BUS_DMASYNC_PREWRITE); DPRINTFN(sc, 10, "sending data frame len=%u idx=%u rate=%u\n", m0->m_pkthdr.len, sc->txq.cur_encrypt, rate); /* kick encrypt */ sc->txq.queued++; sc->txq.cur_encrypt = (sc->txq.cur_encrypt + 1) % RT2560_TX_RING_COUNT; RAL_WRITE(sc, RT2560_SECCSR1, RT2560_KICK_ENCRYPT); return 0; } static int rt2560_transmit(struct ieee80211com *ic, struct mbuf *m) { struct rt2560_softc *sc = ic->ic_softc; int error; RAL_LOCK(sc); if ((sc->sc_flags & RT2560_F_RUNNING) == 0) { RAL_UNLOCK(sc); return (ENXIO); } error = mbufq_enqueue(&sc->sc_snd, m); if (error) { RAL_UNLOCK(sc); return (error); } rt2560_start(sc); RAL_UNLOCK(sc); return (0); } static void rt2560_start(struct rt2560_softc *sc) { struct ieee80211_node *ni; struct mbuf *m; RAL_LOCK_ASSERT(sc); while (sc->txq.queued < RT2560_TX_RING_COUNT - 1 && (m = mbufq_dequeue(&sc->sc_snd)) != NULL) { ni = (struct ieee80211_node *) m->m_pkthdr.rcvif; if (rt2560_tx_data(sc, m, ni) != 0) { if_inc_counter(ni->ni_vap->iv_ifp, IFCOUNTER_OERRORS, 1); ieee80211_free_node(ni); break; } sc->sc_tx_timer = 5; } } static void rt2560_watchdog(void *arg) { struct rt2560_softc *sc = arg; RAL_LOCK_ASSERT(sc); KASSERT(sc->sc_flags & RT2560_F_RUNNING, ("not running")); if (sc->sc_invalid) /* card ejected */ return; rt2560_encryption_intr(sc); rt2560_tx_intr(sc); if (sc->sc_tx_timer > 0 && --sc->sc_tx_timer == 0) { device_printf(sc->sc_dev, "device timeout\n"); rt2560_init_locked(sc); counter_u64_add(sc->sc_ic.ic_oerrors, 1); /* NB: callout is reset in rt2560_init() */ return; } callout_reset(&sc->watchdog_ch, hz, rt2560_watchdog, sc); } static void rt2560_parent(struct ieee80211com *ic) { struct rt2560_softc *sc = ic->ic_softc; int startall = 0; RAL_LOCK(sc); if (ic->ic_nrunning > 0) { if ((sc->sc_flags & RT2560_F_RUNNING) == 0) { rt2560_init_locked(sc); startall = 1; } else rt2560_update_promisc(ic); } else if (sc->sc_flags & RT2560_F_RUNNING) rt2560_stop_locked(sc); RAL_UNLOCK(sc); if (startall) ieee80211_start_all(ic); } static void rt2560_bbp_write(struct rt2560_softc *sc, uint8_t reg, uint8_t val) { uint32_t tmp; int ntries; for (ntries = 0; ntries < 100; ntries++) { if (!(RAL_READ(sc, RT2560_BBPCSR) & RT2560_BBP_BUSY)) break; DELAY(1); } if (ntries == 100) { device_printf(sc->sc_dev, "could not write to BBP\n"); return; } tmp = RT2560_BBP_WRITE | RT2560_BBP_BUSY | reg << 8 | val; RAL_WRITE(sc, RT2560_BBPCSR, tmp); DPRINTFN(sc, 15, "BBP R%u <- 0x%02x\n", reg, val); } static uint8_t rt2560_bbp_read(struct rt2560_softc *sc, uint8_t reg) { uint32_t val; int ntries; for (ntries = 0; ntries < 100; ntries++) { if (!(RAL_READ(sc, RT2560_BBPCSR) & RT2560_BBP_BUSY)) break; DELAY(1); } if (ntries == 100) { device_printf(sc->sc_dev, "could not read from BBP\n"); return 0; 
} val = RT2560_BBP_BUSY | reg << 8; RAL_WRITE(sc, RT2560_BBPCSR, val); for (ntries = 0; ntries < 100; ntries++) { val = RAL_READ(sc, RT2560_BBPCSR); if (!(val & RT2560_BBP_BUSY)) return val & 0xff; DELAY(1); } device_printf(sc->sc_dev, "could not read from BBP\n"); return 0; } static void rt2560_rf_write(struct rt2560_softc *sc, uint8_t reg, uint32_t val) { uint32_t tmp; int ntries; for (ntries = 0; ntries < 100; ntries++) { if (!(RAL_READ(sc, RT2560_RFCSR) & RT2560_RF_BUSY)) break; DELAY(1); } if (ntries == 100) { device_printf(sc->sc_dev, "could not write to RF\n"); return; } tmp = RT2560_RF_BUSY | RT2560_RF_20BIT | (val & 0xfffff) << 2 | (reg & 0x3); RAL_WRITE(sc, RT2560_RFCSR, tmp); /* remember last written value in sc */ sc->rf_regs[reg] = val; DPRINTFN(sc, 15, "RF R[%u] <- 0x%05x\n", reg & 0x3, val & 0xfffff); } static void rt2560_set_chan(struct rt2560_softc *sc, struct ieee80211_channel *c) { struct ieee80211com *ic = &sc->sc_ic; uint8_t power, tmp; u_int i, chan; chan = ieee80211_chan2ieee(ic, c); KASSERT(chan != 0 && chan != IEEE80211_CHAN_ANY, ("chan 0x%x", chan)); if (IEEE80211_IS_CHAN_2GHZ(c)) power = min(sc->txpow[chan - 1], 31); else power = 31; /* adjust txpower using ifconfig settings */ power -= (100 - ic->ic_txpowlimit) / 8; DPRINTFN(sc, 2, "setting channel to %u, txpower to %u\n", chan, power); switch (sc->rf_rev) { case RT2560_RF_2522: rt2560_rf_write(sc, RAL_RF1, 0x00814); rt2560_rf_write(sc, RAL_RF2, rt2560_rf2522_r2[chan - 1]); rt2560_rf_write(sc, RAL_RF3, power << 7 | 0x00040); break; case RT2560_RF_2523: rt2560_rf_write(sc, RAL_RF1, 0x08804); rt2560_rf_write(sc, RAL_RF2, rt2560_rf2523_r2[chan - 1]); rt2560_rf_write(sc, RAL_RF3, power << 7 | 0x38044); rt2560_rf_write(sc, RAL_RF4, (chan == 14) ? 0x00280 : 0x00286); break; case RT2560_RF_2524: rt2560_rf_write(sc, RAL_RF1, 0x0c808); rt2560_rf_write(sc, RAL_RF2, rt2560_rf2524_r2[chan - 1]); rt2560_rf_write(sc, RAL_RF3, power << 7 | 0x00040); rt2560_rf_write(sc, RAL_RF4, (chan == 14) ? 0x00280 : 0x00286); break; case RT2560_RF_2525: rt2560_rf_write(sc, RAL_RF1, 0x08808); rt2560_rf_write(sc, RAL_RF2, rt2560_rf2525_hi_r2[chan - 1]); rt2560_rf_write(sc, RAL_RF3, power << 7 | 0x18044); rt2560_rf_write(sc, RAL_RF4, (chan == 14) ? 0x00280 : 0x00286); rt2560_rf_write(sc, RAL_RF1, 0x08808); rt2560_rf_write(sc, RAL_RF2, rt2560_rf2525_r2[chan - 1]); rt2560_rf_write(sc, RAL_RF3, power << 7 | 0x18044); rt2560_rf_write(sc, RAL_RF4, (chan == 14) ? 0x00280 : 0x00286); break; case RT2560_RF_2525E: rt2560_rf_write(sc, RAL_RF1, 0x08808); rt2560_rf_write(sc, RAL_RF2, rt2560_rf2525e_r2[chan - 1]); rt2560_rf_write(sc, RAL_RF3, power << 7 | 0x18044); rt2560_rf_write(sc, RAL_RF4, (chan == 14) ? 0x00286 : 0x00282); break; case RT2560_RF_2526: rt2560_rf_write(sc, RAL_RF2, rt2560_rf2526_hi_r2[chan - 1]); rt2560_rf_write(sc, RAL_RF4, (chan & 1) ? 0x00386 : 0x00381); rt2560_rf_write(sc, RAL_RF1, 0x08804); rt2560_rf_write(sc, RAL_RF2, rt2560_rf2526_r2[chan - 1]); rt2560_rf_write(sc, RAL_RF3, power << 7 | 0x18044); rt2560_rf_write(sc, RAL_RF4, (chan & 1) ? 
0x00386 : 0x00381); break; /* dual-band RF */ case RT2560_RF_5222: for (i = 0; rt2560_rf5222[i].chan != chan; i++); rt2560_rf_write(sc, RAL_RF1, rt2560_rf5222[i].r1); rt2560_rf_write(sc, RAL_RF2, rt2560_rf5222[i].r2); rt2560_rf_write(sc, RAL_RF3, power << 7 | 0x00040); rt2560_rf_write(sc, RAL_RF4, rt2560_rf5222[i].r4); break; default: printf("unknown ral rev=%d\n", sc->rf_rev); } /* XXX */ if ((ic->ic_flags & IEEE80211_F_SCAN) == 0) { /* set Japan filter bit for channel 14 */ tmp = rt2560_bbp_read(sc, 70); tmp &= ~RT2560_JAPAN_FILTER; if (chan == 14) tmp |= RT2560_JAPAN_FILTER; rt2560_bbp_write(sc, 70, tmp); /* clear CRC errors */ RAL_READ(sc, RT2560_CNT0); } } static void rt2560_getradiocaps(struct ieee80211com *ic, int maxchans, int *nchans, struct ieee80211_channel chans[]) { struct rt2560_softc *sc = ic->ic_softc; uint8_t bands[IEEE80211_MODE_BYTES]; memset(bands, 0, sizeof(bands)); setbit(bands, IEEE80211_MODE_11B); setbit(bands, IEEE80211_MODE_11G); ieee80211_add_channel_list_2ghz(chans, maxchans, nchans, rt2560_chan_2ghz, nitems(rt2560_chan_2ghz), bands, 0); if (sc->rf_rev == RT2560_RF_5222) { setbit(bands, IEEE80211_MODE_11A); ieee80211_add_channel_list_5ghz(chans, maxchans, nchans, rt2560_chan_5ghz, nitems(rt2560_chan_5ghz), bands, 0); } } static void rt2560_set_channel(struct ieee80211com *ic) { struct rt2560_softc *sc = ic->ic_softc; RAL_LOCK(sc); rt2560_set_chan(sc, ic->ic_curchan); RAL_UNLOCK(sc); } #if 0 /* * Disable RF auto-tuning. */ static void rt2560_disable_rf_tune(struct rt2560_softc *sc) { uint32_t tmp; if (sc->rf_rev != RT2560_RF_2523) { tmp = sc->rf_regs[RAL_RF1] & ~RAL_RF1_AUTOTUNE; rt2560_rf_write(sc, RAL_RF1, tmp); } tmp = sc->rf_regs[RAL_RF3] & ~RAL_RF3_AUTOTUNE; rt2560_rf_write(sc, RAL_RF3, tmp); DPRINTFN(sc, 2, "%s", "disabling RF autotune\n"); } #endif /* * Refer to IEEE Std 802.11-1999 pp. 123 for more information on TSF * synchronization. */ static void rt2560_enable_tsf_sync(struct rt2560_softc *sc) { struct ieee80211com *ic = &sc->sc_ic; struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps); uint16_t logcwmin, preload; uint32_t tmp; /* first, disable TSF synchronization */ RAL_WRITE(sc, RT2560_CSR14, 0); tmp = 16 * vap->iv_bss->ni_intval; RAL_WRITE(sc, RT2560_CSR12, tmp); RAL_WRITE(sc, RT2560_CSR13, 0); logcwmin = 5; preload = (vap->iv_opmode == IEEE80211_M_STA) ? 
384 : 1024; tmp = logcwmin << 16 | preload; RAL_WRITE(sc, RT2560_BCNOCSR, tmp); /* finally, enable TSF synchronization */ tmp = RT2560_ENABLE_TSF | RT2560_ENABLE_TBCN; if (ic->ic_opmode == IEEE80211_M_STA) tmp |= RT2560_ENABLE_TSF_SYNC(1); else tmp |= RT2560_ENABLE_TSF_SYNC(2) | RT2560_ENABLE_BEACON_GENERATOR; RAL_WRITE(sc, RT2560_CSR14, tmp); DPRINTF(sc, "%s", "enabling TSF synchronization\n"); } static void rt2560_enable_tsf(struct rt2560_softc *sc) { RAL_WRITE(sc, RT2560_CSR14, 0); RAL_WRITE(sc, RT2560_CSR14, RT2560_ENABLE_TSF_SYNC(2) | RT2560_ENABLE_TSF); } static void rt2560_update_plcp(struct rt2560_softc *sc) { struct ieee80211com *ic = &sc->sc_ic; /* no short preamble for 1Mbps */ RAL_WRITE(sc, RT2560_PLCP1MCSR, 0x00700400); if (!(ic->ic_flags & IEEE80211_F_SHPREAMBLE)) { /* values taken from the reference driver */ RAL_WRITE(sc, RT2560_PLCP2MCSR, 0x00380401); RAL_WRITE(sc, RT2560_PLCP5p5MCSR, 0x00150402); RAL_WRITE(sc, RT2560_PLCP11MCSR, 0x000b8403); } else { /* same values as above or'ed 0x8 */ RAL_WRITE(sc, RT2560_PLCP2MCSR, 0x00380409); RAL_WRITE(sc, RT2560_PLCP5p5MCSR, 0x0015040a); RAL_WRITE(sc, RT2560_PLCP11MCSR, 0x000b840b); } DPRINTF(sc, "updating PLCP for %s preamble\n", (ic->ic_flags & IEEE80211_F_SHPREAMBLE) ? "short" : "long"); } /* * This function can be called by ieee80211_set_shortslottime(). Refer to * IEEE Std 802.11-1999 pp. 85 to see how these values are computed. */ static void rt2560_update_slot(struct ieee80211com *ic) { struct rt2560_softc *sc = ic->ic_softc; uint8_t slottime; uint16_t tx_sifs, tx_pifs, tx_difs, eifs; uint32_t tmp; #ifndef FORCE_SLOTTIME slottime = IEEE80211_GET_SLOTTIME(ic); #else /* * Setting slot time according to "short slot time" capability * in beacon/probe_resp seems to cause problems acknowledging * certain APs' data frames transmitted at CCK/DS rates: the * problematic AP keeps retransmitting data frames, probably * because MAC level acks are not received by hardware. * So we cheat a little bit here by claiming we are capable of * "short slot time" but setting hardware slot time to the normal * slot time. ral(4) does not seem to have trouble receiving * frames transmitted using short slot time even if hardware * slot time is set to normal slot time. If we didn't use this * trick, we would have to claim that short slot time is not * supported; this would give relatively poor RX performance * (-1Mb~-2Mb lower) and the _whole_ BSS would stop using short * slot time. */ slottime = IEEE80211_DUR_SLOT; #endif /* update the MAC slot boundaries */ tx_sifs = RAL_SIFS - RT2560_TXRX_TURNAROUND; tx_pifs = tx_sifs + slottime; tx_difs = IEEE80211_DUR_DIFS(tx_sifs, slottime); eifs = (ic->ic_curmode == IEEE80211_MODE_11B) ?
364 : 60; tmp = RAL_READ(sc, RT2560_CSR11); tmp = (tmp & ~0x1f00) | slottime << 8; RAL_WRITE(sc, RT2560_CSR11, tmp); tmp = tx_pifs << 16 | tx_sifs; RAL_WRITE(sc, RT2560_CSR18, tmp); tmp = eifs << 16 | tx_difs; RAL_WRITE(sc, RT2560_CSR19, tmp); DPRINTF(sc, "setting slottime to %uus\n", slottime); } static void rt2560_set_basicrates(struct rt2560_softc *sc, const struct ieee80211_rateset *rs) { struct ieee80211com *ic = &sc->sc_ic; uint32_t mask = 0; uint8_t rate; int i; for (i = 0; i < rs->rs_nrates; i++) { rate = rs->rs_rates[i]; if (!(rate & IEEE80211_RATE_BASIC)) continue; mask |= 1 << ieee80211_legacy_rate_lookup(ic->ic_rt, IEEE80211_RV(rate)); } RAL_WRITE(sc, RT2560_ARSP_PLCP_1, mask); DPRINTF(sc, "Setting basic rate mask to 0x%x\n", mask); } static void rt2560_update_led(struct rt2560_softc *sc, int led1, int led2) { uint32_t tmp; /* set ON period to 70ms and OFF period to 30ms */ tmp = led1 << 16 | led2 << 17 | 70 << 8 | 30; RAL_WRITE(sc, RT2560_LEDCSR, tmp); } static void rt2560_set_bssid(struct rt2560_softc *sc, const uint8_t *bssid) { uint32_t tmp; tmp = bssid[0] | bssid[1] << 8 | bssid[2] << 16 | bssid[3] << 24; RAL_WRITE(sc, RT2560_CSR5, tmp); tmp = bssid[4] | bssid[5] << 8; RAL_WRITE(sc, RT2560_CSR6, tmp); DPRINTF(sc, "setting BSSID to %6D\n", bssid, ":"); } static void rt2560_set_macaddr(struct rt2560_softc *sc, const uint8_t *addr) { uint32_t tmp; tmp = addr[0] | addr[1] << 8 | addr[2] << 16 | addr[3] << 24; RAL_WRITE(sc, RT2560_CSR3, tmp); tmp = addr[4] | addr[5] << 8; RAL_WRITE(sc, RT2560_CSR4, tmp); DPRINTF(sc, "setting MAC address to %6D\n", addr, ":"); } static void rt2560_get_macaddr(struct rt2560_softc *sc, uint8_t *addr) { uint32_t tmp; tmp = RAL_READ(sc, RT2560_CSR3); addr[0] = tmp & 0xff; addr[1] = (tmp >> 8) & 0xff; addr[2] = (tmp >> 16) & 0xff; addr[3] = (tmp >> 24); tmp = RAL_READ(sc, RT2560_CSR4); addr[4] = tmp & 0xff; addr[5] = (tmp >> 8) & 0xff; } static void rt2560_update_promisc(struct ieee80211com *ic) { struct rt2560_softc *sc = ic->ic_softc; uint32_t tmp; tmp = RAL_READ(sc, RT2560_RXCSR0); tmp &= ~RT2560_DROP_NOT_TO_ME; if (ic->ic_promisc == 0) tmp |= RT2560_DROP_NOT_TO_ME; RAL_WRITE(sc, RT2560_RXCSR0, tmp); DPRINTF(sc, "%s promiscuous mode\n", (ic->ic_promisc > 0) ? 
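/*
 * The CSR3/CSR4 (and CSR5/CSR6) accessors above pack a 6-byte 802.11
 * address into two little-endian 32-bit registers: bytes 0-3 in the first
 * register, bytes 4-5 in the low half of the second.  A stand-alone
 * pack/unpack pair showing the same byte layout (helper names are
 * illustrative only):
 */
#if 0	/* stand-alone illustration, not compiled with this file */
#include <stdint.h>

static void
addr_pack(const uint8_t a[6], uint32_t *lo, uint32_t *hi)
{
	*lo = (uint32_t)a[0] | (uint32_t)a[1] << 8 |
	    (uint32_t)a[2] << 16 | (uint32_t)a[3] << 24;
	*hi = (uint32_t)a[4] | (uint32_t)a[5] << 8;
}

static void
addr_unpack(uint32_t lo, uint32_t hi, uint8_t a[6])
{
	a[0] = lo & 0xff;
	a[1] = (lo >> 8) & 0xff;
	a[2] = (lo >> 16) & 0xff;
	a[3] = (lo >> 24) & 0xff;
	a[4] = hi & 0xff;
	a[5] = (hi >> 8) & 0xff;
}
#endif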
"entering" : "leaving"); } static const char * rt2560_get_rf(int rev) { switch (rev) { case RT2560_RF_2522: return "RT2522"; case RT2560_RF_2523: return "RT2523"; case RT2560_RF_2524: return "RT2524"; case RT2560_RF_2525: return "RT2525"; case RT2560_RF_2525E: return "RT2525e"; case RT2560_RF_2526: return "RT2526"; case RT2560_RF_5222: return "RT5222"; default: return "unknown"; } } static void rt2560_read_config(struct rt2560_softc *sc) { uint16_t val; int i; val = rt2560_eeprom_read(sc, RT2560_EEPROM_CONFIG0); sc->rf_rev = (val >> 11) & 0x7; sc->hw_radio = (val >> 10) & 0x1; sc->led_mode = (val >> 6) & 0x7; sc->rx_ant = (val >> 4) & 0x3; sc->tx_ant = (val >> 2) & 0x3; sc->nb_ant = val & 0x3; /* read default values for BBP registers */ for (i = 0; i < 16; i++) { val = rt2560_eeprom_read(sc, RT2560_EEPROM_BBP_BASE + i); if (val == 0 || val == 0xffff) continue; sc->bbp_prom[i].reg = val >> 8; sc->bbp_prom[i].val = val & 0xff; } /* read Tx power for all b/g channels */ for (i = 0; i < 14 / 2; i++) { val = rt2560_eeprom_read(sc, RT2560_EEPROM_TXPOWER + i); sc->txpow[i * 2] = val & 0xff; sc->txpow[i * 2 + 1] = val >> 8; } for (i = 0; i < 14; ++i) { if (sc->txpow[i] > 31) sc->txpow[i] = 24; } val = rt2560_eeprom_read(sc, RT2560_EEPROM_CALIBRATE); if ((val & 0xff) == 0xff) sc->rssi_corr = RT2560_DEFAULT_RSSI_CORR; else sc->rssi_corr = val & 0xff; DPRINTF(sc, "rssi correction %d, calibrate 0x%02x\n", sc->rssi_corr, val); } static void rt2560_scan_start(struct ieee80211com *ic) { struct rt2560_softc *sc = ic->ic_softc; /* abort TSF synchronization */ RAL_WRITE(sc, RT2560_CSR14, 0); rt2560_set_bssid(sc, ieee80211broadcastaddr); } static void rt2560_scan_end(struct ieee80211com *ic) { struct rt2560_softc *sc = ic->ic_softc; struct ieee80211vap *vap = ic->ic_scan->ss_vap; rt2560_enable_tsf_sync(sc); /* XXX keep local copy */ rt2560_set_bssid(sc, vap->iv_bss->ni_bssid); } static int rt2560_bbp_init(struct rt2560_softc *sc) { int i, ntries; /* wait for BBP to be ready */ for (ntries = 0; ntries < 100; ntries++) { if (rt2560_bbp_read(sc, RT2560_BBP_VERSION) != 0) break; DELAY(1); } if (ntries == 100) { device_printf(sc->sc_dev, "timeout waiting for BBP\n"); return EIO; } /* initialize BBP registers to default values */ for (i = 0; i < nitems(rt2560_def_bbp); i++) { rt2560_bbp_write(sc, rt2560_def_bbp[i].reg, rt2560_def_bbp[i].val); } /* initialize BBP registers to values stored in EEPROM */ for (i = 0; i < 16; i++) { if (sc->bbp_prom[i].reg == 0 && sc->bbp_prom[i].val == 0) break; rt2560_bbp_write(sc, sc->bbp_prom[i].reg, sc->bbp_prom[i].val); } rt2560_bbp_write(sc, 17, 0x48); /* XXX restore bbp17 */ return 0; } static void rt2560_set_txantenna(struct rt2560_softc *sc, int antenna) { uint32_t tmp; uint8_t tx; tx = rt2560_bbp_read(sc, RT2560_BBP_TX) & ~RT2560_BBP_ANTMASK; if (antenna == 1) tx |= RT2560_BBP_ANTA; else if (antenna == 2) tx |= RT2560_BBP_ANTB; else tx |= RT2560_BBP_DIVERSITY; /* need to force I/Q flip for RF 2525e, 2526 and 5222 */ if (sc->rf_rev == RT2560_RF_2525E || sc->rf_rev == RT2560_RF_2526 || sc->rf_rev == RT2560_RF_5222) tx |= RT2560_BBP_FLIPIQ; rt2560_bbp_write(sc, RT2560_BBP_TX, tx); /* update values for CCK and OFDM in BBPCSR1 */ tmp = RAL_READ(sc, RT2560_BBPCSR1) & ~0x00070007; tmp |= (tx & 0x7) << 16 | (tx & 0x7); RAL_WRITE(sc, RT2560_BBPCSR1, tmp); } static void rt2560_set_rxantenna(struct rt2560_softc *sc, int antenna) { uint8_t rx; rx = rt2560_bbp_read(sc, RT2560_BBP_RX) & ~RT2560_BBP_ANTMASK; if (antenna == 1) rx |= RT2560_BBP_ANTA; else if (antenna == 2) rx |= 
RT2560_BBP_ANTB; else rx |= RT2560_BBP_DIVERSITY; /* need to force no I/Q flip for RF 2525e and 2526 */ if (sc->rf_rev == RT2560_RF_2525E || sc->rf_rev == RT2560_RF_2526) rx &= ~RT2560_BBP_FLIPIQ; rt2560_bbp_write(sc, RT2560_BBP_RX, rx); } static void rt2560_init_locked(struct rt2560_softc *sc) { struct ieee80211com *ic = &sc->sc_ic; struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps); uint32_t tmp; int i; RAL_LOCK_ASSERT(sc); rt2560_stop_locked(sc); /* setup tx rings */ tmp = RT2560_PRIO_RING_COUNT << 24 | RT2560_ATIM_RING_COUNT << 16 | RT2560_TX_RING_COUNT << 8 | RT2560_TX_DESC_SIZE; /* rings must be initialized in this exact order */ RAL_WRITE(sc, RT2560_TXCSR2, tmp); RAL_WRITE(sc, RT2560_TXCSR3, sc->txq.physaddr); RAL_WRITE(sc, RT2560_TXCSR5, sc->prioq.physaddr); RAL_WRITE(sc, RT2560_TXCSR4, sc->atimq.physaddr); RAL_WRITE(sc, RT2560_TXCSR6, sc->bcnq.physaddr); /* setup rx ring */ tmp = RT2560_RX_RING_COUNT << 8 | RT2560_RX_DESC_SIZE; RAL_WRITE(sc, RT2560_RXCSR1, tmp); RAL_WRITE(sc, RT2560_RXCSR2, sc->rxq.physaddr); /* initialize MAC registers to default values */ for (i = 0; i < nitems(rt2560_def_mac); i++) RAL_WRITE(sc, rt2560_def_mac[i].reg, rt2560_def_mac[i].val); rt2560_set_macaddr(sc, vap ? vap->iv_myaddr : ic->ic_macaddr); /* set basic rate set (will be updated later) */ RAL_WRITE(sc, RT2560_ARSP_PLCP_1, 0x153); rt2560_update_slot(ic); rt2560_update_plcp(sc); rt2560_update_led(sc, 0, 0); RAL_WRITE(sc, RT2560_CSR1, RT2560_RESET_ASIC); RAL_WRITE(sc, RT2560_CSR1, RT2560_HOST_READY); if (rt2560_bbp_init(sc) != 0) { rt2560_stop_locked(sc); return; } rt2560_set_txantenna(sc, sc->tx_ant); rt2560_set_rxantenna(sc, sc->rx_ant); /* set default BSS channel */ rt2560_set_chan(sc, ic->ic_curchan); /* kick Rx */ tmp = RT2560_DROP_PHY_ERROR | RT2560_DROP_CRC_ERROR; if (ic->ic_opmode != IEEE80211_M_MONITOR) { tmp |= RT2560_DROP_CTL | RT2560_DROP_VERSION_ERROR; if (ic->ic_opmode != IEEE80211_M_HOSTAP && ic->ic_opmode != IEEE80211_M_MBSS) tmp |= RT2560_DROP_TODS; if (ic->ic_promisc == 0) tmp |= RT2560_DROP_NOT_TO_ME; } RAL_WRITE(sc, RT2560_RXCSR0, tmp); /* clear old FCS and Rx FIFO errors */ RAL_READ(sc, RT2560_CNT0); RAL_READ(sc, RT2560_CNT4); /* clear any pending interrupts */ RAL_WRITE(sc, RT2560_CSR7, 0xffffffff); /* enable interrupts */ RAL_WRITE(sc, RT2560_CSR8, RT2560_INTR_MASK); sc->sc_flags |= RT2560_F_RUNNING; callout_reset(&sc->watchdog_ch, hz, rt2560_watchdog, sc); } static void rt2560_init(void *priv) { struct rt2560_softc *sc = priv; struct ieee80211com *ic = &sc->sc_ic; RAL_LOCK(sc); rt2560_init_locked(sc); RAL_UNLOCK(sc); if (sc->sc_flags & RT2560_F_RUNNING) ieee80211_start_all(ic); /* start all vap's */ } static void rt2560_stop_locked(struct rt2560_softc *sc) { volatile int *flags = &sc->sc_flags; RAL_LOCK_ASSERT(sc); while (*flags & RT2560_F_INPUT_RUNNING) msleep(sc, &sc->sc_mtx, 0, "ralrunning", hz/10); callout_stop(&sc->watchdog_ch); sc->sc_tx_timer = 0; if (sc->sc_flags & RT2560_F_RUNNING) { sc->sc_flags &= ~RT2560_F_RUNNING; /* abort Tx */ RAL_WRITE(sc, RT2560_TXCSR0, RT2560_ABORT_TX); /* disable Rx */ RAL_WRITE(sc, RT2560_RXCSR0, RT2560_DISABLE_RX); /* reset ASIC (imply reset BBP) */ RAL_WRITE(sc, RT2560_CSR1, RT2560_RESET_ASIC); RAL_WRITE(sc, RT2560_CSR1, 0); /* disable interrupts */ RAL_WRITE(sc, RT2560_CSR8, 0xffffffff); /* reset Tx and Rx rings */ rt2560_reset_tx_ring(sc, &sc->txq); rt2560_reset_tx_ring(sc, &sc->atimq); rt2560_reset_tx_ring(sc, &sc->prioq); rt2560_reset_tx_ring(sc, &sc->bcnq); rt2560_reset_rx_ring(sc, &sc->rxq); } } void rt2560_stop(void *arg) { struct 
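/*
 * rt2560_stop_locked() above waits on RT2560_F_INPUT_RUNNING because the
 * receive path drops the softc lock around net80211 input and marks itself
 * with that flag while unlocked (the rt2661 Rx interrupt later in this diff
 * shows the same pattern with RAL_INPUT_RUNNING).  A minimal sketch of both
 * sides of the handshake, reusing the driver's own locking macros; the
 * periodic msleep() timeout stands in for an explicit wakeup():
 */
#if 0	/* stand-alone illustration, not compiled with this file */
static void
rx_deliver_unlocked(struct rt2560_softc *sc, struct mbuf *m, int rssi, int nf)
{
	sc->sc_flags |= RT2560_F_INPUT_RUNNING;	/* stop() must wait for us */
	RAL_UNLOCK(sc);
	(void)ieee80211_input_all(&sc->sc_ic, m, rssi, nf);
	RAL_LOCK(sc);
	sc->sc_flags &= ~RT2560_F_INPUT_RUNNING;
}

static void
wait_for_input_drain(struct rt2560_softc *sc)
{
	RAL_LOCK_ASSERT(sc);
	while (sc->sc_flags & RT2560_F_INPUT_RUNNING)
		msleep(sc, &sc->sc_mtx, 0, "ralrunning", hz / 10);
}
#endif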
rt2560_softc *sc = arg; RAL_LOCK(sc); rt2560_stop_locked(sc); RAL_UNLOCK(sc); } static int rt2560_raw_xmit(struct ieee80211_node *ni, struct mbuf *m, const struct ieee80211_bpf_params *params) { struct ieee80211com *ic = ni->ni_ic; struct rt2560_softc *sc = ic->ic_softc; RAL_LOCK(sc); /* prevent management frames from being sent if we're not ready */ if (!(sc->sc_flags & RT2560_F_RUNNING)) { RAL_UNLOCK(sc); m_freem(m); return ENETDOWN; } if (sc->prioq.queued >= RT2560_PRIO_RING_COUNT) { RAL_UNLOCK(sc); m_freem(m); return ENOBUFS; /* XXX */ } if (params == NULL) { /* * Legacy path; interpret frame contents to decide * precisely how to send the frame. */ if (rt2560_tx_mgt(sc, m, ni) != 0) goto bad; } else { /* * Caller supplied explicit parameters to use in * sending the frame. */ if (rt2560_tx_raw(sc, m, ni, params)) goto bad; } sc->sc_tx_timer = 5; RAL_UNLOCK(sc); return 0; bad: RAL_UNLOCK(sc); return EIO; /* XXX */ } Index: head/sys/dev/ral/rt2661.c =================================================================== --- head/sys/dev/ral/rt2661.c (revision 327948) +++ head/sys/dev/ral/rt2661.c (revision 327949) @@ -1,2794 +1,2794 @@ /* $FreeBSD$ */ /*- * Copyright (c) 2006 * Damien Bergamini * * Permission to use, copy, modify, and distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ #include __FBSDID("$FreeBSD$"); /*- * Ralink Technology RT2561, RT2561S and RT2661 chipset driver * http://www.ralinktech.com/ */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #define RAL_DEBUG #ifdef RAL_DEBUG #define DPRINTF(sc, fmt, ...) do { \ if (sc->sc_debug > 0) \ printf(fmt, __VA_ARGS__); \ } while (0) #define DPRINTFN(sc, n, fmt, ...) do { \ if (sc->sc_debug >= (n)) \ printf(fmt, __VA_ARGS__); \ } while (0) #else #define DPRINTF(sc, fmt, ...) #define DPRINTFN(sc, n, fmt, ...) 
#endif static struct ieee80211vap *rt2661_vap_create(struct ieee80211com *, const char [IFNAMSIZ], int, enum ieee80211_opmode, int, const uint8_t [IEEE80211_ADDR_LEN], const uint8_t [IEEE80211_ADDR_LEN]); static void rt2661_vap_delete(struct ieee80211vap *); static void rt2661_dma_map_addr(void *, bus_dma_segment_t *, int, int); static int rt2661_alloc_tx_ring(struct rt2661_softc *, struct rt2661_tx_ring *, int); static void rt2661_reset_tx_ring(struct rt2661_softc *, struct rt2661_tx_ring *); static void rt2661_free_tx_ring(struct rt2661_softc *, struct rt2661_tx_ring *); static int rt2661_alloc_rx_ring(struct rt2661_softc *, struct rt2661_rx_ring *, int); static void rt2661_reset_rx_ring(struct rt2661_softc *, struct rt2661_rx_ring *); static void rt2661_free_rx_ring(struct rt2661_softc *, struct rt2661_rx_ring *); static int rt2661_newstate(struct ieee80211vap *, enum ieee80211_state, int); static uint16_t rt2661_eeprom_read(struct rt2661_softc *, uint8_t); static void rt2661_rx_intr(struct rt2661_softc *); static void rt2661_tx_intr(struct rt2661_softc *); static void rt2661_tx_dma_intr(struct rt2661_softc *, struct rt2661_tx_ring *); static void rt2661_mcu_beacon_expire(struct rt2661_softc *); static void rt2661_mcu_wakeup(struct rt2661_softc *); static void rt2661_mcu_cmd_intr(struct rt2661_softc *); static void rt2661_scan_start(struct ieee80211com *); static void rt2661_scan_end(struct ieee80211com *); static void rt2661_getradiocaps(struct ieee80211com *, int, int *, struct ieee80211_channel[]); static void rt2661_set_channel(struct ieee80211com *); static void rt2661_setup_tx_desc(struct rt2661_softc *, struct rt2661_tx_desc *, uint32_t, uint16_t, int, int, const bus_dma_segment_t *, int, int); static int rt2661_tx_data(struct rt2661_softc *, struct mbuf *, struct ieee80211_node *, int); static int rt2661_tx_mgt(struct rt2661_softc *, struct mbuf *, struct ieee80211_node *); static int rt2661_transmit(struct ieee80211com *, struct mbuf *); static void rt2661_start(struct rt2661_softc *); static int rt2661_raw_xmit(struct ieee80211_node *, struct mbuf *, const struct ieee80211_bpf_params *); static void rt2661_watchdog(void *); static void rt2661_parent(struct ieee80211com *); static void rt2661_bbp_write(struct rt2661_softc *, uint8_t, uint8_t); static uint8_t rt2661_bbp_read(struct rt2661_softc *, uint8_t); static void rt2661_rf_write(struct rt2661_softc *, uint8_t, uint32_t); static int rt2661_tx_cmd(struct rt2661_softc *, uint8_t, uint16_t); static void rt2661_select_antenna(struct rt2661_softc *); static void rt2661_enable_mrr(struct rt2661_softc *); static void rt2661_set_txpreamble(struct rt2661_softc *); static void rt2661_set_basicrates(struct rt2661_softc *, const struct ieee80211_rateset *); static void rt2661_select_band(struct rt2661_softc *, struct ieee80211_channel *); static void rt2661_set_chan(struct rt2661_softc *, struct ieee80211_channel *); static void rt2661_set_bssid(struct rt2661_softc *, const uint8_t *); static void rt2661_set_macaddr(struct rt2661_softc *, const uint8_t *); static void rt2661_update_promisc(struct ieee80211com *); static int rt2661_wme_update(struct ieee80211com *) __unused; static void rt2661_update_slot(struct ieee80211com *); static const char *rt2661_get_rf(int); static void rt2661_read_eeprom(struct rt2661_softc *, uint8_t macaddr[IEEE80211_ADDR_LEN]); static int rt2661_bbp_init(struct rt2661_softc *); static void rt2661_init_locked(struct rt2661_softc *); static void rt2661_init(void *); static void rt2661_stop_locked(struct 
rt2661_softc *); static void rt2661_stop(void *); static int rt2661_load_microcode(struct rt2661_softc *); #ifdef notyet static void rt2661_rx_tune(struct rt2661_softc *); static void rt2661_radar_start(struct rt2661_softc *); static int rt2661_radar_stop(struct rt2661_softc *); #endif static int rt2661_prepare_beacon(struct rt2661_softc *, struct ieee80211vap *); static void rt2661_enable_tsf_sync(struct rt2661_softc *); static void rt2661_enable_tsf(struct rt2661_softc *); static int rt2661_get_rssi(struct rt2661_softc *, uint8_t); static const struct { uint32_t reg; uint32_t val; } rt2661_def_mac[] = { RT2661_DEF_MAC }; static const struct { uint8_t reg; uint8_t val; } rt2661_def_bbp[] = { RT2661_DEF_BBP }; static const struct rfprog { uint8_t chan; uint32_t r1, r2, r3, r4; } rt2661_rf5225_1[] = { RT2661_RF5225_1 }, rt2661_rf5225_2[] = { RT2661_RF5225_2 }; static const uint8_t rt2661_chan_2ghz[] = { 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14 }; static const uint8_t rt2661_chan_5ghz[] = { 36, 40, 44, 48, 52, 56, 60, 64, 100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140, 149, 153, 157, 161, 165 }; int rt2661_attach(device_t dev, int id) { struct rt2661_softc *sc = device_get_softc(dev); struct ieee80211com *ic = &sc->sc_ic; uint32_t val; int error, ac, ntries; sc->sc_id = id; sc->sc_dev = dev; mtx_init(&sc->sc_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK, MTX_DEF | MTX_RECURSE); callout_init_mtx(&sc->watchdog_ch, &sc->sc_mtx, 0); mbufq_init(&sc->sc_snd, ifqmaxlen); /* wait for NIC to initialize */ for (ntries = 0; ntries < 1000; ntries++) { if ((val = RAL_READ(sc, RT2661_MAC_CSR0)) != 0) break; DELAY(1000); } if (ntries == 1000) { device_printf(sc->sc_dev, "timeout waiting for NIC to initialize\n"); error = EIO; goto fail1; } /* retrieve RF rev. no and various other things from EEPROM */ rt2661_read_eeprom(sc, ic->ic_macaddr); device_printf(dev, "MAC/BBP RT%X, RF %s\n", val, rt2661_get_rf(sc->rf_rev)); /* * Allocate Tx and Rx rings. 
*/ for (ac = 0; ac < 4; ac++) { error = rt2661_alloc_tx_ring(sc, &sc->txq[ac], RT2661_TX_RING_COUNT); if (error != 0) { device_printf(sc->sc_dev, "could not allocate Tx ring %d\n", ac); goto fail2; } } error = rt2661_alloc_tx_ring(sc, &sc->mgtq, RT2661_MGT_RING_COUNT); if (error != 0) { device_printf(sc->sc_dev, "could not allocate Mgt ring\n"); goto fail2; } error = rt2661_alloc_rx_ring(sc, &sc->rxq, RT2661_RX_RING_COUNT); if (error != 0) { device_printf(sc->sc_dev, "could not allocate Rx ring\n"); goto fail3; } ic->ic_softc = sc; ic->ic_name = device_get_nameunit(dev); ic->ic_opmode = IEEE80211_M_STA; ic->ic_phytype = IEEE80211_T_OFDM; /* not only, but not used */ /* set device capabilities */ ic->ic_caps = IEEE80211_C_STA /* station mode */ | IEEE80211_C_IBSS /* ibss, nee adhoc, mode */ | IEEE80211_C_HOSTAP /* hostap mode */ | IEEE80211_C_MONITOR /* monitor mode */ | IEEE80211_C_AHDEMO /* adhoc demo mode */ | IEEE80211_C_WDS /* 4-address traffic works */ | IEEE80211_C_MBSS /* mesh point link mode */ | IEEE80211_C_SHPREAMBLE /* short preamble supported */ | IEEE80211_C_SHSLOT /* short slot time supported */ | IEEE80211_C_WPA /* capable of WPA1+WPA2 */ | IEEE80211_C_BGSCAN /* capable of bg scanning */ #ifdef notyet | IEEE80211_C_TXFRAG /* handle tx frags */ | IEEE80211_C_WME /* 802.11e */ #endif ; rt2661_getradiocaps(ic, IEEE80211_CHAN_MAX, &ic->ic_nchans, ic->ic_channels); ieee80211_ifattach(ic); #if 0 ic->ic_wme.wme_update = rt2661_wme_update; #endif ic->ic_scan_start = rt2661_scan_start; ic->ic_scan_end = rt2661_scan_end; ic->ic_getradiocaps = rt2661_getradiocaps; ic->ic_set_channel = rt2661_set_channel; ic->ic_updateslot = rt2661_update_slot; ic->ic_update_promisc = rt2661_update_promisc; ic->ic_raw_xmit = rt2661_raw_xmit; ic->ic_transmit = rt2661_transmit; ic->ic_parent = rt2661_parent; ic->ic_vap_create = rt2661_vap_create; ic->ic_vap_delete = rt2661_vap_delete; ieee80211_radiotap_attach(ic, &sc->sc_txtap.wt_ihdr, sizeof(sc->sc_txtap), RT2661_TX_RADIOTAP_PRESENT, &sc->sc_rxtap.wr_ihdr, sizeof(sc->sc_rxtap), RT2661_RX_RADIOTAP_PRESENT); #ifdef RAL_DEBUG SYSCTL_ADD_INT(device_get_sysctl_ctx(dev), SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "debug", CTLFLAG_RW, &sc->sc_debug, 0, "debug msgs"); #endif if (bootverbose) ieee80211_announce(ic); return 0; fail3: rt2661_free_tx_ring(sc, &sc->mgtq); fail2: while (--ac >= 0) rt2661_free_tx_ring(sc, &sc->txq[ac]); fail1: mtx_destroy(&sc->sc_mtx); return error; } int rt2661_detach(void *xsc) { struct rt2661_softc *sc = xsc; struct ieee80211com *ic = &sc->sc_ic; RAL_LOCK(sc); rt2661_stop_locked(sc); RAL_UNLOCK(sc); ieee80211_ifdetach(ic); mbufq_drain(&sc->sc_snd); rt2661_free_tx_ring(sc, &sc->txq[0]); rt2661_free_tx_ring(sc, &sc->txq[1]); rt2661_free_tx_ring(sc, &sc->txq[2]); rt2661_free_tx_ring(sc, &sc->txq[3]); rt2661_free_tx_ring(sc, &sc->mgtq); rt2661_free_rx_ring(sc, &sc->rxq); mtx_destroy(&sc->sc_mtx); return 0; } static struct ieee80211vap * rt2661_vap_create(struct ieee80211com *ic, const char name[IFNAMSIZ], int unit, enum ieee80211_opmode opmode, int flags, const uint8_t bssid[IEEE80211_ADDR_LEN], const uint8_t mac[IEEE80211_ADDR_LEN]) { struct rt2661_softc *sc = ic->ic_softc; struct rt2661_vap *rvp; struct ieee80211vap *vap; switch (opmode) { case IEEE80211_M_STA: case IEEE80211_M_IBSS: case IEEE80211_M_AHDEMO: case IEEE80211_M_MONITOR: case IEEE80211_M_HOSTAP: case IEEE80211_M_MBSS: /* XXXRP: TBD */ if (!TAILQ_EMPTY(&ic->ic_vaps)) { device_printf(sc->sc_dev, "only 1 vap supported\n"); return NULL; } if (opmode == 
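/*
 * rt2661_attach() above uses the usual staged-initialization idiom: each
 * successful allocation has exactly one "fail" label that undoes it, and
 * the labels run in reverse order of the allocations (fail3 frees the
 * management ring, fail2 the per-AC data rings, fail1 only destroys the
 * mutex).  A compact generic sketch of the idiom with made-up resource
 * names and softc type:
 */
#if 0	/* stand-alone illustration, not compiled with this file */
static int
staged_attach(struct mydev_softc *sc)
{
	int error;

	if ((error = alloc_resource_a(sc)) != 0)
		goto fail1;		/* nothing to undo yet */
	if ((error = alloc_resource_b(sc)) != 0)
		goto fail2;		/* undo A only */
	if ((error = alloc_resource_c(sc)) != 0)
		goto fail3;		/* undo B, then A */
	return (0);

fail3:	free_resource_b(sc);
fail2:	free_resource_a(sc);
fail1:	return (error);
}
#endif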
IEEE80211_M_STA) flags |= IEEE80211_CLONE_NOBEACONS; break; case IEEE80211_M_WDS: if (TAILQ_EMPTY(&ic->ic_vaps) || ic->ic_opmode != IEEE80211_M_HOSTAP) { device_printf(sc->sc_dev, "wds only supported in ap mode\n"); return NULL; } /* * Silently remove any request for a unique * bssid; WDS vap's always share the local * mac address. */ flags &= ~IEEE80211_CLONE_BSSID; break; default: device_printf(sc->sc_dev, "unknown opmode %d\n", opmode); return NULL; } rvp = malloc(sizeof(struct rt2661_vap), M_80211_VAP, M_WAITOK | M_ZERO); vap = &rvp->ral_vap; ieee80211_vap_setup(ic, vap, name, unit, opmode, flags, bssid); /* override state transition machine */ rvp->ral_newstate = vap->iv_newstate; vap->iv_newstate = rt2661_newstate; #if 0 vap->iv_update_beacon = rt2661_beacon_update; #endif ieee80211_ratectl_init(vap); /* complete setup */ ieee80211_vap_attach(vap, ieee80211_media_change, ieee80211_media_status, mac); if (TAILQ_FIRST(&ic->ic_vaps) == vap) ic->ic_opmode = opmode; return vap; } static void rt2661_vap_delete(struct ieee80211vap *vap) { struct rt2661_vap *rvp = RT2661_VAP(vap); ieee80211_ratectl_deinit(vap); ieee80211_vap_detach(vap); free(rvp, M_80211_VAP); } void rt2661_shutdown(void *xsc) { struct rt2661_softc *sc = xsc; rt2661_stop(sc); } void rt2661_suspend(void *xsc) { struct rt2661_softc *sc = xsc; rt2661_stop(sc); } void rt2661_resume(void *xsc) { struct rt2661_softc *sc = xsc; if (sc->sc_ic.ic_nrunning > 0) rt2661_init(sc); } static void rt2661_dma_map_addr(void *arg, bus_dma_segment_t *segs, int nseg, int error) { if (error != 0) return; KASSERT(nseg == 1, ("too many DMA segments, %d should be 1", nseg)); *(bus_addr_t *)arg = segs[0].ds_addr; } static int rt2661_alloc_tx_ring(struct rt2661_softc *sc, struct rt2661_tx_ring *ring, int count) { int i, error; ring->count = count; ring->queued = 0; ring->cur = ring->next = ring->stat = 0; error = bus_dma_tag_create(bus_get_dma_tag(sc->sc_dev), 4, 0, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, count * RT2661_TX_DESC_SIZE, 1, count * RT2661_TX_DESC_SIZE, 0, NULL, NULL, &ring->desc_dmat); if (error != 0) { device_printf(sc->sc_dev, "could not create desc DMA tag\n"); goto fail; } error = bus_dmamem_alloc(ring->desc_dmat, (void **)&ring->desc, BUS_DMA_NOWAIT | BUS_DMA_ZERO, &ring->desc_map); if (error != 0) { device_printf(sc->sc_dev, "could not allocate DMA memory\n"); goto fail; } error = bus_dmamap_load(ring->desc_dmat, ring->desc_map, ring->desc, count * RT2661_TX_DESC_SIZE, rt2661_dma_map_addr, &ring->physaddr, 0); if (error != 0) { device_printf(sc->sc_dev, "could not load desc DMA map\n"); goto fail; } - ring->data = malloc(count * sizeof (struct rt2661_tx_data), M_DEVBUF, + ring->data = mallocarray(count, sizeof(struct rt2661_tx_data), M_DEVBUF, M_NOWAIT | M_ZERO); if (ring->data == NULL) { device_printf(sc->sc_dev, "could not allocate soft data\n"); error = ENOMEM; goto fail; } error = bus_dma_tag_create(bus_get_dma_tag(sc->sc_dev), 1, 0, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, MCLBYTES, RT2661_MAX_SCATTER, MCLBYTES, 0, NULL, NULL, &ring->data_dmat); if (error != 0) { device_printf(sc->sc_dev, "could not create data DMA tag\n"); goto fail; } for (i = 0; i < count; i++) { error = bus_dmamap_create(ring->data_dmat, 0, &ring->data[i].map); if (error != 0) { device_printf(sc->sc_dev, "could not create DMA map\n"); goto fail; } } return 0; fail: rt2661_free_tx_ring(sc, ring); return error; } static void rt2661_reset_tx_ring(struct rt2661_softc *sc, struct rt2661_tx_ring *ring) { struct rt2661_tx_desc 
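/*
 * This hunk is the substance of the revision: the ring "data" arrays are
 * now sized with mallocarray(count, sizeof(...)) instead of
 * malloc(count * sizeof(...)).  mallocarray(9) checks the nmemb * size
 * multiplication for overflow before allocating, so an oversized count can
 * no longer wrap around to a small allocation.  The check is essentially
 * the standard idiom below (a simplified user-space sketch with a
 * hypothetical helper name, not the kernel implementation itself):
 */
#if 0	/* stand-alone illustration, not compiled with this file */
#include <stdint.h>
#include <stdlib.h>

static void *
checked_array_alloc(size_t nmemb, size_t size)
{
	/* Refuse the request if nmemb * size would not fit in a size_t. */
	if (size != 0 && nmemb > SIZE_MAX / size)
		return (NULL);
	return (malloc(nmemb * size));
}
#endif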
*desc; struct rt2661_tx_data *data; int i; for (i = 0; i < ring->count; i++) { desc = &ring->desc[i]; data = &ring->data[i]; if (data->m != NULL) { bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_POSTWRITE); bus_dmamap_unload(ring->data_dmat, data->map); m_freem(data->m); data->m = NULL; } if (data->ni != NULL) { ieee80211_free_node(data->ni); data->ni = NULL; } desc->flags = 0; } bus_dmamap_sync(ring->desc_dmat, ring->desc_map, BUS_DMASYNC_PREWRITE); ring->queued = 0; ring->cur = ring->next = ring->stat = 0; } static void rt2661_free_tx_ring(struct rt2661_softc *sc, struct rt2661_tx_ring *ring) { struct rt2661_tx_data *data; int i; if (ring->desc != NULL) { bus_dmamap_sync(ring->desc_dmat, ring->desc_map, BUS_DMASYNC_POSTWRITE); bus_dmamap_unload(ring->desc_dmat, ring->desc_map); bus_dmamem_free(ring->desc_dmat, ring->desc, ring->desc_map); } if (ring->desc_dmat != NULL) bus_dma_tag_destroy(ring->desc_dmat); if (ring->data != NULL) { for (i = 0; i < ring->count; i++) { data = &ring->data[i]; if (data->m != NULL) { bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_POSTWRITE); bus_dmamap_unload(ring->data_dmat, data->map); m_freem(data->m); } if (data->ni != NULL) ieee80211_free_node(data->ni); if (data->map != NULL) bus_dmamap_destroy(ring->data_dmat, data->map); } free(ring->data, M_DEVBUF); } if (ring->data_dmat != NULL) bus_dma_tag_destroy(ring->data_dmat); } static int rt2661_alloc_rx_ring(struct rt2661_softc *sc, struct rt2661_rx_ring *ring, int count) { struct rt2661_rx_desc *desc; struct rt2661_rx_data *data; bus_addr_t physaddr; int i, error; ring->count = count; ring->cur = ring->next = 0; error = bus_dma_tag_create(bus_get_dma_tag(sc->sc_dev), 4, 0, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, count * RT2661_RX_DESC_SIZE, 1, count * RT2661_RX_DESC_SIZE, 0, NULL, NULL, &ring->desc_dmat); if (error != 0) { device_printf(sc->sc_dev, "could not create desc DMA tag\n"); goto fail; } error = bus_dmamem_alloc(ring->desc_dmat, (void **)&ring->desc, BUS_DMA_NOWAIT | BUS_DMA_ZERO, &ring->desc_map); if (error != 0) { device_printf(sc->sc_dev, "could not allocate DMA memory\n"); goto fail; } error = bus_dmamap_load(ring->desc_dmat, ring->desc_map, ring->desc, count * RT2661_RX_DESC_SIZE, rt2661_dma_map_addr, &ring->physaddr, 0); if (error != 0) { device_printf(sc->sc_dev, "could not load desc DMA map\n"); goto fail; } - ring->data = malloc(count * sizeof (struct rt2661_rx_data), M_DEVBUF, + ring->data = mallocarray(count, sizeof(struct rt2661_rx_data), M_DEVBUF, M_NOWAIT | M_ZERO); if (ring->data == NULL) { device_printf(sc->sc_dev, "could not allocate soft data\n"); error = ENOMEM; goto fail; } /* * Pre-allocate Rx buffers and populate Rx ring. 
*/ error = bus_dma_tag_create(bus_get_dma_tag(sc->sc_dev), 1, 0, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, MCLBYTES, 1, MCLBYTES, 0, NULL, NULL, &ring->data_dmat); if (error != 0) { device_printf(sc->sc_dev, "could not create data DMA tag\n"); goto fail; } for (i = 0; i < count; i++) { desc = &sc->rxq.desc[i]; data = &sc->rxq.data[i]; error = bus_dmamap_create(ring->data_dmat, 0, &data->map); if (error != 0) { device_printf(sc->sc_dev, "could not create DMA map\n"); goto fail; } data->m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR); if (data->m == NULL) { device_printf(sc->sc_dev, "could not allocate rx mbuf\n"); error = ENOMEM; goto fail; } error = bus_dmamap_load(ring->data_dmat, data->map, mtod(data->m, void *), MCLBYTES, rt2661_dma_map_addr, &physaddr, 0); if (error != 0) { device_printf(sc->sc_dev, "could not load rx buf DMA map"); goto fail; } desc->flags = htole32(RT2661_RX_BUSY); desc->physaddr = htole32(physaddr); } bus_dmamap_sync(ring->desc_dmat, ring->desc_map, BUS_DMASYNC_PREWRITE); return 0; fail: rt2661_free_rx_ring(sc, ring); return error; } static void rt2661_reset_rx_ring(struct rt2661_softc *sc, struct rt2661_rx_ring *ring) { int i; for (i = 0; i < ring->count; i++) ring->desc[i].flags = htole32(RT2661_RX_BUSY); bus_dmamap_sync(ring->desc_dmat, ring->desc_map, BUS_DMASYNC_PREWRITE); ring->cur = ring->next = 0; } static void rt2661_free_rx_ring(struct rt2661_softc *sc, struct rt2661_rx_ring *ring) { struct rt2661_rx_data *data; int i; if (ring->desc != NULL) { bus_dmamap_sync(ring->desc_dmat, ring->desc_map, BUS_DMASYNC_POSTWRITE); bus_dmamap_unload(ring->desc_dmat, ring->desc_map); bus_dmamem_free(ring->desc_dmat, ring->desc, ring->desc_map); } if (ring->desc_dmat != NULL) bus_dma_tag_destroy(ring->desc_dmat); if (ring->data != NULL) { for (i = 0; i < ring->count; i++) { data = &ring->data[i]; if (data->m != NULL) { bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_POSTREAD); bus_dmamap_unload(ring->data_dmat, data->map); m_freem(data->m); } if (data->map != NULL) bus_dmamap_destroy(ring->data_dmat, data->map); } free(ring->data, M_DEVBUF); } if (ring->data_dmat != NULL) bus_dma_tag_destroy(ring->data_dmat); } static int rt2661_newstate(struct ieee80211vap *vap, enum ieee80211_state nstate, int arg) { struct rt2661_vap *rvp = RT2661_VAP(vap); struct ieee80211com *ic = vap->iv_ic; struct rt2661_softc *sc = ic->ic_softc; int error; if (nstate == IEEE80211_S_INIT && vap->iv_state == IEEE80211_S_RUN) { uint32_t tmp; /* abort TSF synchronization */ tmp = RAL_READ(sc, RT2661_TXRX_CSR9); RAL_WRITE(sc, RT2661_TXRX_CSR9, tmp & ~0x00ffffff); } error = rvp->ral_newstate(vap, nstate, arg); if (error == 0 && nstate == IEEE80211_S_RUN) { struct ieee80211_node *ni = vap->iv_bss; if (vap->iv_opmode != IEEE80211_M_MONITOR) { rt2661_enable_mrr(sc); rt2661_set_txpreamble(sc); rt2661_set_basicrates(sc, &ni->ni_rates); rt2661_set_bssid(sc, ni->ni_bssid); } if (vap->iv_opmode == IEEE80211_M_HOSTAP || vap->iv_opmode == IEEE80211_M_IBSS || vap->iv_opmode == IEEE80211_M_MBSS) { error = rt2661_prepare_beacon(sc, vap); if (error != 0) return error; } if (vap->iv_opmode != IEEE80211_M_MONITOR) rt2661_enable_tsf_sync(sc); else rt2661_enable_tsf(sc); } return error; } /* * Read 16 bits at address 'addr' from the serial EEPROM (either 93C46 or * 93C66). 
*/ static uint16_t rt2661_eeprom_read(struct rt2661_softc *sc, uint8_t addr) { uint32_t tmp; uint16_t val; int n; /* clock C once before the first command */ RT2661_EEPROM_CTL(sc, 0); RT2661_EEPROM_CTL(sc, RT2661_S); RT2661_EEPROM_CTL(sc, RT2661_S | RT2661_C); RT2661_EEPROM_CTL(sc, RT2661_S); /* write start bit (1) */ RT2661_EEPROM_CTL(sc, RT2661_S | RT2661_D); RT2661_EEPROM_CTL(sc, RT2661_S | RT2661_D | RT2661_C); /* write READ opcode (10) */ RT2661_EEPROM_CTL(sc, RT2661_S | RT2661_D); RT2661_EEPROM_CTL(sc, RT2661_S | RT2661_D | RT2661_C); RT2661_EEPROM_CTL(sc, RT2661_S); RT2661_EEPROM_CTL(sc, RT2661_S | RT2661_C); /* write address (A5-A0 or A7-A0) */ n = (RAL_READ(sc, RT2661_E2PROM_CSR) & RT2661_93C46) ? 5 : 7; for (; n >= 0; n--) { RT2661_EEPROM_CTL(sc, RT2661_S | (((addr >> n) & 1) << RT2661_SHIFT_D)); RT2661_EEPROM_CTL(sc, RT2661_S | (((addr >> n) & 1) << RT2661_SHIFT_D) | RT2661_C); } RT2661_EEPROM_CTL(sc, RT2661_S); /* read data Q15-Q0 */ val = 0; for (n = 15; n >= 0; n--) { RT2661_EEPROM_CTL(sc, RT2661_S | RT2661_C); tmp = RAL_READ(sc, RT2661_E2PROM_CSR); val |= ((tmp & RT2661_Q) >> RT2661_SHIFT_Q) << n; RT2661_EEPROM_CTL(sc, RT2661_S); } RT2661_EEPROM_CTL(sc, 0); /* clear Chip Select and clock C */ RT2661_EEPROM_CTL(sc, RT2661_S); RT2661_EEPROM_CTL(sc, 0); RT2661_EEPROM_CTL(sc, RT2661_C); return val; } static void rt2661_tx_intr(struct rt2661_softc *sc) { struct ieee80211_ratectl_tx_status *txs = &sc->sc_txs; struct rt2661_tx_ring *txq; struct rt2661_tx_data *data; uint32_t val; int error, qid; txs->flags = IEEE80211_RATECTL_TX_FAIL_LONG; for (;;) { struct ieee80211_node *ni; struct mbuf *m; val = RAL_READ(sc, RT2661_STA_CSR4); if (!(val & RT2661_TX_STAT_VALID)) break; /* retrieve the queue in which this frame was sent */ qid = RT2661_TX_QID(val); txq = (qid <= 3) ? 
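/*
 * rt2661_eeprom_read() above bit-bangs a Microwire READ transaction: a
 * start bit, the two READ opcode bits (1, 0), a 6-bit (93C46) or 8-bit
 * (93C66) address sent MSB first, then 16 data bits clocked back MSB
 * first.  A compact device-independent sketch of the same sequence,
 * assuming hypothetical set_pins()/data_out() callbacks that drive
 * CS/CLK/DI and sample DO:
 */
#if 0	/* stand-alone illustration, not compiled with this file */
#include <stdint.h>

struct mw_ops {
	void (*set_pins)(void *ctx, int cs, int clk, int di);
	int  (*data_out)(void *ctx);		/* sample the DO pin */
};

static uint16_t
microwire_read16(const struct mw_ops *ops, void *ctx, uint8_t addr,
    int addr_bits)
{
	/* start bit plus READ opcode: DI = 1, 1, 0, clocked on rising edges */
	const int preamble[] = { 1, 1, 0 };
	uint16_t val = 0;
	int n, bit;

	for (n = 0; n < 3; n++) {
		ops->set_pins(ctx, 1, 0, preamble[n]);
		ops->set_pins(ctx, 1, 1, preamble[n]);
	}
	/* address, MSB first (6 bits on a 93C46, 8 bits on a 93C66) */
	for (n = addr_bits - 1; n >= 0; n--) {
		bit = (addr >> n) & 1;
		ops->set_pins(ctx, 1, 0, bit);
		ops->set_pins(ctx, 1, 1, bit);
	}
	/* 16 data bits, MSB first: raise CLK, sample DO, lower CLK */
	for (n = 15; n >= 0; n--) {
		ops->set_pins(ctx, 1, 1, 0);
		val |= (uint16_t)(ops->data_out(ctx) & 1) << n;
		ops->set_pins(ctx, 1, 0, 0);
	}
	ops->set_pins(ctx, 0, 0, 0);		/* deselect the chip */
	return (val);
}
#endif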
&sc->txq[qid] : &sc->mgtq; /* retrieve rate control algorithm context */ data = &txq->data[txq->stat]; m = data->m; data->m = NULL; ni = data->ni; data->ni = NULL; /* if no frame has been sent, ignore */ if (ni == NULL) continue; switch (RT2661_TX_RESULT(val)) { case RT2661_TX_SUCCESS: txs->status = IEEE80211_RATECTL_TX_SUCCESS; txs->long_retries = RT2661_TX_RETRYCNT(val); DPRINTFN(sc, 10, "data frame sent successfully after " "%d retries\n", txs->long_retries); if (data->rix != IEEE80211_FIXED_RATE_NONE) ieee80211_ratectl_tx_complete(ni, txs); error = 0; break; case RT2661_TX_RETRY_FAIL: txs->status = IEEE80211_RATECTL_TX_FAIL_LONG; txs->long_retries = RT2661_TX_RETRYCNT(val); DPRINTFN(sc, 9, "%s\n", "sending data frame failed (too much retries)"); if (data->rix != IEEE80211_FIXED_RATE_NONE) ieee80211_ratectl_tx_complete(ni, txs); error = 1; break; default: /* other failure */ device_printf(sc->sc_dev, "sending data frame failed 0x%08x\n", val); error = 1; } DPRINTFN(sc, 15, "tx done q=%d idx=%u\n", qid, txq->stat); txq->queued--; if (++txq->stat >= txq->count) /* faster than % count */ txq->stat = 0; ieee80211_tx_complete(ni, m, error); } sc->sc_tx_timer = 0; rt2661_start(sc); } static void rt2661_tx_dma_intr(struct rt2661_softc *sc, struct rt2661_tx_ring *txq) { struct rt2661_tx_desc *desc; struct rt2661_tx_data *data; bus_dmamap_sync(txq->desc_dmat, txq->desc_map, BUS_DMASYNC_POSTREAD); for (;;) { desc = &txq->desc[txq->next]; data = &txq->data[txq->next]; if ((le32toh(desc->flags) & RT2661_TX_BUSY) || !(le32toh(desc->flags) & RT2661_TX_VALID)) break; bus_dmamap_sync(txq->data_dmat, data->map, BUS_DMASYNC_POSTWRITE); bus_dmamap_unload(txq->data_dmat, data->map); /* descriptor is no longer valid */ desc->flags &= ~htole32(RT2661_TX_VALID); DPRINTFN(sc, 15, "tx dma done q=%p idx=%u\n", txq, txq->next); if (++txq->next >= txq->count) /* faster than % count */ txq->next = 0; } bus_dmamap_sync(txq->desc_dmat, txq->desc_map, BUS_DMASYNC_PREWRITE); } static void rt2661_rx_intr(struct rt2661_softc *sc) { struct ieee80211com *ic = &sc->sc_ic; struct rt2661_rx_desc *desc; struct rt2661_rx_data *data; bus_addr_t physaddr; struct ieee80211_frame *wh; struct ieee80211_node *ni; struct mbuf *mnew, *m; int error; bus_dmamap_sync(sc->rxq.desc_dmat, sc->rxq.desc_map, BUS_DMASYNC_POSTREAD); for (;;) { int8_t rssi, nf; desc = &sc->rxq.desc[sc->rxq.cur]; data = &sc->rxq.data[sc->rxq.cur]; if (le32toh(desc->flags) & RT2661_RX_BUSY) break; if ((le32toh(desc->flags) & RT2661_RX_PHY_ERROR) || (le32toh(desc->flags) & RT2661_RX_CRC_ERROR)) { /* * This should not happen since we did not request * to receive those frames when we filled TXRX_CSR0. */ DPRINTFN(sc, 5, "PHY or CRC error flags 0x%08x\n", le32toh(desc->flags)); counter_u64_add(ic->ic_ierrors, 1); goto skip; } if ((le32toh(desc->flags) & RT2661_RX_CIPHER_MASK) != 0) { counter_u64_add(ic->ic_ierrors, 1); goto skip; } /* * Try to allocate a new mbuf for this ring element and load it * before processing the current mbuf. If the ring element * cannot be loaded, drop the received packet and reuse the old * mbuf. In the unlikely case that the old mbuf can't be * reloaded either, explicitly panic. 
*/ mnew = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR); if (mnew == NULL) { counter_u64_add(ic->ic_ierrors, 1); goto skip; } bus_dmamap_sync(sc->rxq.data_dmat, data->map, BUS_DMASYNC_POSTREAD); bus_dmamap_unload(sc->rxq.data_dmat, data->map); error = bus_dmamap_load(sc->rxq.data_dmat, data->map, mtod(mnew, void *), MCLBYTES, rt2661_dma_map_addr, &physaddr, 0); if (error != 0) { m_freem(mnew); /* try to reload the old mbuf */ error = bus_dmamap_load(sc->rxq.data_dmat, data->map, mtod(data->m, void *), MCLBYTES, rt2661_dma_map_addr, &physaddr, 0); if (error != 0) { /* very unlikely that it will fail... */ panic("%s: could not load old rx mbuf", device_get_name(sc->sc_dev)); } counter_u64_add(ic->ic_ierrors, 1); goto skip; } /* * New mbuf successfully loaded, update Rx ring and continue * processing. */ m = data->m; data->m = mnew; desc->physaddr = htole32(physaddr); /* finalize mbuf */ m->m_pkthdr.len = m->m_len = (le32toh(desc->flags) >> 16) & 0xfff; rssi = rt2661_get_rssi(sc, desc->rssi); /* Error happened during RSSI conversion. */ if (rssi < 0) rssi = -30; /* XXX ignored by net80211 */ nf = RT2661_NOISE_FLOOR; if (ieee80211_radiotap_active(ic)) { struct rt2661_rx_radiotap_header *tap = &sc->sc_rxtap; uint32_t tsf_lo, tsf_hi; /* get timestamp (low and high 32 bits) */ tsf_hi = RAL_READ(sc, RT2661_TXRX_CSR13); tsf_lo = RAL_READ(sc, RT2661_TXRX_CSR12); tap->wr_tsf = htole64(((uint64_t)tsf_hi << 32) | tsf_lo); tap->wr_flags = 0; tap->wr_rate = ieee80211_plcp2rate(desc->rate, (desc->flags & htole32(RT2661_RX_OFDM)) ? IEEE80211_T_OFDM : IEEE80211_T_CCK); tap->wr_antsignal = nf + rssi; tap->wr_antnoise = nf; } sc->sc_flags |= RAL_INPUT_RUNNING; RAL_UNLOCK(sc); wh = mtod(m, struct ieee80211_frame *); /* send the frame to the 802.11 layer */ ni = ieee80211_find_rxnode(ic, (struct ieee80211_frame_min *)wh); if (ni != NULL) { (void) ieee80211_input(ni, m, rssi, nf); ieee80211_free_node(ni); } else (void) ieee80211_input_all(ic, m, rssi, nf); RAL_LOCK(sc); sc->sc_flags &= ~RAL_INPUT_RUNNING; skip: desc->flags |= htole32(RT2661_RX_BUSY); DPRINTFN(sc, 15, "rx intr idx=%u\n", sc->rxq.cur); sc->rxq.cur = (sc->rxq.cur + 1) % RT2661_RX_RING_COUNT; } bus_dmamap_sync(sc->rxq.desc_dmat, sc->rxq.desc_map, BUS_DMASYNC_PREWRITE); } /* ARGSUSED */ static void rt2661_mcu_beacon_expire(struct rt2661_softc *sc) { /* do nothing */ } static void rt2661_mcu_wakeup(struct rt2661_softc *sc) { RAL_WRITE(sc, RT2661_MAC_CSR11, 5 << 16); RAL_WRITE(sc, RT2661_SOFT_RESET_CSR, 0x7); RAL_WRITE(sc, RT2661_IO_CNTL_CSR, 0x18); RAL_WRITE(sc, RT2661_PCI_USEC_CSR, 0x20); /* send wakeup command to MCU */ rt2661_tx_cmd(sc, RT2661_MCU_CMD_WAKEUP, 0); } static void rt2661_mcu_cmd_intr(struct rt2661_softc *sc) { RAL_READ(sc, RT2661_M2H_CMD_DONE_CSR); RAL_WRITE(sc, RT2661_M2H_CMD_DONE_CSR, 0xffffffff); } void rt2661_intr(void *arg) { struct rt2661_softc *sc = arg; uint32_t r1, r2; RAL_LOCK(sc); /* disable MAC and MCU interrupts */ RAL_WRITE(sc, RT2661_INT_MASK_CSR, 0xffffff7f); RAL_WRITE(sc, RT2661_MCU_INT_MASK_CSR, 0xffffffff); /* don't re-enable interrupts if we're shutting down */ if (!(sc->sc_flags & RAL_RUNNING)) { RAL_UNLOCK(sc); return; } r1 = RAL_READ(sc, RT2661_INT_SOURCE_CSR); RAL_WRITE(sc, RT2661_INT_SOURCE_CSR, r1); r2 = RAL_READ(sc, RT2661_MCU_INT_SOURCE_CSR); RAL_WRITE(sc, RT2661_MCU_INT_SOURCE_CSR, r2); if (r1 & RT2661_MGT_DONE) rt2661_tx_dma_intr(sc, &sc->mgtq); if (r1 & RT2661_RX_DONE) rt2661_rx_intr(sc); if (r1 & RT2661_TX0_DMA_DONE) rt2661_tx_dma_intr(sc, &sc->txq[0]); if (r1 & RT2661_TX1_DMA_DONE) rt2661_tx_dma_intr(sc, 
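/*
 * The Rx interrupt above never gives its only cluster away before a
 * replacement is secured: it allocates and DMA-loads a new mbuf first and
 * only then forwards the old one to net80211; if no replacement can be
 * obtained, the frame is dropped and the old cluster is recycled in place.
 * A condensed sketch of that order of operations (error counting and the
 * unlikely-reload panic trimmed; names as used in this file, helper name
 * hypothetical):
 */
#if 0	/* stand-alone illustration, not compiled with this file */
static struct mbuf *
rx_swap_buffer(struct rt2661_softc *sc, struct rt2661_rx_data *data,
    struct rt2661_rx_desc *desc)
{
	struct mbuf *mnew, *m;
	bus_addr_t physaddr;

	mnew = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
	if (mnew == NULL)
		return (NULL);			/* drop: keep the old cluster */

	bus_dmamap_unload(sc->rxq.data_dmat, data->map);
	if (bus_dmamap_load(sc->rxq.data_dmat, data->map, mtod(mnew, void *),
	    MCLBYTES, rt2661_dma_map_addr, &physaddr, 0) != 0) {
		m_freem(mnew);
		/* reload the old cluster so the ring slot stays usable */
		(void)bus_dmamap_load(sc->rxq.data_dmat, data->map,
		    mtod(data->m, void *), MCLBYTES, rt2661_dma_map_addr,
		    &physaddr, 0);
		return (NULL);
	}
	m = data->m;				/* hand the filled mbuf up */
	data->m = mnew;				/* the ring keeps the fresh one */
	desc->physaddr = htole32(physaddr);
	return (m);
}
#endif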
&sc->txq[1]); if (r1 & RT2661_TX2_DMA_DONE) rt2661_tx_dma_intr(sc, &sc->txq[2]); if (r1 & RT2661_TX3_DMA_DONE) rt2661_tx_dma_intr(sc, &sc->txq[3]); if (r1 & RT2661_TX_DONE) rt2661_tx_intr(sc); if (r2 & RT2661_MCU_CMD_DONE) rt2661_mcu_cmd_intr(sc); if (r2 & RT2661_MCU_BEACON_EXPIRE) rt2661_mcu_beacon_expire(sc); if (r2 & RT2661_MCU_WAKEUP) rt2661_mcu_wakeup(sc); /* re-enable MAC and MCU interrupts */ RAL_WRITE(sc, RT2661_INT_MASK_CSR, 0x0000ff10); RAL_WRITE(sc, RT2661_MCU_INT_MASK_CSR, 0); RAL_UNLOCK(sc); } static uint8_t rt2661_plcp_signal(int rate) { switch (rate) { /* OFDM rates (cf IEEE Std 802.11a-1999, pp. 14 Table 80) */ case 12: return 0xb; case 18: return 0xf; case 24: return 0xa; case 36: return 0xe; case 48: return 0x9; case 72: return 0xd; case 96: return 0x8; case 108: return 0xc; /* CCK rates (NB: not IEEE std, device-specific) */ case 2: return 0x0; case 4: return 0x1; case 11: return 0x2; case 22: return 0x3; } return 0xff; /* XXX unsupported/unknown rate */ } static void rt2661_setup_tx_desc(struct rt2661_softc *sc, struct rt2661_tx_desc *desc, uint32_t flags, uint16_t xflags, int len, int rate, const bus_dma_segment_t *segs, int nsegs, int ac) { struct ieee80211com *ic = &sc->sc_ic; uint16_t plcp_length; int i, remainder; desc->flags = htole32(flags); desc->flags |= htole32(len << 16); desc->flags |= htole32(RT2661_TX_BUSY | RT2661_TX_VALID); desc->xflags = htole16(xflags); desc->xflags |= htole16(nsegs << 13); desc->wme = htole16( RT2661_QID(ac) | RT2661_AIFSN(2) | RT2661_LOGCWMIN(4) | RT2661_LOGCWMAX(10)); /* * Remember in which queue this frame was sent. This field is driver * private data only. It will be made available by the NIC in STA_CSR4 * on Tx interrupts. */ desc->qid = ac; /* setup PLCP fields */ desc->plcp_signal = rt2661_plcp_signal(rate); desc->plcp_service = 4; len += IEEE80211_CRC_LEN; if (ieee80211_rate2phytype(ic->ic_rt, rate) == IEEE80211_T_OFDM) { desc->flags |= htole32(RT2661_TX_OFDM); plcp_length = len & 0xfff; desc->plcp_length_hi = plcp_length >> 6; desc->plcp_length_lo = plcp_length & 0x3f; } else { plcp_length = howmany(16 * len, rate); if (rate == 22) { remainder = (16 * len) % 22; if (remainder != 0 && remainder < 7) desc->plcp_service |= RT2661_PLCP_LENGEXT; } desc->plcp_length_hi = plcp_length >> 8; desc->plcp_length_lo = plcp_length & 0xff; if (rate != 2 && (ic->ic_flags & IEEE80211_F_SHPREAMBLE)) desc->plcp_signal |= 0x08; } /* RT2x61 supports scatter with up to 5 segments */ for (i = 0; i < nsegs; i++) { desc->addr[i] = htole32(segs[i].ds_addr); desc->len [i] = htole16(segs[i].ds_len); } } static int rt2661_tx_mgt(struct rt2661_softc *sc, struct mbuf *m0, struct ieee80211_node *ni) { struct ieee80211vap *vap = ni->ni_vap; struct ieee80211com *ic = ni->ni_ic; struct rt2661_tx_desc *desc; struct rt2661_tx_data *data; struct ieee80211_frame *wh; struct ieee80211_key *k; bus_dma_segment_t segs[RT2661_MAX_SCATTER]; uint16_t dur; uint32_t flags = 0; /* XXX HWSEQ */ int nsegs, rate, error; desc = &sc->mgtq.desc[sc->mgtq.cur]; data = &sc->mgtq.data[sc->mgtq.cur]; rate = ni->ni_txparms->mgmtrate; wh = mtod(m0, struct ieee80211_frame *); if (wh->i_fc[1] & IEEE80211_FC1_PROTECTED) { k = ieee80211_crypto_encap(ni, m0); if (k == NULL) { m_freem(m0); return ENOBUFS; } } error = bus_dmamap_load_mbuf_sg(sc->mgtq.data_dmat, data->map, m0, segs, &nsegs, 0); if (error != 0) { device_printf(sc->sc_dev, "could not map mbuf (error %d)\n", error); m_freem(m0); return error; } if (ieee80211_radiotap_active_vap(vap)) { struct rt2661_tx_radiotap_header *tap = 
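/*
 * The CCK branch of rt2661_setup_tx_desc() above computes the PLCP LENGTH
 * field as the frame's airtime in microseconds: len bytes * 8 bits divided
 * by the rate in 500 kb/s units is 16 * len / rate, rounded up.  At 11 Mb/s
 * (rate code 22) the microsecond count alone does not uniquely identify the
 * octet count, so a length-extension bit disambiguates it; the driver sets
 * it when the remainder of (16 * len) / 22 is 1..6.  A stand-alone version
 * of that arithmetic (helper and macro names illustrative only):
 */
#if 0	/* stand-alone illustration, not compiled with this file */
#include <stdint.h>

#define HOWMANY(x, y)	(((x) + ((y) - 1)) / (y))

static uint16_t
cck_plcp_length(int len_bytes, int rate_500kbps, int *lengext)
{
	uint16_t usecs = HOWMANY(16 * len_bytes, rate_500kbps);

	*lengext = 0;
	if (rate_500kbps == 22) {		/* 11 Mb/s */
		int rem = (16 * len_bytes) % 22;
		if (rem != 0 && rem < 7)
			*lengext = 1;
	}
	return (usecs);		/* e.g. 1500 bytes at 11 Mb/s -> 1091 us */
}
#endif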
&sc->sc_txtap; tap->wt_flags = 0; tap->wt_rate = rate; ieee80211_radiotap_tx(vap, m0); } data->m = m0; data->ni = ni; /* management frames are not taken into account for amrr */ data->rix = IEEE80211_FIXED_RATE_NONE; wh = mtod(m0, struct ieee80211_frame *); if (!IEEE80211_IS_MULTICAST(wh->i_addr1)) { flags |= RT2661_TX_NEED_ACK; dur = ieee80211_ack_duration(ic->ic_rt, rate, ic->ic_flags & IEEE80211_F_SHPREAMBLE); *(uint16_t *)wh->i_dur = htole16(dur); /* tell hardware to add timestamp in probe responses */ if ((wh->i_fc[0] & (IEEE80211_FC0_TYPE_MASK | IEEE80211_FC0_SUBTYPE_MASK)) == (IEEE80211_FC0_TYPE_MGT | IEEE80211_FC0_SUBTYPE_PROBE_RESP)) flags |= RT2661_TX_TIMESTAMP; } rt2661_setup_tx_desc(sc, desc, flags, 0 /* XXX HWSEQ */, m0->m_pkthdr.len, rate, segs, nsegs, RT2661_QID_MGT); bus_dmamap_sync(sc->mgtq.data_dmat, data->map, BUS_DMASYNC_PREWRITE); bus_dmamap_sync(sc->mgtq.desc_dmat, sc->mgtq.desc_map, BUS_DMASYNC_PREWRITE); DPRINTFN(sc, 10, "sending mgt frame len=%u idx=%u rate=%u\n", m0->m_pkthdr.len, sc->mgtq.cur, rate); /* kick mgt */ sc->mgtq.queued++; sc->mgtq.cur = (sc->mgtq.cur + 1) % RT2661_MGT_RING_COUNT; RAL_WRITE(sc, RT2661_TX_CNTL_CSR, RT2661_KICK_MGT); return 0; } static int rt2661_sendprot(struct rt2661_softc *sc, int ac, const struct mbuf *m, struct ieee80211_node *ni, int prot, int rate) { struct ieee80211com *ic = ni->ni_ic; struct rt2661_tx_ring *txq = &sc->txq[ac]; const struct ieee80211_frame *wh; struct rt2661_tx_desc *desc; struct rt2661_tx_data *data; struct mbuf *mprot; int protrate, ackrate, pktlen, flags, isshort, error; uint16_t dur; bus_dma_segment_t segs[RT2661_MAX_SCATTER]; int nsegs; KASSERT(prot == IEEE80211_PROT_RTSCTS || prot == IEEE80211_PROT_CTSONLY, ("protection %d", prot)); wh = mtod(m, const struct ieee80211_frame *); pktlen = m->m_pkthdr.len + IEEE80211_CRC_LEN; protrate = ieee80211_ctl_rate(ic->ic_rt, rate); ackrate = ieee80211_ack_rate(ic->ic_rt, rate); isshort = (ic->ic_flags & IEEE80211_F_SHPREAMBLE) != 0; dur = ieee80211_compute_duration(ic->ic_rt, pktlen, rate, isshort) + ieee80211_ack_duration(ic->ic_rt, rate, isshort); flags = RT2661_TX_MORE_FRAG; if (prot == IEEE80211_PROT_RTSCTS) { /* NB: CTS is the same size as an ACK */ dur += ieee80211_ack_duration(ic->ic_rt, rate, isshort); flags |= RT2661_TX_NEED_ACK; mprot = ieee80211_alloc_rts(ic, wh->i_addr1, wh->i_addr2, dur); } else { mprot = ieee80211_alloc_cts(ic, ni->ni_vap->iv_myaddr, dur); } if (mprot == NULL) { /* XXX stat + msg */ return ENOBUFS; } data = &txq->data[txq->cur]; desc = &txq->desc[txq->cur]; error = bus_dmamap_load_mbuf_sg(txq->data_dmat, data->map, mprot, segs, &nsegs, 0); if (error != 0) { device_printf(sc->sc_dev, "could not map mbuf (error %d)\n", error); m_freem(mprot); return error; } data->m = mprot; data->ni = ieee80211_ref_node(ni); /* ctl frames are not taken into account for amrr */ data->rix = IEEE80211_FIXED_RATE_NONE; rt2661_setup_tx_desc(sc, desc, flags, 0, mprot->m_pkthdr.len, protrate, segs, 1, ac); bus_dmamap_sync(txq->data_dmat, data->map, BUS_DMASYNC_PREWRITE); bus_dmamap_sync(txq->desc_dmat, txq->desc_map, BUS_DMASYNC_PREWRITE); txq->queued++; txq->cur = (txq->cur + 1) % RT2661_TX_RING_COUNT; return 0; } static int rt2661_tx_data(struct rt2661_softc *sc, struct mbuf *m0, struct ieee80211_node *ni, int ac) { struct ieee80211vap *vap = ni->ni_vap; struct ieee80211com *ic = &sc->sc_ic; struct rt2661_tx_ring *txq = &sc->txq[ac]; struct rt2661_tx_desc *desc; struct rt2661_tx_data *data; struct ieee80211_frame *wh; const struct ieee80211_txparam *tp = 
ni->ni_txparms; struct ieee80211_key *k; struct mbuf *mnew; bus_dma_segment_t segs[RT2661_MAX_SCATTER]; uint16_t dur; uint32_t flags; int error, nsegs, rate, noack = 0; wh = mtod(m0, struct ieee80211_frame *); if (m0->m_flags & M_EAPOL) { rate = tp->mgmtrate; } else if (IEEE80211_IS_MULTICAST(wh->i_addr1)) { rate = tp->mcastrate; } else if (tp->ucastrate != IEEE80211_FIXED_RATE_NONE) { rate = tp->ucastrate; } else { (void) ieee80211_ratectl_rate(ni, NULL, 0); rate = ni->ni_txrate; } rate &= IEEE80211_RATE_VAL; if (wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_QOS) noack = !! ieee80211_wme_vap_ac_is_noack(vap, ac); if (wh->i_fc[1] & IEEE80211_FC1_PROTECTED) { k = ieee80211_crypto_encap(ni, m0); if (k == NULL) { m_freem(m0); return ENOBUFS; } /* packet header may have moved, reset our local pointer */ wh = mtod(m0, struct ieee80211_frame *); } flags = 0; if (!IEEE80211_IS_MULTICAST(wh->i_addr1)) { int prot = IEEE80211_PROT_NONE; if (m0->m_pkthdr.len + IEEE80211_CRC_LEN > vap->iv_rtsthreshold) prot = IEEE80211_PROT_RTSCTS; else if ((ic->ic_flags & IEEE80211_F_USEPROT) && ieee80211_rate2phytype(ic->ic_rt, rate) == IEEE80211_T_OFDM) prot = ic->ic_protmode; if (prot != IEEE80211_PROT_NONE) { error = rt2661_sendprot(sc, ac, m0, ni, prot, rate); if (error) { m_freem(m0); return error; } flags |= RT2661_TX_LONG_RETRY | RT2661_TX_IFS; } } data = &txq->data[txq->cur]; desc = &txq->desc[txq->cur]; error = bus_dmamap_load_mbuf_sg(txq->data_dmat, data->map, m0, segs, &nsegs, 0); if (error != 0 && error != EFBIG) { device_printf(sc->sc_dev, "could not map mbuf (error %d)\n", error); m_freem(m0); return error; } if (error != 0) { mnew = m_defrag(m0, M_NOWAIT); if (mnew == NULL) { device_printf(sc->sc_dev, "could not defragment mbuf\n"); m_freem(m0); return ENOBUFS; } m0 = mnew; error = bus_dmamap_load_mbuf_sg(txq->data_dmat, data->map, m0, segs, &nsegs, 0); if (error != 0) { device_printf(sc->sc_dev, "could not map mbuf (error %d)\n", error); m_freem(m0); return error; } /* packet header have moved, reset our local pointer */ wh = mtod(m0, struct ieee80211_frame *); } if (ieee80211_radiotap_active_vap(vap)) { struct rt2661_tx_radiotap_header *tap = &sc->sc_txtap; tap->wt_flags = 0; tap->wt_rate = rate; ieee80211_radiotap_tx(vap, m0); } data->m = m0; data->ni = ni; /* remember link conditions for rate adaptation algorithm */ if (tp->ucastrate == IEEE80211_FIXED_RATE_NONE) { data->rix = ni->ni_txrate; /* XXX probably need last rssi value and not avg */ data->rssi = ic->ic_node_getrssi(ni); } else data->rix = IEEE80211_FIXED_RATE_NONE; if (!noack && !IEEE80211_IS_MULTICAST(wh->i_addr1)) { flags |= RT2661_TX_NEED_ACK; dur = ieee80211_ack_duration(ic->ic_rt, rate, ic->ic_flags & IEEE80211_F_SHPREAMBLE); *(uint16_t *)wh->i_dur = htole16(dur); } rt2661_setup_tx_desc(sc, desc, flags, 0, m0->m_pkthdr.len, rate, segs, nsegs, ac); bus_dmamap_sync(txq->data_dmat, data->map, BUS_DMASYNC_PREWRITE); bus_dmamap_sync(txq->desc_dmat, txq->desc_map, BUS_DMASYNC_PREWRITE); DPRINTFN(sc, 10, "sending data frame len=%u idx=%u rate=%u\n", m0->m_pkthdr.len, txq->cur, rate); /* kick Tx */ txq->queued++; txq->cur = (txq->cur + 1) % RT2661_TX_RING_COUNT; RAL_WRITE(sc, RT2661_TX_CNTL_CSR, 1 << ac); return 0; } static int rt2661_transmit(struct ieee80211com *ic, struct mbuf *m) { struct rt2661_softc *sc = ic->ic_softc; int error; RAL_LOCK(sc); if ((sc->sc_flags & RAL_RUNNING) == 0) { RAL_UNLOCK(sc); return (ENXIO); } error = mbufq_enqueue(&sc->sc_snd, m); if (error) { RAL_UNLOCK(sc); return (error); } rt2661_start(sc); RAL_UNLOCK(sc); return 
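/*
 * rt2661_tx_data() above tolerates exactly one DMA mapping failure mode:
 * EFBIG, meaning the mbuf chain has more scatter segments than a Tx
 * descriptor can take.  The chain is then linearized with m_defrag() and
 * the load retried once; any other error, or a second failure, drops the
 * frame.  The skeleton of that retry, with the ring bookkeeping and mbuf
 * ownership left to the caller (helper name hypothetical):
 */
#if 0	/* stand-alone illustration, not compiled with this file */
static int
load_tx_mbuf(struct rt2661_tx_ring *txq, struct rt2661_tx_data *data,
    struct mbuf **m0, bus_dma_segment_t *segs, int *nsegs)
{
	struct mbuf *mnew;
	int error;

	error = bus_dmamap_load_mbuf_sg(txq->data_dmat, data->map, *m0,
	    segs, nsegs, 0);
	if (error == EFBIG) {
		mnew = m_defrag(*m0, M_NOWAIT);	/* collapse into one cluster */
		if (mnew == NULL)
			return (ENOBUFS);	/* caller frees the chain */
		*m0 = mnew;
		error = bus_dmamap_load_mbuf_sg(txq->data_dmat, data->map,
		    *m0, segs, nsegs, 0);
	}
	return (error);
}
#endif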
(0); } static void rt2661_start(struct rt2661_softc *sc) { struct mbuf *m; struct ieee80211_node *ni; int ac; RAL_LOCK_ASSERT(sc); /* prevent management frames from being sent if we're not ready */ if (!(sc->sc_flags & RAL_RUNNING) || sc->sc_invalid) return; while ((m = mbufq_dequeue(&sc->sc_snd)) != NULL) { ac = M_WME_GETAC(m); if (sc->txq[ac].queued >= RT2661_TX_RING_COUNT - 1) { /* there is no place left in this ring */ mbufq_prepend(&sc->sc_snd, m); break; } ni = (struct ieee80211_node *) m->m_pkthdr.rcvif; if (rt2661_tx_data(sc, m, ni, ac) != 0) { if_inc_counter(ni->ni_vap->iv_ifp, IFCOUNTER_OERRORS, 1); ieee80211_free_node(ni); break; } sc->sc_tx_timer = 5; } } static int rt2661_raw_xmit(struct ieee80211_node *ni, struct mbuf *m, const struct ieee80211_bpf_params *params) { struct ieee80211com *ic = ni->ni_ic; struct rt2661_softc *sc = ic->ic_softc; RAL_LOCK(sc); /* prevent management frames from being sent if we're not ready */ if (!(sc->sc_flags & RAL_RUNNING)) { RAL_UNLOCK(sc); m_freem(m); return ENETDOWN; } if (sc->mgtq.queued >= RT2661_MGT_RING_COUNT) { RAL_UNLOCK(sc); m_freem(m); return ENOBUFS; /* XXX */ } /* * Legacy path; interpret frame contents to decide * precisely how to send the frame. * XXX raw path */ if (rt2661_tx_mgt(sc, m, ni) != 0) goto bad; sc->sc_tx_timer = 5; RAL_UNLOCK(sc); return 0; bad: RAL_UNLOCK(sc); return EIO; /* XXX */ } static void rt2661_watchdog(void *arg) { struct rt2661_softc *sc = (struct rt2661_softc *)arg; RAL_LOCK_ASSERT(sc); KASSERT(sc->sc_flags & RAL_RUNNING, ("not running")); if (sc->sc_invalid) /* card ejected */ return; if (sc->sc_tx_timer > 0 && --sc->sc_tx_timer == 0) { device_printf(sc->sc_dev, "device timeout\n"); rt2661_init_locked(sc); counter_u64_add(sc->sc_ic.ic_oerrors, 1); /* NB: callout is reset in rt2661_init() */ return; } callout_reset(&sc->watchdog_ch, hz, rt2661_watchdog, sc); } static void rt2661_parent(struct ieee80211com *ic) { struct rt2661_softc *sc = ic->ic_softc; int startall = 0; RAL_LOCK(sc); if (ic->ic_nrunning > 0) { if ((sc->sc_flags & RAL_RUNNING) == 0) { rt2661_init_locked(sc); startall = 1; } else rt2661_update_promisc(ic); } else if (sc->sc_flags & RAL_RUNNING) rt2661_stop_locked(sc); RAL_UNLOCK(sc); if (startall) ieee80211_start_all(ic); } static void rt2661_bbp_write(struct rt2661_softc *sc, uint8_t reg, uint8_t val) { uint32_t tmp; int ntries; for (ntries = 0; ntries < 100; ntries++) { if (!(RAL_READ(sc, RT2661_PHY_CSR3) & RT2661_BBP_BUSY)) break; DELAY(1); } if (ntries == 100) { device_printf(sc->sc_dev, "could not write to BBP\n"); return; } tmp = RT2661_BBP_BUSY | (reg & 0x7f) << 8 | val; RAL_WRITE(sc, RT2661_PHY_CSR3, tmp); DPRINTFN(sc, 15, "BBP R%u <- 0x%02x\n", reg, val); } static uint8_t rt2661_bbp_read(struct rt2661_softc *sc, uint8_t reg) { uint32_t val; int ntries; for (ntries = 0; ntries < 100; ntries++) { if (!(RAL_READ(sc, RT2661_PHY_CSR3) & RT2661_BBP_BUSY)) break; DELAY(1); } if (ntries == 100) { device_printf(sc->sc_dev, "could not read from BBP\n"); return 0; } val = RT2661_BBP_BUSY | RT2661_BBP_READ | reg << 8; RAL_WRITE(sc, RT2661_PHY_CSR3, val); for (ntries = 0; ntries < 100; ntries++) { val = RAL_READ(sc, RT2661_PHY_CSR3); if (!(val & RT2661_BBP_BUSY)) return val & 0xff; DELAY(1); } device_printf(sc->sc_dev, "could not read from BBP\n"); return 0; } static void rt2661_rf_write(struct rt2661_softc *sc, uint8_t reg, uint32_t val) { uint32_t tmp; int ntries; for (ntries = 0; ntries < 100; ntries++) { if (!(RAL_READ(sc, RT2661_PHY_CSR4) & RT2661_RF_BUSY)) break; DELAY(1); } if (ntries == 
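/*
 * rt2661_bbp_write(), rt2661_bbp_read() and rt2661_rf_write() above all
 * share the same bounded busy-wait: poll a BUSY bit up to 100 times with a
 * one-microsecond DELAY() between probes, and give up with a diagnostic if
 * it never clears.  The idiom, factored into a hypothetical helper:
 */
#if 0	/* stand-alone illustration, not compiled with this file */
static int
wait_not_busy(struct rt2661_softc *sc, uint32_t csr, uint32_t busybit)
{
	int ntries;

	for (ntries = 0; ntries < 100; ntries++) {
		if (!(RAL_READ(sc, csr) & busybit))
			return (0);		/* register interface is free */
		DELAY(1);			/* spin for one microsecond */
	}
	return (ETIMEDOUT);			/* caller prints a diagnostic */
}
#endif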
100) { device_printf(sc->sc_dev, "could not write to RF\n"); return; } tmp = RT2661_RF_BUSY | RT2661_RF_21BIT | (val & 0x1fffff) << 2 | (reg & 3); RAL_WRITE(sc, RT2661_PHY_CSR4, tmp); /* remember last written value in sc */ sc->rf_regs[reg] = val; DPRINTFN(sc, 15, "RF R[%u] <- 0x%05x\n", reg & 3, val & 0x1fffff); } static int rt2661_tx_cmd(struct rt2661_softc *sc, uint8_t cmd, uint16_t arg) { if (RAL_READ(sc, RT2661_H2M_MAILBOX_CSR) & RT2661_H2M_BUSY) return EIO; /* there is already a command pending */ RAL_WRITE(sc, RT2661_H2M_MAILBOX_CSR, RT2661_H2M_BUSY | RT2661_TOKEN_NO_INTR << 16 | arg); RAL_WRITE(sc, RT2661_HOST_CMD_CSR, RT2661_KICK_CMD | cmd); return 0; } static void rt2661_select_antenna(struct rt2661_softc *sc) { uint8_t bbp4, bbp77; uint32_t tmp; bbp4 = rt2661_bbp_read(sc, 4); bbp77 = rt2661_bbp_read(sc, 77); /* TBD */ /* make sure Rx is disabled before switching antenna */ tmp = RAL_READ(sc, RT2661_TXRX_CSR0); RAL_WRITE(sc, RT2661_TXRX_CSR0, tmp | RT2661_DISABLE_RX); rt2661_bbp_write(sc, 4, bbp4); rt2661_bbp_write(sc, 77, bbp77); /* restore Rx filter */ RAL_WRITE(sc, RT2661_TXRX_CSR0, tmp); } /* * Enable multi-rate retries for frames sent at OFDM rates. * In 802.11b/g mode, allow fallback to CCK rates. */ static void rt2661_enable_mrr(struct rt2661_softc *sc) { struct ieee80211com *ic = &sc->sc_ic; uint32_t tmp; tmp = RAL_READ(sc, RT2661_TXRX_CSR4); tmp &= ~RT2661_MRR_CCK_FALLBACK; if (!IEEE80211_IS_CHAN_5GHZ(ic->ic_bsschan)) tmp |= RT2661_MRR_CCK_FALLBACK; tmp |= RT2661_MRR_ENABLED; RAL_WRITE(sc, RT2661_TXRX_CSR4, tmp); } static void rt2661_set_txpreamble(struct rt2661_softc *sc) { struct ieee80211com *ic = &sc->sc_ic; uint32_t tmp; tmp = RAL_READ(sc, RT2661_TXRX_CSR4); tmp &= ~RT2661_SHORT_PREAMBLE; if (ic->ic_flags & IEEE80211_F_SHPREAMBLE) tmp |= RT2661_SHORT_PREAMBLE; RAL_WRITE(sc, RT2661_TXRX_CSR4, tmp); } static void rt2661_set_basicrates(struct rt2661_softc *sc, const struct ieee80211_rateset *rs) { struct ieee80211com *ic = &sc->sc_ic; uint32_t mask = 0; uint8_t rate; int i; for (i = 0; i < rs->rs_nrates; i++) { rate = rs->rs_rates[i]; if (!(rate & IEEE80211_RATE_BASIC)) continue; mask |= 1 << ieee80211_legacy_rate_lookup(ic->ic_rt, IEEE80211_RV(rate)); } RAL_WRITE(sc, RT2661_TXRX_CSR5, mask); DPRINTF(sc, "Setting basic rate mask to 0x%x\n", mask); } /* * Reprogram MAC/BBP to switch to a new band. Values taken from the reference * driver. 
*/ static void rt2661_select_band(struct rt2661_softc *sc, struct ieee80211_channel *c) { uint8_t bbp17, bbp35, bbp96, bbp97, bbp98, bbp104; uint32_t tmp; /* update all BBP registers that depend on the band */ bbp17 = 0x20; bbp96 = 0x48; bbp104 = 0x2c; bbp35 = 0x50; bbp97 = 0x48; bbp98 = 0x48; if (IEEE80211_IS_CHAN_5GHZ(c)) { bbp17 += 0x08; bbp96 += 0x10; bbp104 += 0x0c; bbp35 += 0x10; bbp97 += 0x10; bbp98 += 0x10; } if ((IEEE80211_IS_CHAN_2GHZ(c) && sc->ext_2ghz_lna) || (IEEE80211_IS_CHAN_5GHZ(c) && sc->ext_5ghz_lna)) { bbp17 += 0x10; bbp96 += 0x10; bbp104 += 0x10; } rt2661_bbp_write(sc, 17, bbp17); rt2661_bbp_write(sc, 96, bbp96); rt2661_bbp_write(sc, 104, bbp104); if ((IEEE80211_IS_CHAN_2GHZ(c) && sc->ext_2ghz_lna) || (IEEE80211_IS_CHAN_5GHZ(c) && sc->ext_5ghz_lna)) { rt2661_bbp_write(sc, 75, 0x80); rt2661_bbp_write(sc, 86, 0x80); rt2661_bbp_write(sc, 88, 0x80); } rt2661_bbp_write(sc, 35, bbp35); rt2661_bbp_write(sc, 97, bbp97); rt2661_bbp_write(sc, 98, bbp98); tmp = RAL_READ(sc, RT2661_PHY_CSR0); tmp &= ~(RT2661_PA_PE_2GHZ | RT2661_PA_PE_5GHZ); if (IEEE80211_IS_CHAN_2GHZ(c)) tmp |= RT2661_PA_PE_2GHZ; else tmp |= RT2661_PA_PE_5GHZ; RAL_WRITE(sc, RT2661_PHY_CSR0, tmp); } static void rt2661_set_chan(struct rt2661_softc *sc, struct ieee80211_channel *c) { struct ieee80211com *ic = &sc->sc_ic; const struct rfprog *rfprog; uint8_t bbp3, bbp94 = RT2661_BBPR94_DEFAULT; int8_t power; u_int i, chan; chan = ieee80211_chan2ieee(ic, c); KASSERT(chan != 0 && chan != IEEE80211_CHAN_ANY, ("chan 0x%x", chan)); /* select the appropriate RF settings based on what EEPROM says */ rfprog = (sc->rfprog == 0) ? rt2661_rf5225_1 : rt2661_rf5225_2; /* find the settings for this channel (we know it exists) */ for (i = 0; rfprog[i].chan != chan; i++); power = sc->txpow[i]; if (power < 0) { bbp94 += power; power = 0; } else if (power > 31) { bbp94 += power - 31; power = 31; } /* * If we are switching from the 2GHz band to the 5GHz band or * vice-versa, BBP registers need to be reprogrammed. 
*/ if (c->ic_flags != sc->sc_curchan->ic_flags) { rt2661_select_band(sc, c); rt2661_select_antenna(sc); } sc->sc_curchan = c; rt2661_rf_write(sc, RAL_RF1, rfprog[i].r1); rt2661_rf_write(sc, RAL_RF2, rfprog[i].r2); rt2661_rf_write(sc, RAL_RF3, rfprog[i].r3 | power << 7); rt2661_rf_write(sc, RAL_RF4, rfprog[i].r4 | sc->rffreq << 10); DELAY(200); rt2661_rf_write(sc, RAL_RF1, rfprog[i].r1); rt2661_rf_write(sc, RAL_RF2, rfprog[i].r2); rt2661_rf_write(sc, RAL_RF3, rfprog[i].r3 | power << 7 | 1); rt2661_rf_write(sc, RAL_RF4, rfprog[i].r4 | sc->rffreq << 10); DELAY(200); rt2661_rf_write(sc, RAL_RF1, rfprog[i].r1); rt2661_rf_write(sc, RAL_RF2, rfprog[i].r2); rt2661_rf_write(sc, RAL_RF3, rfprog[i].r3 | power << 7); rt2661_rf_write(sc, RAL_RF4, rfprog[i].r4 | sc->rffreq << 10); /* enable smart mode for MIMO-capable RFs */ bbp3 = rt2661_bbp_read(sc, 3); bbp3 &= ~RT2661_SMART_MODE; if (sc->rf_rev == RT2661_RF_5325 || sc->rf_rev == RT2661_RF_2529) bbp3 |= RT2661_SMART_MODE; rt2661_bbp_write(sc, 3, bbp3); if (bbp94 != RT2661_BBPR94_DEFAULT) rt2661_bbp_write(sc, 94, bbp94); /* 5GHz radio needs a 1ms delay here */ if (IEEE80211_IS_CHAN_5GHZ(c)) DELAY(1000); } static void rt2661_set_bssid(struct rt2661_softc *sc, const uint8_t *bssid) { uint32_t tmp; tmp = bssid[0] | bssid[1] << 8 | bssid[2] << 16 | bssid[3] << 24; RAL_WRITE(sc, RT2661_MAC_CSR4, tmp); tmp = bssid[4] | bssid[5] << 8 | RT2661_ONE_BSSID << 16; RAL_WRITE(sc, RT2661_MAC_CSR5, tmp); } static void rt2661_set_macaddr(struct rt2661_softc *sc, const uint8_t *addr) { uint32_t tmp; tmp = addr[0] | addr[1] << 8 | addr[2] << 16 | addr[3] << 24; RAL_WRITE(sc, RT2661_MAC_CSR2, tmp); tmp = addr[4] | addr[5] << 8; RAL_WRITE(sc, RT2661_MAC_CSR3, tmp); } static void rt2661_update_promisc(struct ieee80211com *ic) { struct rt2661_softc *sc = ic->ic_softc; uint32_t tmp; tmp = RAL_READ(sc, RT2661_TXRX_CSR0); tmp &= ~RT2661_DROP_NOT_TO_ME; if (ic->ic_promisc == 0) tmp |= RT2661_DROP_NOT_TO_ME; RAL_WRITE(sc, RT2661_TXRX_CSR0, tmp); DPRINTF(sc, "%s promiscuous mode\n", (ic->ic_promisc > 0) ? "entering" : "leaving"); } /* * Update QoS (802.11e) settings for each h/w Tx ring. */ static int rt2661_wme_update(struct ieee80211com *ic) { struct rt2661_softc *sc = ic->ic_softc; struct chanAccParams chp; const struct wmeParams *wmep; ieee80211_wme_ic_getparams(ic, &chp); wmep = chp.cap_wmeParams; /* XXX: not sure about shifts. */ /* XXX: the reference driver plays with AC_VI settings too. 
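/*
 * Editor's note: rt2661_set_macaddr() and rt2661_set_bssid() above pack the
 * address bytes little-endian into two registers.  A worked example with an
 * address chosen purely for illustration:
 * 00:11:22:33:44:55 -> MAC_CSR2 = 0x33221100, MAC_CSR3 = 0x00005544.
 */
static uint32_t
rt2661_pack_addr_lo(const uint8_t *addr)
{
	/* same byte order as the MAC_CSR2/MAC_CSR4 writes above */
	return addr[0] | addr[1] << 8 | addr[2] << 16 | (uint32_t)addr[3] << 24;
}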
*/ /* update TxOp */ RAL_WRITE(sc, RT2661_AC_TXOP_CSR0, wmep[WME_AC_BE].wmep_txopLimit << 16 | wmep[WME_AC_BK].wmep_txopLimit); RAL_WRITE(sc, RT2661_AC_TXOP_CSR1, wmep[WME_AC_VI].wmep_txopLimit << 16 | wmep[WME_AC_VO].wmep_txopLimit); /* update CWmin */ RAL_WRITE(sc, RT2661_CWMIN_CSR, wmep[WME_AC_BE].wmep_logcwmin << 12 | wmep[WME_AC_BK].wmep_logcwmin << 8 | wmep[WME_AC_VI].wmep_logcwmin << 4 | wmep[WME_AC_VO].wmep_logcwmin); /* update CWmax */ RAL_WRITE(sc, RT2661_CWMAX_CSR, wmep[WME_AC_BE].wmep_logcwmax << 12 | wmep[WME_AC_BK].wmep_logcwmax << 8 | wmep[WME_AC_VI].wmep_logcwmax << 4 | wmep[WME_AC_VO].wmep_logcwmax); /* update Aifsn */ RAL_WRITE(sc, RT2661_AIFSN_CSR, wmep[WME_AC_BE].wmep_aifsn << 12 | wmep[WME_AC_BK].wmep_aifsn << 8 | wmep[WME_AC_VI].wmep_aifsn << 4 | wmep[WME_AC_VO].wmep_aifsn); return 0; } static void rt2661_update_slot(struct ieee80211com *ic) { struct rt2661_softc *sc = ic->ic_softc; uint8_t slottime; uint32_t tmp; slottime = IEEE80211_GET_SLOTTIME(ic); tmp = RAL_READ(sc, RT2661_MAC_CSR9); tmp = (tmp & ~0xff) | slottime; RAL_WRITE(sc, RT2661_MAC_CSR9, tmp); } static const char * rt2661_get_rf(int rev) { switch (rev) { case RT2661_RF_5225: return "RT5225"; case RT2661_RF_5325: return "RT5325 (MIMO XR)"; case RT2661_RF_2527: return "RT2527"; case RT2661_RF_2529: return "RT2529 (MIMO XR)"; default: return "unknown"; } } static void rt2661_read_eeprom(struct rt2661_softc *sc, uint8_t macaddr[IEEE80211_ADDR_LEN]) { uint16_t val; int i; /* read MAC address */ val = rt2661_eeprom_read(sc, RT2661_EEPROM_MAC01); macaddr[0] = val & 0xff; macaddr[1] = val >> 8; val = rt2661_eeprom_read(sc, RT2661_EEPROM_MAC23); macaddr[2] = val & 0xff; macaddr[3] = val >> 8; val = rt2661_eeprom_read(sc, RT2661_EEPROM_MAC45); macaddr[4] = val & 0xff; macaddr[5] = val >> 8; val = rt2661_eeprom_read(sc, RT2661_EEPROM_ANTENNA); /* XXX: test if different from 0xffff? 
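/*
 * Editor's note: rt2661_wme_update() above packs the per-AC parameters into a
 * few registers; CWmin, CWmax and AIFSN each use one 4-bit field per access
 * category.  A hypothetical helper showing the CWmin layout: with the common
 * WMM ECWmin defaults (BE=4, BK=4, VI=3, VO=2) the value written to
 * RT2661_CWMIN_CSR would be 0x4432.
 */
static uint32_t
rt2661_pack_cwmin(uint8_t be, uint8_t bk, uint8_t vi, uint8_t vo)
{
	return be << 12 | bk << 8 | vi << 4 | vo;
}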
*/ sc->rf_rev = (val >> 11) & 0x1f; sc->hw_radio = (val >> 10) & 0x1; sc->rx_ant = (val >> 4) & 0x3; sc->tx_ant = (val >> 2) & 0x3; sc->nb_ant = val & 0x3; DPRINTF(sc, "RF revision=%d\n", sc->rf_rev); val = rt2661_eeprom_read(sc, RT2661_EEPROM_CONFIG2); sc->ext_5ghz_lna = (val >> 6) & 0x1; sc->ext_2ghz_lna = (val >> 4) & 0x1; DPRINTF(sc, "External 2GHz LNA=%d\nExternal 5GHz LNA=%d\n", sc->ext_2ghz_lna, sc->ext_5ghz_lna); val = rt2661_eeprom_read(sc, RT2661_EEPROM_RSSI_2GHZ_OFFSET); if ((val & 0xff) != 0xff) sc->rssi_2ghz_corr = (int8_t)(val & 0xff); /* signed */ /* Only [-10, 10] is valid */ if (sc->rssi_2ghz_corr < -10 || sc->rssi_2ghz_corr > 10) sc->rssi_2ghz_corr = 0; val = rt2661_eeprom_read(sc, RT2661_EEPROM_RSSI_5GHZ_OFFSET); if ((val & 0xff) != 0xff) sc->rssi_5ghz_corr = (int8_t)(val & 0xff); /* signed */ /* Only [-10, 10] is valid */ if (sc->rssi_5ghz_corr < -10 || sc->rssi_5ghz_corr > 10) sc->rssi_5ghz_corr = 0; /* adjust RSSI correction for external low-noise amplifier */ if (sc->ext_2ghz_lna) sc->rssi_2ghz_corr -= 14; if (sc->ext_5ghz_lna) sc->rssi_5ghz_corr -= 14; DPRINTF(sc, "RSSI 2GHz corr=%d\nRSSI 5GHz corr=%d\n", sc->rssi_2ghz_corr, sc->rssi_5ghz_corr); val = rt2661_eeprom_read(sc, RT2661_EEPROM_FREQ_OFFSET); if ((val >> 8) != 0xff) sc->rfprog = (val >> 8) & 0x3; if ((val & 0xff) != 0xff) sc->rffreq = val & 0xff; DPRINTF(sc, "RF prog=%d\nRF freq=%d\n", sc->rfprog, sc->rffreq); /* read Tx power for all a/b/g channels */ for (i = 0; i < 19; i++) { val = rt2661_eeprom_read(sc, RT2661_EEPROM_TXPOWER + i); sc->txpow[i * 2] = (int8_t)(val >> 8); /* signed */ DPRINTF(sc, "Channel=%d Tx power=%d\n", rt2661_rf5225_1[i * 2].chan, sc->txpow[i * 2]); sc->txpow[i * 2 + 1] = (int8_t)(val & 0xff); /* signed */ DPRINTF(sc, "Channel=%d Tx power=%d\n", rt2661_rf5225_1[i * 2 + 1].chan, sc->txpow[i * 2 + 1]); } /* read vendor-specific BBP values */ for (i = 0; i < 16; i++) { val = rt2661_eeprom_read(sc, RT2661_EEPROM_BBP_BASE + i); if (val == 0 || val == 0xffff) continue; /* skip invalid entries */ sc->bbp_prom[i].reg = val >> 8; sc->bbp_prom[i].val = val & 0xff; DPRINTF(sc, "BBP R%d=%02x\n", sc->bbp_prom[i].reg, sc->bbp_prom[i].val); } } static int rt2661_bbp_init(struct rt2661_softc *sc) { int i, ntries; uint8_t val; /* wait for BBP to be ready */ for (ntries = 0; ntries < 100; ntries++) { val = rt2661_bbp_read(sc, 0); if (val != 0 && val != 0xff) break; DELAY(100); } if (ntries == 100) { device_printf(sc->sc_dev, "timeout waiting for BBP\n"); return EIO; } /* initialize BBP registers to default values */ for (i = 0; i < nitems(rt2661_def_bbp); i++) { rt2661_bbp_write(sc, rt2661_def_bbp[i].reg, rt2661_def_bbp[i].val); } /* write vendor-specific BBP values (from EEPROM) */ for (i = 0; i < 16; i++) { if (sc->bbp_prom[i].reg == 0) continue; rt2661_bbp_write(sc, sc->bbp_prom[i].reg, sc->bbp_prom[i].val); } return 0; } static void rt2661_init_locked(struct rt2661_softc *sc) { struct ieee80211com *ic = &sc->sc_ic; struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps); uint32_t tmp, sta[3]; int i, error, ntries; RAL_LOCK_ASSERT(sc); if ((sc->sc_flags & RAL_FW_LOADED) == 0) { error = rt2661_load_microcode(sc); if (error != 0) { device_printf(sc->sc_dev, "%s: could not load 8051 microcode, error %d\n", __func__, error); return; } sc->sc_flags |= RAL_FW_LOADED; } rt2661_stop_locked(sc); /* initialize Tx rings */ RAL_WRITE(sc, RT2661_AC1_BASE_CSR, sc->txq[1].physaddr); RAL_WRITE(sc, RT2661_AC0_BASE_CSR, sc->txq[0].physaddr); RAL_WRITE(sc, RT2661_AC2_BASE_CSR, sc->txq[2].physaddr); RAL_WRITE(sc, 
RT2661_AC3_BASE_CSR, sc->txq[3].physaddr); /* initialize Mgt ring */ RAL_WRITE(sc, RT2661_MGT_BASE_CSR, sc->mgtq.physaddr); /* initialize Rx ring */ RAL_WRITE(sc, RT2661_RX_BASE_CSR, sc->rxq.physaddr); /* initialize Tx rings sizes */ RAL_WRITE(sc, RT2661_TX_RING_CSR0, RT2661_TX_RING_COUNT << 24 | RT2661_TX_RING_COUNT << 16 | RT2661_TX_RING_COUNT << 8 | RT2661_TX_RING_COUNT); RAL_WRITE(sc, RT2661_TX_RING_CSR1, RT2661_TX_DESC_WSIZE << 16 | RT2661_TX_RING_COUNT << 8 | /* XXX: HCCA ring unused */ RT2661_MGT_RING_COUNT); /* initialize Rx rings */ RAL_WRITE(sc, RT2661_RX_RING_CSR, RT2661_RX_DESC_BACK << 16 | RT2661_RX_DESC_WSIZE << 8 | RT2661_RX_RING_COUNT); /* XXX: some magic here */ RAL_WRITE(sc, RT2661_TX_DMA_DST_CSR, 0xaa); /* load base addresses of all 5 Tx rings (4 data + 1 mgt) */ RAL_WRITE(sc, RT2661_LOAD_TX_RING_CSR, 0x1f); /* load base address of Rx ring */ RAL_WRITE(sc, RT2661_RX_CNTL_CSR, 2); /* initialize MAC registers to default values */ for (i = 0; i < nitems(rt2661_def_mac); i++) RAL_WRITE(sc, rt2661_def_mac[i].reg, rt2661_def_mac[i].val); rt2661_set_macaddr(sc, vap ? vap->iv_myaddr : ic->ic_macaddr); /* set host ready */ RAL_WRITE(sc, RT2661_MAC_CSR1, 3); RAL_WRITE(sc, RT2661_MAC_CSR1, 0); /* wait for BBP/RF to wakeup */ for (ntries = 0; ntries < 1000; ntries++) { if (RAL_READ(sc, RT2661_MAC_CSR12) & 8) break; DELAY(1000); } if (ntries == 1000) { printf("timeout waiting for BBP/RF to wakeup\n"); rt2661_stop_locked(sc); return; } if (rt2661_bbp_init(sc) != 0) { rt2661_stop_locked(sc); return; } /* select default channel */ sc->sc_curchan = ic->ic_curchan; rt2661_select_band(sc, sc->sc_curchan); rt2661_select_antenna(sc); rt2661_set_chan(sc, sc->sc_curchan); /* update Rx filter */ tmp = RAL_READ(sc, RT2661_TXRX_CSR0) & 0xffff; tmp |= RT2661_DROP_PHY_ERROR | RT2661_DROP_CRC_ERROR; if (ic->ic_opmode != IEEE80211_M_MONITOR) { tmp |= RT2661_DROP_CTL | RT2661_DROP_VER_ERROR | RT2661_DROP_ACKCTS; if (ic->ic_opmode != IEEE80211_M_HOSTAP && ic->ic_opmode != IEEE80211_M_MBSS) tmp |= RT2661_DROP_TODS; if (ic->ic_promisc == 0) tmp |= RT2661_DROP_NOT_TO_ME; } RAL_WRITE(sc, RT2661_TXRX_CSR0, tmp); /* clear STA registers */ RAL_READ_REGION_4(sc, RT2661_STA_CSR0, sta, nitems(sta)); /* initialize ASIC */ RAL_WRITE(sc, RT2661_MAC_CSR1, 4); /* clear any pending interrupt */ RAL_WRITE(sc, RT2661_INT_SOURCE_CSR, 0xffffffff); /* enable interrupts */ RAL_WRITE(sc, RT2661_INT_MASK_CSR, 0x0000ff10); RAL_WRITE(sc, RT2661_MCU_INT_MASK_CSR, 0); /* kick Rx */ RAL_WRITE(sc, RT2661_RX_CNTL_CSR, 1); sc->sc_flags |= RAL_RUNNING; callout_reset(&sc->watchdog_ch, hz, rt2661_watchdog, sc); } static void rt2661_init(void *priv) { struct rt2661_softc *sc = priv; struct ieee80211com *ic = &sc->sc_ic; RAL_LOCK(sc); rt2661_init_locked(sc); RAL_UNLOCK(sc); if (sc->sc_flags & RAL_RUNNING) ieee80211_start_all(ic); /* start all vap's */ } void rt2661_stop_locked(struct rt2661_softc *sc) { volatile int *flags = &sc->sc_flags; uint32_t tmp; while (*flags & RAL_INPUT_RUNNING) msleep(sc, &sc->sc_mtx, 0, "ralrunning", hz/10); callout_stop(&sc->watchdog_ch); sc->sc_tx_timer = 0; if (sc->sc_flags & RAL_RUNNING) { sc->sc_flags &= ~RAL_RUNNING; /* abort Tx (for all 5 Tx rings) */ RAL_WRITE(sc, RT2661_TX_CNTL_CSR, 0x1f << 16); /* disable Rx (value remains after reset!) 
*/ tmp = RAL_READ(sc, RT2661_TXRX_CSR0); RAL_WRITE(sc, RT2661_TXRX_CSR0, tmp | RT2661_DISABLE_RX); /* reset ASIC */ RAL_WRITE(sc, RT2661_MAC_CSR1, 3); RAL_WRITE(sc, RT2661_MAC_CSR1, 0); /* disable interrupts */ RAL_WRITE(sc, RT2661_INT_MASK_CSR, 0xffffffff); RAL_WRITE(sc, RT2661_MCU_INT_MASK_CSR, 0xffffffff); /* clear any pending interrupt */ RAL_WRITE(sc, RT2661_INT_SOURCE_CSR, 0xffffffff); RAL_WRITE(sc, RT2661_MCU_INT_SOURCE_CSR, 0xffffffff); /* reset Tx and Rx rings */ rt2661_reset_tx_ring(sc, &sc->txq[0]); rt2661_reset_tx_ring(sc, &sc->txq[1]); rt2661_reset_tx_ring(sc, &sc->txq[2]); rt2661_reset_tx_ring(sc, &sc->txq[3]); rt2661_reset_tx_ring(sc, &sc->mgtq); rt2661_reset_rx_ring(sc, &sc->rxq); } } void rt2661_stop(void *priv) { struct rt2661_softc *sc = priv; RAL_LOCK(sc); rt2661_stop_locked(sc); RAL_UNLOCK(sc); } static int rt2661_load_microcode(struct rt2661_softc *sc) { const struct firmware *fp; const char *imagename; int ntries, error; RAL_LOCK_ASSERT(sc); switch (sc->sc_id) { case 0x0301: imagename = "rt2561sfw"; break; case 0x0302: imagename = "rt2561fw"; break; case 0x0401: imagename = "rt2661fw"; break; default: device_printf(sc->sc_dev, "%s: unexpected pci device id 0x%x, " "don't know how to retrieve firmware\n", __func__, sc->sc_id); return EINVAL; } RAL_UNLOCK(sc); fp = firmware_get(imagename); RAL_LOCK(sc); if (fp == NULL) { device_printf(sc->sc_dev, "%s: unable to retrieve firmware image %s\n", __func__, imagename); return EINVAL; } /* * Load 8051 microcode into NIC. */ /* reset 8051 */ RAL_WRITE(sc, RT2661_MCU_CNTL_CSR, RT2661_MCU_RESET); /* cancel any pending Host to MCU command */ RAL_WRITE(sc, RT2661_H2M_MAILBOX_CSR, 0); RAL_WRITE(sc, RT2661_M2H_CMD_DONE_CSR, 0xffffffff); RAL_WRITE(sc, RT2661_HOST_CMD_CSR, 0); /* write 8051's microcode */ RAL_WRITE(sc, RT2661_MCU_CNTL_CSR, RT2661_MCU_RESET | RT2661_MCU_SEL); RAL_WRITE_REGION_1(sc, RT2661_MCU_CODE_BASE, fp->data, fp->datasize); RAL_WRITE(sc, RT2661_MCU_CNTL_CSR, RT2661_MCU_RESET); /* kick 8051's ass */ RAL_WRITE(sc, RT2661_MCU_CNTL_CSR, 0); /* wait for 8051 to initialize */ for (ntries = 0; ntries < 500; ntries++) { if (RAL_READ(sc, RT2661_MCU_CNTL_CSR) & RT2661_MCU_READY) break; DELAY(100); } if (ntries == 500) { device_printf(sc->sc_dev, "%s: timeout waiting for MCU to initialize\n", __func__); error = EIO; } else error = 0; firmware_put(fp, FIRMWARE_UNLOAD); return error; } #ifdef notyet /* * Dynamically tune Rx sensitivity (BBP register 17) based on average RSSI and * false CCA count. This function is called periodically (every seconds) when * in the RUN state. Values taken from the reference driver. */ static void rt2661_rx_tune(struct rt2661_softc *sc) { uint8_t bbp17; uint16_t cca; int lo, hi, dbm; /* * Tuning range depends on operating band and on the presence of an * external low-noise amplifier. 
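/*
 * Editor's note: worked examples of the BBP R17 tuning window that the
 * (#ifdef notyet) rt2661_rx_tune() code below derives from the band and LNA
 * configuration:
 *   2 GHz, no external LNA:        lo = 0x20,                      hi = 0x40
 *   5 GHz with external 5GHz LNA:  lo = 0x20 + 0x08 + 0x10 = 0x38, hi = 0x58
 */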
*/ lo = 0x20; if (IEEE80211_IS_CHAN_5GHZ(sc->sc_curchan)) lo += 0x08; if ((IEEE80211_IS_CHAN_2GHZ(sc->sc_curchan) && sc->ext_2ghz_lna) || (IEEE80211_IS_CHAN_5GHZ(sc->sc_curchan) && sc->ext_5ghz_lna)) lo += 0x10; hi = lo + 0x20; /* retrieve false CCA count since last call (clear on read) */ cca = RAL_READ(sc, RT2661_STA_CSR1) & 0xffff; if (dbm >= -35) { bbp17 = 0x60; } else if (dbm >= -58) { bbp17 = hi; } else if (dbm >= -66) { bbp17 = lo + 0x10; } else if (dbm >= -74) { bbp17 = lo + 0x08; } else { /* RSSI < -74dBm, tune using false CCA count */ bbp17 = sc->bbp17; /* current value */ hi -= 2 * (-74 - dbm); if (hi < lo) hi = lo; if (bbp17 > hi) { bbp17 = hi; } else if (cca > 512) { if (++bbp17 > hi) bbp17 = hi; } else if (cca < 100) { if (--bbp17 < lo) bbp17 = lo; } } if (bbp17 != sc->bbp17) { rt2661_bbp_write(sc, 17, bbp17); sc->bbp17 = bbp17; } } /* * Enter/Leave radar detection mode. * This is for 802.11h additional regulatory domains. */ static void rt2661_radar_start(struct rt2661_softc *sc) { uint32_t tmp; /* disable Rx */ tmp = RAL_READ(sc, RT2661_TXRX_CSR0); RAL_WRITE(sc, RT2661_TXRX_CSR0, tmp | RT2661_DISABLE_RX); rt2661_bbp_write(sc, 82, 0x20); rt2661_bbp_write(sc, 83, 0x00); rt2661_bbp_write(sc, 84, 0x40); /* save current BBP registers values */ sc->bbp18 = rt2661_bbp_read(sc, 18); sc->bbp21 = rt2661_bbp_read(sc, 21); sc->bbp22 = rt2661_bbp_read(sc, 22); sc->bbp16 = rt2661_bbp_read(sc, 16); sc->bbp17 = rt2661_bbp_read(sc, 17); sc->bbp64 = rt2661_bbp_read(sc, 64); rt2661_bbp_write(sc, 18, 0xff); rt2661_bbp_write(sc, 21, 0x3f); rt2661_bbp_write(sc, 22, 0x3f); rt2661_bbp_write(sc, 16, 0xbd); rt2661_bbp_write(sc, 17, sc->ext_5ghz_lna ? 0x44 : 0x34); rt2661_bbp_write(sc, 64, 0x21); /* restore Rx filter */ RAL_WRITE(sc, RT2661_TXRX_CSR0, tmp); } static int rt2661_radar_stop(struct rt2661_softc *sc) { uint8_t bbp66; /* read radar detection result */ bbp66 = rt2661_bbp_read(sc, 66); /* restore BBP registers values */ rt2661_bbp_write(sc, 16, sc->bbp16); rt2661_bbp_write(sc, 17, sc->bbp17); rt2661_bbp_write(sc, 18, sc->bbp18); rt2661_bbp_write(sc, 21, sc->bbp21); rt2661_bbp_write(sc, 22, sc->bbp22); rt2661_bbp_write(sc, 64, sc->bbp64); return bbp66 == 1; } #endif static int rt2661_prepare_beacon(struct rt2661_softc *sc, struct ieee80211vap *vap) { struct ieee80211com *ic = vap->iv_ic; struct rt2661_tx_desc desc; struct mbuf *m0; int rate; if ((m0 = ieee80211_beacon_alloc(vap->iv_bss))== NULL) { device_printf(sc->sc_dev, "could not allocate beacon frame\n"); return ENOBUFS; } /* send beacons at the lowest available rate */ rate = IEEE80211_IS_CHAN_5GHZ(ic->ic_bsschan) ? 12 : 2; rt2661_setup_tx_desc(sc, &desc, RT2661_TX_TIMESTAMP, RT2661_TX_HWSEQ, m0->m_pkthdr.len, rate, NULL, 0, RT2661_QID_MGT); /* copy the first 24 bytes of Tx descriptor into NIC memory */ RAL_WRITE_REGION_1(sc, RT2661_HW_BEACON_BASE0, (uint8_t *)&desc, 24); /* copy beacon header and payload into NIC memory */ RAL_WRITE_REGION_1(sc, RT2661_HW_BEACON_BASE0 + 24, mtod(m0, uint8_t *), m0->m_pkthdr.len); m_freem(m0); return 0; } /* * Enable TSF synchronization and tell h/w to start sending beacons for IBSS * and HostAP operating modes. */ static void rt2661_enable_tsf_sync(struct rt2661_softc *sc) { struct ieee80211com *ic = &sc->sc_ic; struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps); uint32_t tmp; if (vap->iv_opmode != IEEE80211_M_STA) { /* * Change default 16ms TBTT adjustment to 8ms. * Must be done before enabling beacon generation. 
*/ RAL_WRITE(sc, RT2661_TXRX_CSR10, 1 << 12 | 8); } tmp = RAL_READ(sc, RT2661_TXRX_CSR9) & 0xff000000; /* set beacon interval (in 1/16ms unit) */ tmp |= vap->iv_bss->ni_intval * 16; tmp |= RT2661_TSF_TICKING | RT2661_ENABLE_TBTT; if (vap->iv_opmode == IEEE80211_M_STA) tmp |= RT2661_TSF_MODE(1); else tmp |= RT2661_TSF_MODE(2) | RT2661_GENERATE_BEACON; RAL_WRITE(sc, RT2661_TXRX_CSR9, tmp); } static void rt2661_enable_tsf(struct rt2661_softc *sc) { RAL_WRITE(sc, RT2661_TXRX_CSR9, (RAL_READ(sc, RT2661_TXRX_CSR9) & 0xff000000) | RT2661_TSF_TICKING | RT2661_TSF_MODE(2)); } /* * Retrieve the "Received Signal Strength Indicator" from the raw values * contained in Rx descriptors. The computation depends on which band the * frame was received. Correction values taken from the reference driver. */ static int rt2661_get_rssi(struct rt2661_softc *sc, uint8_t raw) { int lna, agc, rssi; lna = (raw >> 5) & 0x3; agc = raw & 0x1f; if (lna == 0) { /* * No mapping available. * * NB: Since RSSI is relative to noise floor, -1 is * adequate for caller to know error happened. */ return -1; } rssi = (2 * agc) - RT2661_NOISE_FLOOR; if (IEEE80211_IS_CHAN_2GHZ(sc->sc_curchan)) { rssi += sc->rssi_2ghz_corr; if (lna == 1) rssi -= 64; else if (lna == 2) rssi -= 74; else if (lna == 3) rssi -= 90; } else { rssi += sc->rssi_5ghz_corr; if (lna == 1) rssi -= 64; else if (lna == 2) rssi -= 86; else if (lna == 3) rssi -= 100; } return rssi; } static void rt2661_scan_start(struct ieee80211com *ic) { struct rt2661_softc *sc = ic->ic_softc; uint32_t tmp; /* abort TSF synchronization */ tmp = RAL_READ(sc, RT2661_TXRX_CSR9); RAL_WRITE(sc, RT2661_TXRX_CSR9, tmp & ~0xffffff); rt2661_set_bssid(sc, ieee80211broadcastaddr); } static void rt2661_scan_end(struct ieee80211com *ic) { struct rt2661_softc *sc = ic->ic_softc; struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps); rt2661_enable_tsf_sync(sc); /* XXX keep local copy */ rt2661_set_bssid(sc, vap->iv_bss->ni_bssid); } static void rt2661_getradiocaps(struct ieee80211com *ic, int maxchans, int *nchans, struct ieee80211_channel chans[]) { struct rt2661_softc *sc = ic->ic_softc; uint8_t bands[IEEE80211_MODE_BYTES]; memset(bands, 0, sizeof(bands)); setbit(bands, IEEE80211_MODE_11B); setbit(bands, IEEE80211_MODE_11G); ieee80211_add_channel_list_2ghz(chans, maxchans, nchans, rt2661_chan_2ghz, nitems(rt2661_chan_2ghz), bands, 0); if (sc->rf_rev == RT2661_RF_5225 || sc->rf_rev == RT2661_RF_5325) { setbit(bands, IEEE80211_MODE_11A); ieee80211_add_channel_list_5ghz(chans, maxchans, nchans, rt2661_chan_5ghz, nitems(rt2661_chan_5ghz), bands, 0); } } static void rt2661_set_channel(struct ieee80211com *ic) { struct rt2661_softc *sc = ic->ic_softc; RAL_LOCK(sc); rt2661_set_chan(sc, ic->ic_curchan); RAL_UNLOCK(sc); } Index: head/sys/dev/rp/rp.c =================================================================== --- head/sys/dev/rp/rp.c (revision 327948) +++ head/sys/dev/rp/rp.c (revision 327949) @@ -1,1115 +1,1116 @@ /*- * SPDX-License-Identifier: BSD-4-Clause * * Copyright (c) Comtrol Corporation * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted prodived that the follwoing conditions * are met. * 1. Redistributions of source code must retain the above copyright * notive, this list of conditions and the following disclainer. * 2. 
Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials prodided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed by Comtrol Corporation. * 4. The name of Comtrol Corporation may not be used to endorse or * promote products derived from this software without specific * prior written permission. * * THIS SOFTWARE IS PROVIDED BY COMTROL CORPORATION ``AS IS'' AND ANY * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL COMTROL CORPORATION BE LIABLE FOR * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, LIFE OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * */ #include __FBSDID("$FreeBSD$"); /* * rp.c - for RocketPort FreeBSD */ #include "opt_compat.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #define ROCKET_C #include #include static const char RocketPortVersion[] = "3.02"; static Byte_t RData[RDATASIZE] = { 0x00, 0x09, 0xf6, 0x82, 0x02, 0x09, 0x86, 0xfb, 0x04, 0x09, 0x00, 0x0a, 0x06, 0x09, 0x01, 0x0a, 0x08, 0x09, 0x8a, 0x13, 0x0a, 0x09, 0xc5, 0x11, 0x0c, 0x09, 0x86, 0x85, 0x0e, 0x09, 0x20, 0x0a, 0x10, 0x09, 0x21, 0x0a, 0x12, 0x09, 0x41, 0xff, 0x14, 0x09, 0x82, 0x00, 0x16, 0x09, 0x82, 0x7b, 0x18, 0x09, 0x8a, 0x7d, 0x1a, 0x09, 0x88, 0x81, 0x1c, 0x09, 0x86, 0x7a, 0x1e, 0x09, 0x84, 0x81, 0x20, 0x09, 0x82, 0x7c, 0x22, 0x09, 0x0a, 0x0a }; static Byte_t RRegData[RREGDATASIZE]= { 0x00, 0x09, 0xf6, 0x82, /* 00: Stop Rx processor */ 0x08, 0x09, 0x8a, 0x13, /* 04: Tx software flow control */ 0x0a, 0x09, 0xc5, 0x11, /* 08: XON char */ 0x0c, 0x09, 0x86, 0x85, /* 0c: XANY */ 0x12, 0x09, 0x41, 0xff, /* 10: Rx mask char */ 0x14, 0x09, 0x82, 0x00, /* 14: Compare/Ignore #0 */ 0x16, 0x09, 0x82, 0x7b, /* 18: Compare #1 */ 0x18, 0x09, 0x8a, 0x7d, /* 1c: Compare #2 */ 0x1a, 0x09, 0x88, 0x81, /* 20: Interrupt #1 */ 0x1c, 0x09, 0x86, 0x7a, /* 24: Ignore/Replace #1 */ 0x1e, 0x09, 0x84, 0x81, /* 28: Interrupt #2 */ 0x20, 0x09, 0x82, 0x7c, /* 2c: Ignore/Replace #2 */ 0x22, 0x09, 0x0a, 0x0a /* 30: Rx FIFO Enable */ }; #if 0 /* IRQ number to MUDBAC register 2 mapping */ Byte_t sIRQMap[16] = { 0,0,0,0x10,0x20,0x30,0,0,0,0x40,0x50,0x60,0x70,0,0,0x80 }; #endif Byte_t rp_sBitMapClrTbl[8] = { 0xfe,0xfd,0xfb,0xf7,0xef,0xdf,0xbf,0x7f }; Byte_t rp_sBitMapSetTbl[8] = { 0x01,0x02,0x04,0x08,0x10,0x20,0x40,0x80 }; static void rpfree(void *); /*************************************************************************** Function: sReadAiopID Purpose: Read the AIOP idenfication number directly from an AIOP. Call: sReadAiopID(CtlP, aiop) CONTROLLER_T *CtlP; Ptr to controller structure int aiop: AIOP index Return: int: Flag AIOPID_XXXX if a valid AIOP is found, where X is replace by an identifying number. Flag AIOPID_NULL if no valid AIOP is found Warnings: No context switches are allowed while executing this function. 
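/*
 * Editor's sketch (hypothetical helper, not in the driver): the
 * rp_sBitMapSetTbl[]/rp_sBitMapClrTbl[] tables above are single-bit masks
 * indexed by channel number; sEnInterrupts()/sDisInterrupts() later use them
 * to flip one channel's bit in the AIOP interrupt mask register.  For
 * channel 5, for example, the set mask is 0x20 and the clear mask is 0xdf.
 */
static void
rp_mask_channel(CHANNEL_T *ChP, int chan, int enable)
{
	Byte_t Mask;

	Mask = rp_readch1(ChP, _INT_MASK);
	if (enable)
		Mask |= rp_sBitMapSetTbl[chan];
	else
		Mask &= rp_sBitMapClrTbl[chan];
	rp_writech1(ChP, _INT_MASK, Mask);
}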
*/ int sReadAiopID(CONTROLLER_T *CtlP, int aiop) { Byte_t AiopID; /* ID byte from AIOP */ rp_writeaiop1(CtlP, aiop, _CMD_REG, RESET_ALL); /* reset AIOP */ rp_writeaiop1(CtlP, aiop, _CMD_REG, 0x0); AiopID = rp_readaiop1(CtlP, aiop, _CHN_STAT0) & 0x07; if(AiopID == 0x06) return(1); else /* AIOP does not exist */ return(-1); } /*************************************************************************** Function: sReadAiopNumChan Purpose: Read the number of channels available in an AIOP directly from an AIOP. Call: sReadAiopNumChan(CtlP, aiop) CONTROLLER_T *CtlP; Ptr to controller structure int aiop: AIOP index Return: int: The number of channels available Comments: The number of channels is determined by write/reads from identical offsets within the SRAM address spaces for channels 0 and 4. If the channel 4 space is mirrored to channel 0 it is a 4 channel AIOP, otherwise it is an 8 channel. Warnings: No context switches are allowed while executing this function. */ int sReadAiopNumChan(CONTROLLER_T *CtlP, int aiop) { Word_t x, y; rp_writeaiop4(CtlP, aiop, _INDX_ADDR,0x12340000L); /* write to chan 0 SRAM */ rp_writeaiop2(CtlP, aiop, _INDX_ADDR,0); /* read from SRAM, chan 0 */ x = rp_readaiop2(CtlP, aiop, _INDX_DATA); rp_writeaiop2(CtlP, aiop, _INDX_ADDR,0x4000); /* read from SRAM, chan 4 */ y = rp_readaiop2(CtlP, aiop, _INDX_DATA); if(x != y) /* if different must be 8 chan */ return(8); else return(4); } /*************************************************************************** Function: sInitChan Purpose: Initialization of a channel and channel structure Call: sInitChan(CtlP,ChP,AiopNum,ChanNum) CONTROLLER_T *CtlP; Ptr to controller structure CHANNEL_T *ChP; Ptr to channel structure int AiopNum; AIOP number within controller int ChanNum; Channel number within AIOP Return: int: TRUE if initialization succeeded, FALSE if it fails because channel number exceeds number of channels available in AIOP. Comments: This function must be called before a channel can be used. Warnings: No range checking on any of the parameters is done. No context switches are allowed while executing this function. 
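/*
 * Editor's sketch (hypothetical helper): sInitChan() below programs indexed
 * channel registers by writing a 4-byte record to _INDX_ADDR, bytes 0-1
 * being the little-endian register address (the channel's offset is
 * ChanNum * 0x1000 within the AIOP) and bytes 2-3 the little-endian data
 * word, e.g. BRD9600 for the baud divisor.
 */
static void
rp_indexed_write(CHANNEL_T *ChP, Word_t reg, Word_t data)
{
	Byte_t R[4];

	le16enc(R, reg);	/* register address within the AIOP */
	le16enc(R + 2, data);	/* 16-bit value */
	rp_writech4(ChP, _INDX_ADDR, le32dec(R));
}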
*/ int sInitChan( CONTROLLER_T *CtlP, CHANNEL_T *ChP, int AiopNum, int ChanNum) { int i, ChOff; Byte_t *ChR; static Byte_t R[4]; if(ChanNum >= CtlP->AiopNumChan[AiopNum]) return(FALSE); /* exceeds num chans in AIOP */ /* Channel, AIOP, and controller identifiers */ ChP->CtlP = CtlP; ChP->ChanID = CtlP->AiopID[AiopNum]; ChP->AiopNum = AiopNum; ChP->ChanNum = ChanNum; /* Initialize the channel from the RData array */ for(i=0; i < RDATASIZE; i+=4) { R[0] = RData[i]; R[1] = RData[i+1] + 0x10 * ChanNum; R[2] = RData[i+2]; R[3] = RData[i+3]; rp_writech4(ChP,_INDX_ADDR,le32dec(R)); } ChR = ChP->R; for(i=0; i < RREGDATASIZE; i+=4) { ChR[i] = RRegData[i]; ChR[i+1] = RRegData[i+1] + 0x10 * ChanNum; ChR[i+2] = RRegData[i+2]; ChR[i+3] = RRegData[i+3]; } /* Indexed registers */ ChOff = (Word_t)ChanNum * 0x1000; ChP->BaudDiv[0] = (Byte_t)(ChOff + _BAUD); ChP->BaudDiv[1] = (Byte_t)((ChOff + _BAUD) >> 8); ChP->BaudDiv[2] = (Byte_t)BRD9600; ChP->BaudDiv[3] = (Byte_t)(BRD9600 >> 8); rp_writech4(ChP,_INDX_ADDR,le32dec(ChP->BaudDiv)); ChP->TxControl[0] = (Byte_t)(ChOff + _TX_CTRL); ChP->TxControl[1] = (Byte_t)((ChOff + _TX_CTRL) >> 8); ChP->TxControl[2] = 0; ChP->TxControl[3] = 0; rp_writech4(ChP,_INDX_ADDR,le32dec(ChP->TxControl)); ChP->RxControl[0] = (Byte_t)(ChOff + _RX_CTRL); ChP->RxControl[1] = (Byte_t)((ChOff + _RX_CTRL) >> 8); ChP->RxControl[2] = 0; ChP->RxControl[3] = 0; rp_writech4(ChP,_INDX_ADDR,le32dec(ChP->RxControl)); ChP->TxEnables[0] = (Byte_t)(ChOff + _TX_ENBLS); ChP->TxEnables[1] = (Byte_t)((ChOff + _TX_ENBLS) >> 8); ChP->TxEnables[2] = 0; ChP->TxEnables[3] = 0; rp_writech4(ChP,_INDX_ADDR,le32dec(ChP->TxEnables)); ChP->TxCompare[0] = (Byte_t)(ChOff + _TXCMP1); ChP->TxCompare[1] = (Byte_t)((ChOff + _TXCMP1) >> 8); ChP->TxCompare[2] = 0; ChP->TxCompare[3] = 0; rp_writech4(ChP,_INDX_ADDR,le32dec(ChP->TxCompare)); ChP->TxReplace1[0] = (Byte_t)(ChOff + _TXREP1B1); ChP->TxReplace1[1] = (Byte_t)((ChOff + _TXREP1B1) >> 8); ChP->TxReplace1[2] = 0; ChP->TxReplace1[3] = 0; rp_writech4(ChP,_INDX_ADDR,le32dec(ChP->TxReplace1)); ChP->TxReplace2[0] = (Byte_t)(ChOff + _TXREP2); ChP->TxReplace2[1] = (Byte_t)((ChOff + _TXREP2) >> 8); ChP->TxReplace2[2] = 0; ChP->TxReplace2[3] = 0; rp_writech4(ChP,_INDX_ADDR,le32dec(ChP->TxReplace2)); ChP->TxFIFOPtrs = ChOff + _TXF_OUTP; ChP->TxFIFO = ChOff + _TX_FIFO; rp_writech1(ChP,_CMD_REG,(Byte_t)ChanNum | RESTXFCNT); /* apply reset Tx FIFO count */ rp_writech1(ChP,_CMD_REG,(Byte_t)ChanNum); /* remove reset Tx FIFO count */ rp_writech2(ChP,_INDX_ADDR,ChP->TxFIFOPtrs); /* clear Tx in/out ptrs */ rp_writech2(ChP,_INDX_DATA,0); ChP->RxFIFOPtrs = ChOff + _RXF_OUTP; ChP->RxFIFO = ChOff + _RX_FIFO; rp_writech1(ChP,_CMD_REG,(Byte_t)ChanNum | RESRXFCNT); /* apply reset Rx FIFO count */ rp_writech1(ChP,_CMD_REG,(Byte_t)ChanNum); /* remove reset Rx FIFO count */ rp_writech2(ChP,_INDX_ADDR,ChP->RxFIFOPtrs); /* clear Rx out ptr */ rp_writech2(ChP,_INDX_DATA,0); rp_writech2(ChP,_INDX_ADDR,ChP->RxFIFOPtrs + 2); /* clear Rx in ptr */ rp_writech2(ChP,_INDX_DATA,0); ChP->TxPrioCnt = ChOff + _TXP_CNT; rp_writech2(ChP,_INDX_ADDR,ChP->TxPrioCnt); rp_writech1(ChP,_INDX_DATA,0); ChP->TxPrioPtr = ChOff + _TXP_PNTR; rp_writech2(ChP,_INDX_ADDR,ChP->TxPrioPtr); rp_writech1(ChP,_INDX_DATA,0); ChP->TxPrioBuf = ChOff + _TXP_BUF; sEnRxProcessor(ChP); /* start the Rx processor */ return(TRUE); } /*************************************************************************** Function: sStopRxProcessor Purpose: Stop the receive processor from processing a channel. 
Call: sStopRxProcessor(ChP) CHANNEL_T *ChP; Ptr to channel structure Comments: The receive processor can be started again with sStartRxProcessor(). This function causes the receive processor to skip over the stopped channel. It does not stop it from processing other channels. Warnings: No context switches are allowed while executing this function. Do not leave the receive processor stopped for more than one character time. After calling this function a delay of 4 uS is required to ensure that the receive processor is no longer processing this channel. */ void sStopRxProcessor(CHANNEL_T *ChP) { Byte_t R[4]; R[0] = ChP->R[0]; R[1] = ChP->R[1]; R[2] = 0x0a; R[3] = ChP->R[3]; rp_writech4(ChP,_INDX_ADDR,le32dec(R)); } /*************************************************************************** Function: sFlushRxFIFO Purpose: Flush the Rx FIFO Call: sFlushRxFIFO(ChP) CHANNEL_T *ChP; Ptr to channel structure Return: void Comments: To prevent data from being enqueued or dequeued in the Tx FIFO while it is being flushed the receive processor is stopped and the transmitter is disabled. After these operations a 4 uS delay is done before clearing the pointers to allow the receive processor to stop. These items are handled inside this function. Warnings: No context switches are allowed while executing this function. */ void sFlushRxFIFO(CHANNEL_T *ChP) { int i; Byte_t Ch; /* channel number within AIOP */ int RxFIFOEnabled; /* TRUE if Rx FIFO enabled */ if(sGetRxCnt(ChP) == 0) /* Rx FIFO empty */ return; /* don't need to flush */ RxFIFOEnabled = FALSE; if(ChP->R[0x32] == 0x08) /* Rx FIFO is enabled */ { RxFIFOEnabled = TRUE; sDisRxFIFO(ChP); /* disable it */ for(i=0; i < 2000/200; i++) /* delay 2 uS to allow proc to disable FIFO*/ rp_readch1(ChP,_INT_CHAN); /* depends on bus i/o timing */ } sGetChanStatus(ChP); /* clear any pending Rx errors in chan stat */ Ch = (Byte_t)sGetChanNum(ChP); rp_writech1(ChP,_CMD_REG,Ch | RESRXFCNT); /* apply reset Rx FIFO count */ rp_writech1(ChP,_CMD_REG,Ch); /* remove reset Rx FIFO count */ rp_writech2(ChP,_INDX_ADDR,ChP->RxFIFOPtrs); /* clear Rx out ptr */ rp_writech2(ChP,_INDX_DATA,0); rp_writech2(ChP,_INDX_ADDR,ChP->RxFIFOPtrs + 2); /* clear Rx in ptr */ rp_writech2(ChP,_INDX_DATA,0); if(RxFIFOEnabled) sEnRxFIFO(ChP); /* enable Rx FIFO */ } /*************************************************************************** Function: sFlushTxFIFO Purpose: Flush the Tx FIFO Call: sFlushTxFIFO(ChP) CHANNEL_T *ChP; Ptr to channel structure Return: void Comments: To prevent data from being enqueued or dequeued in the Tx FIFO while it is being flushed the receive processor is stopped and the transmitter is disabled. After these operations a 4 uS delay is done before clearing the pointers to allow the receive processor to stop. These items are handled inside this function. Warnings: No context switches are allowed while executing this function. 
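/*
 * Editor's note: the flush routines above and below cannot sleep, so they
 * burn time with dummy reads of _INT_CHAN; the driver's own comments
 * ("2000/200" reads for 2 uS, "4000/200" for 4 uS) imply roughly 200ns of
 * bus time per read.  A hypothetical helper capturing the idiom:
 */
static void
rp_busdelay(CHANNEL_T *ChP, int nsec)
{
	int i;

	for (i = 0; i < nsec / 200; i++)
		(void)rp_readch1(ChP, _INT_CHAN);	/* timing depends on bus i/o */
}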
*/ void sFlushTxFIFO(CHANNEL_T *ChP) { int i; Byte_t Ch; /* channel number within AIOP */ int TxEnabled; /* TRUE if transmitter enabled */ if(sGetTxCnt(ChP) == 0) /* Tx FIFO empty */ return; /* don't need to flush */ TxEnabled = FALSE; if(ChP->TxControl[3] & TX_ENABLE) { TxEnabled = TRUE; sDisTransmit(ChP); /* disable transmitter */ } sStopRxProcessor(ChP); /* stop Rx processor */ for(i = 0; i < 4000/200; i++) /* delay 4 uS to allow proc to stop */ rp_readch1(ChP,_INT_CHAN); /* depends on bus i/o timing */ Ch = (Byte_t)sGetChanNum(ChP); rp_writech1(ChP,_CMD_REG,Ch | RESTXFCNT); /* apply reset Tx FIFO count */ rp_writech1(ChP,_CMD_REG,Ch); /* remove reset Tx FIFO count */ rp_writech2(ChP,_INDX_ADDR,ChP->TxFIFOPtrs); /* clear Tx in/out ptrs */ rp_writech2(ChP,_INDX_DATA,0); if(TxEnabled) sEnTransmit(ChP); /* enable transmitter */ sStartRxProcessor(ChP); /* restart Rx processor */ } /*************************************************************************** Function: sWriteTxPrioByte Purpose: Write a byte of priority transmit data to a channel Call: sWriteTxPrioByte(ChP,Data) CHANNEL_T *ChP; Ptr to channel structure Byte_t Data; The transmit data byte Return: int: 1 if the bytes is successfully written, otherwise 0. Comments: The priority byte is transmitted before any data in the Tx FIFO. Warnings: No context switches are allowed while executing this function. */ int sWriteTxPrioByte(CHANNEL_T *ChP, Byte_t Data) { Byte_t DWBuf[4]; /* buffer for double word writes */ if(sGetTxCnt(ChP) > 1) /* write it to Tx priority buffer */ { rp_writech2(ChP,_INDX_ADDR,ChP->TxPrioCnt); /* get priority buffer status */ if(rp_readch1(ChP,_INDX_DATA) & PRI_PEND) /* priority buffer busy */ return(0); /* nothing sent */ le16enc(DWBuf,ChP->TxPrioBuf); /* data byte address */ DWBuf[2] = Data; /* data byte value */ DWBuf[3] = 0; /* priority buffer pointer */ rp_writech4(ChP,_INDX_ADDR,le32dec(DWBuf)); /* write it out */ le16enc(DWBuf,ChP->TxPrioCnt); /* Tx priority count address */ DWBuf[2] = PRI_PEND + 1; /* indicate 1 byte pending */ DWBuf[3] = 0; /* priority buffer pointer */ rp_writech4(ChP,_INDX_ADDR,le32dec(DWBuf)); /* write it out */ } else /* write it to Tx FIFO */ { sWriteTxByte(ChP,sGetTxRxDataIO(ChP),Data); } return(1); /* 1 byte sent */ } /*************************************************************************** Function: sEnInterrupts Purpose: Enable one or more interrupts for a channel Call: sEnInterrupts(ChP,Flags) CHANNEL_T *ChP; Ptr to channel structure Word_t Flags: Interrupt enable flags, can be any combination of the following flags: TXINT_EN: Interrupt on Tx FIFO empty RXINT_EN: Interrupt on Rx FIFO at trigger level (see sSetRxTrigger()) SRCINT_EN: Interrupt on SRC (Special Rx Condition) MCINT_EN: Interrupt on modem input change CHANINT_EN: Allow channel interrupt signal to the AIOP's Interrupt Channel Register. Return: void Comments: If an interrupt enable flag is set in Flags, that interrupt will be enabled. If an interrupt enable flag is not set in Flags, that interrupt will not be changed. Interrupts can be disabled with function sDisInterrupts(). This function sets the appropriate bit for the channel in the AIOP's Interrupt Mask Register if the CHANINT_EN flag is set. This allows this channel's bit to be set in the AIOP's Interrupt Channel Register. Interrupts must also be globally enabled before channel interrupts will be passed on to the host. This is done with function sEnGlobalInt(). In some cases it may be desirable to disable interrupts globally but enable channel interrupts. 
This would allow the global interrupt status register to be used to determine which AIOPs need service. */ void sEnInterrupts(CHANNEL_T *ChP,Word_t Flags) { Byte_t Mask; /* Interrupt Mask Register */ ChP->RxControl[2] |= ((Byte_t)Flags & (RXINT_EN | SRCINT_EN | MCINT_EN)); rp_writech4(ChP,_INDX_ADDR,le32dec(ChP->RxControl)); ChP->TxControl[2] |= ((Byte_t)Flags & TXINT_EN); rp_writech4(ChP,_INDX_ADDR,le32dec(ChP->TxControl)); if(Flags & CHANINT_EN) { Mask = rp_readch1(ChP,_INT_MASK) | rp_sBitMapSetTbl[ChP->ChanNum]; rp_writech1(ChP,_INT_MASK,Mask); } } /*************************************************************************** Function: sDisInterrupts Purpose: Disable one or more interrupts for a channel Call: sDisInterrupts(ChP,Flags) CHANNEL_T *ChP; Ptr to channel structure Word_t Flags: Interrupt flags, can be any combination of the following flags: TXINT_EN: Interrupt on Tx FIFO empty RXINT_EN: Interrupt on Rx FIFO at trigger level (see sSetRxTrigger()) SRCINT_EN: Interrupt on SRC (Special Rx Condition) MCINT_EN: Interrupt on modem input change CHANINT_EN: Disable channel interrupt signal to the AIOP's Interrupt Channel Register. Return: void Comments: If an interrupt flag is set in Flags, that interrupt will be disabled. If an interrupt flag is not set in Flags, that interrupt will not be changed. Interrupts can be enabled with function sEnInterrupts(). This function clears the appropriate bit for the channel in the AIOP's Interrupt Mask Register if the CHANINT_EN flag is set. This blocks this channel's bit from being set in the AIOP's Interrupt Channel Register. */ void sDisInterrupts(CHANNEL_T *ChP,Word_t Flags) { Byte_t Mask; /* Interrupt Mask Register */ ChP->RxControl[2] &= ~((Byte_t)Flags & (RXINT_EN | SRCINT_EN | MCINT_EN)); rp_writech4(ChP,_INDX_ADDR,le32dec(ChP->RxControl)); ChP->TxControl[2] &= ~((Byte_t)Flags & TXINT_EN); rp_writech4(ChP,_INDX_ADDR,le32dec(ChP->TxControl)); if(Flags & CHANINT_EN) { Mask = rp_readch1(ChP,_INT_MASK) & rp_sBitMapClrTbl[ChP->ChanNum]; rp_writech1(ChP,_INT_MASK,Mask); } } /********************************************************************* Begin FreeBsd-specific driver code **********************************************************************/ #define POLL_INTERVAL (hz / 100) #define RP_ISMULTIPORT(dev) ((dev)->id_flags & 0x1) #define RP_MPMASTER(dev) (((dev)->id_flags >> 8) & 0xff) #define RP_NOTAST4(dev) ((dev)->id_flags & 0x04) /* * The top-level routines begin here */ static void rpclose(struct tty *tp); static void rphardclose(struct tty *tp); static int rpmodem(struct tty *, int, int); static int rpparam(struct tty *, struct termios *); static void rpstart(struct tty *); static int rpioctl(struct tty *, u_long, caddr_t, struct thread *); static int rpopen(struct tty *); static void rp_do_receive(struct rp_port *rp, struct tty *tp, CHANNEL_t *cp, unsigned int ChanStatus) { unsigned int CharNStat; int ToRecv, ch, err = 0; ToRecv = sGetRxCnt(cp); if(ToRecv == 0) return; /* If status indicates there are errored characters in the FIFO, then enter status mode (a word in FIFO holds characters and status) */ if(ChanStatus & (RXFOVERFL | RXBREAK | RXFRAME | RXPARITY)) { if(!(ChanStatus & STATMODE)) { ChanStatus |= STATMODE; sEnRxStatusMode(cp); } } /* if we previously entered status mode then read down the FIFO one word at a time, pulling apart the character and the status. Update error counters depending on status. 
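/*
 * Editor's sketch (hypothetical helper): in status mode each FIFO word read
 * by rp_do_receive() below carries the character in the low byte and the
 * per-character error flags in the upper bits, which the driver maps onto
 * the tty layer's TRE_* error codes as follows.
 */
static int
rp_split_status_word(unsigned int CharNStat, int *ch)
{
	int err = 0;

	*ch = CharNStat & 0xff;
	if (CharNStat & (STMBREAK | STMFRAMEH))
		err |= TRE_FRAMING;
	else if (CharNStat & STMPARITYH)
		err |= TRE_PARITY;
	else if (CharNStat & STMRCVROVRH)
		err |= TRE_OVERRUN;	/* the driver also bumps rp_overflows here */
	return (err);
}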
*/ tty_lock(tp); if(ChanStatus & STATMODE) { while(ToRecv) { CharNStat = rp_readch2(cp,sGetTxRxDataIO(cp)); ch = CharNStat & 0xff; if((CharNStat & STMBREAK) || (CharNStat & STMFRAMEH)) err |= TRE_FRAMING; else if (CharNStat & STMPARITYH) err |= TRE_PARITY; else if (CharNStat & STMRCVROVRH) { rp->rp_overflows++; err |= TRE_OVERRUN; } ttydisc_rint(tp, ch, err); ToRecv--; } /* After emtying FIFO in status mode, turn off status mode */ if(sGetRxCnt(cp) == 0) { sDisRxStatusMode(cp); } } else { ToRecv = sGetRxCnt(cp); while (ToRecv) { ch = rp_readch1(cp,sGetTxRxDataIO(cp)); ttydisc_rint(tp, ch & 0xff, err); ToRecv--; } } ttydisc_rint_done(tp); tty_unlock(tp); } static void rp_handle_port(struct rp_port *rp) { CHANNEL_t *cp; struct tty *tp; unsigned int IntMask, ChanStatus; if(!rp) return; cp = &rp->rp_channel; tp = rp->rp_tty; IntMask = sGetChanIntID(cp); IntMask = IntMask & rp->rp_intmask; ChanStatus = sGetChanStatus(cp); if(IntMask & RXF_TRIG) rp_do_receive(rp, tp, cp, ChanStatus); if(IntMask & DELTA_CD) { if(ChanStatus & CD_ACT) { (void)ttydisc_modem(tp, 1); } else { (void)ttydisc_modem(tp, 0); } } /* oldcts = rp->rp_cts; rp->rp_cts = ((ChanStatus & CTS_ACT) != 0); if(oldcts != rp->rp_cts) { printf("CTS change (now %s)... on port %d\n", rp->rp_cts ? "on" : "off", rp->rp_port); } */ } static void rp_do_poll(void *arg) { CONTROLLER_t *ctl; struct rp_port *rp; struct tty *tp; int count; unsigned char CtlMask, AiopMask; rp = arg; tp = rp->rp_tty; tty_lock_assert(tp, MA_OWNED); ctl = rp->rp_ctlp; CtlMask = ctl->ctlmask(ctl); if (CtlMask & (1 << rp->rp_aiop)) { AiopMask = sGetAiopIntStatus(ctl, rp->rp_aiop); if (AiopMask & (1 << rp->rp_chan)) { rp_handle_port(rp); } } count = sGetTxCnt(&rp->rp_channel); if (count >= 0 && (count <= rp->rp_restart)) { rpstart(tp); } callout_schedule(&rp->rp_timer, POLL_INTERVAL); } static struct ttydevsw rp_tty_class = { .tsw_flags = TF_INITLOCK|TF_CALLOUT, .tsw_open = rpopen, .tsw_close = rpclose, .tsw_outwakeup = rpstart, .tsw_ioctl = rpioctl, .tsw_param = rpparam, .tsw_modem = rpmodem, .tsw_free = rpfree, }; static void rpfree(void *softc) { struct rp_port *rp = softc; CONTROLLER_t *ctlp = rp->rp_ctlp; atomic_subtract_32(&ctlp->free, 1); } int rp_attachcommon(CONTROLLER_T *ctlp, int num_aiops, int num_ports) { int unit; int num_chan; int aiop, chan, port; int ChanStatus; int retval; struct rp_port *rp; struct tty *tp; unit = device_get_unit(ctlp->dev); printf("RocketPort%d (Version %s) %d ports.\n", unit, RocketPortVersion, num_ports); ctlp->num_ports = num_ports; ctlp->rp = rp = (struct rp_port *) - malloc(sizeof(struct rp_port) * num_ports, M_DEVBUF, M_NOWAIT | M_ZERO); + mallocarray(num_ports, sizeof(struct rp_port), M_DEVBUF, + M_NOWAIT | M_ZERO); if (rp == NULL) { device_printf(ctlp->dev, "rp_attachcommon: Could not malloc rp_ports structures.\n"); retval = ENOMEM; goto nogo; } port = 0; for(aiop=0; aiop < num_aiops; aiop++) { num_chan = sGetAiopNumChan(ctlp, aiop); for(chan=0; chan < num_chan; chan++, port++, rp++) { rp->rp_tty = tp = tty_alloc(&rp_tty_class, rp); callout_init_mtx(&rp->rp_timer, tty_getlock(tp), 0); rp->rp_port = port; rp->rp_ctlp = ctlp; rp->rp_unit = unit; rp->rp_chan = chan; rp->rp_aiop = aiop; rp->rp_intmask = RXF_TRIG | TXFIFO_MT | SRC_INT | DELTA_CD | DELTA_CTS | DELTA_DSR; #ifdef notdef ChanStatus = sGetChanStatus(&rp->rp_channel); #endif /* notdef */ if(sInitChan(ctlp, &rp->rp_channel, aiop, chan) == 0) { device_printf(ctlp->dev, "RocketPort sInitChan(%d, %d, %d) failed.\n", unit, aiop, chan); retval = ENXIO; goto nogo; } ChanStatus 
= sGetChanStatus(&rp->rp_channel); rp->rp_cts = (ChanStatus & CTS_ACT) != 0; tty_makedev(tp, NULL, "R%r%r", unit, port); } } mtx_init(&ctlp->hwmtx, "rp_hwmtx", NULL, MTX_DEF); ctlp->hwmtx_init = 1; return (0); nogo: rp_releaseresource(ctlp); return (retval); } void rp_releaseresource(CONTROLLER_t *ctlp) { struct rp_port *rp; int i; if (ctlp->rp != NULL) { for (i = 0; i < ctlp->num_ports; i++) { rp = ctlp->rp + i; atomic_add_32(&ctlp->free, 1); tty_lock(rp->rp_tty); tty_rel_gone(rp->rp_tty); } free(ctlp->rp, M_DEVBUF); ctlp->rp = NULL; } while (ctlp->free != 0) { pause("rpwt", hz / 10); } if (ctlp->hwmtx_init) mtx_destroy(&ctlp->hwmtx); } static int rpopen(struct tty *tp) { struct rp_port *rp; int flags; unsigned int IntMask, ChanStatus; rp = tty_softc(tp); flags = 0; flags |= SET_RTS; flags |= SET_DTR; rp->rp_channel.TxControl[3] = ((rp->rp_channel.TxControl[3] & ~(SET_RTS | SET_DTR)) | flags); rp_writech4(&rp->rp_channel,_INDX_ADDR, le32dec(rp->rp_channel.TxControl)); sSetRxTrigger(&rp->rp_channel, TRIG_1); sDisRxStatusMode(&rp->rp_channel); sFlushRxFIFO(&rp->rp_channel); sFlushTxFIFO(&rp->rp_channel); sEnInterrupts(&rp->rp_channel, (TXINT_EN|MCINT_EN|RXINT_EN|SRCINT_EN|CHANINT_EN)); sSetRxTrigger(&rp->rp_channel, TRIG_1); sDisRxStatusMode(&rp->rp_channel); sClrTxXOFF(&rp->rp_channel); /* sDisRTSFlowCtl(&rp->rp_channel); sDisCTSFlowCtl(&rp->rp_channel); */ sDisTxSoftFlowCtl(&rp->rp_channel); sStartRxProcessor(&rp->rp_channel); sEnRxFIFO(&rp->rp_channel); sEnTransmit(&rp->rp_channel); /* sSetDTR(&rp->rp_channel); sSetRTS(&rp->rp_channel); */ IntMask = sGetChanIntID(&rp->rp_channel); IntMask = IntMask & rp->rp_intmask; ChanStatus = sGetChanStatus(&rp->rp_channel); callout_reset(&rp->rp_timer, POLL_INTERVAL, rp_do_poll, rp); device_busy(rp->rp_ctlp->dev); return(0); } static void rpclose(struct tty *tp) { struct rp_port *rp; rp = tty_softc(tp); callout_stop(&rp->rp_timer); rphardclose(tp); device_unbusy(rp->rp_ctlp->dev); } static void rphardclose(struct tty *tp) { struct rp_port *rp; CHANNEL_t *cp; rp = tty_softc(tp); cp = &rp->rp_channel; sFlushRxFIFO(cp); sFlushTxFIFO(cp); sDisTransmit(cp); sDisInterrupts(cp, TXINT_EN|MCINT_EN|RXINT_EN|SRCINT_EN|CHANINT_EN); sDisRTSFlowCtl(cp); sDisCTSFlowCtl(cp); sDisTxSoftFlowCtl(cp); sClrTxXOFF(cp); #ifdef DJA if(tp->t_cflag&HUPCL || !(tp->t_state&TS_ISOPEN) || !tp->t_actout) { sClrDTR(cp); } if(ISCALLOUT(tp->t_dev)) { sClrDTR(cp); } tp->t_actout = FALSE; wakeup(&tp->t_actout); wakeup(TSA_CARR_ON(tp)); #endif /* DJA */ } static int rpioctl(struct tty *tp, u_long cmd, caddr_t data, struct thread *td) { struct rp_port *rp; rp = tty_softc(tp); switch (cmd) { case TIOCSBRK: sSendBreak(&rp->rp_channel); return (0); case TIOCCBRK: sClrBreak(&rp->rp_channel); return (0); default: return ENOIOCTL; } } static int rpmodem(struct tty *tp, int sigon, int sigoff) { struct rp_port *rp; int i, j, k; rp = tty_softc(tp); if (sigon != 0 || sigoff != 0) { i = j = 0; if (sigon & SER_DTR) i = SET_DTR; if (sigoff & SER_DTR) j = SET_DTR; if (sigon & SER_RTS) i = SET_RTS; if (sigoff & SER_RTS) j = SET_RTS; rp->rp_channel.TxControl[3] &= ~i; rp->rp_channel.TxControl[3] |= j; rp_writech4(&rp->rp_channel,_INDX_ADDR, le32dec(rp->rp_channel.TxControl)); } else { i = sGetChanStatusLo(&rp->rp_channel); j = rp->rp_channel.TxControl[3]; k = 0; if (j & SET_DTR) k |= SER_DTR; if (j & SET_RTS) k |= SER_RTS; if (i & CD_ACT) k |= SER_DCD; if (i & DSR_ACT) k |= SER_DSR; if (i & CTS_ACT) k |= SER_CTS; return(k); } return (0); } static struct { int baud; int conversion; } baud_table[] = { 
{B0, 0}, {B50, BRD50}, {B75, BRD75}, {B110, BRD110}, {B134, BRD134}, {B150, BRD150}, {B200, BRD200}, {B300, BRD300}, {B600, BRD600}, {B1200, BRD1200}, {B1800, BRD1800}, {B2400, BRD2400}, {B4800, BRD4800}, {B9600, BRD9600}, {B19200, BRD19200}, {B38400, BRD38400}, {B7200, BRD7200}, {B14400, BRD14400}, {B57600, BRD57600}, {B76800, BRD76800}, {B115200, BRD115200}, {B230400, BRD230400}, {-1, -1} }; static int rp_convert_baud(int baud) { int i; for (i = 0; baud_table[i].baud >= 0; i++) { if (baud_table[i].baud == baud) break; } return baud_table[i].conversion; } static int rpparam(tp, t) struct tty *tp; struct termios *t; { struct rp_port *rp; CHANNEL_t *cp; int cflag, iflag, oflag, lflag; int ospeed; #ifdef RPCLOCAL int devshift; #endif rp = tty_softc(tp); cp = &rp->rp_channel; cflag = t->c_cflag; #ifdef RPCLOCAL devshift = umynor / 32; devshift = 1 << devshift; if ( devshift & RPCLOCAL ) { cflag |= CLOCAL; } #endif iflag = t->c_iflag; oflag = t->c_oflag; lflag = t->c_lflag; ospeed = rp_convert_baud(t->c_ispeed); if(ospeed < 0 || t->c_ispeed != t->c_ospeed) return(EINVAL); if(t->c_ospeed == 0) { sClrDTR(cp); return(0); } rp->rp_fifo_lw = ((t->c_ospeed*2) / 1000) +1; /* Set baud rate ----- we only pay attention to ispeed */ sSetDTR(cp); sSetRTS(cp); sSetBaud(cp, ospeed); if(cflag & CSTOPB) { sSetStop2(cp); } else { sSetStop1(cp); } if(cflag & PARENB) { sEnParity(cp); if(cflag & PARODD) { sSetOddParity(cp); } else { sSetEvenParity(cp); } } else { sDisParity(cp); } if((cflag & CSIZE) == CS8) { sSetData8(cp); rp->rp_imask = 0xFF; } else { sSetData7(cp); rp->rp_imask = 0x7F; } if(iflag & ISTRIP) { rp->rp_imask &= 0x7F; } if(cflag & CLOCAL) { rp->rp_intmask &= ~DELTA_CD; } else { rp->rp_intmask |= DELTA_CD; } /* Put flow control stuff here */ if(cflag & CCTS_OFLOW) { sEnCTSFlowCtl(cp); } else { sDisCTSFlowCtl(cp); } if(cflag & CRTS_IFLOW) { rp->rp_rts_iflow = 1; } else { rp->rp_rts_iflow = 0; } if(cflag & CRTS_IFLOW) { sEnRTSFlowCtl(cp); } else { sDisRTSFlowCtl(cp); } return(0); } static void rpstart(struct tty *tp) { struct rp_port *rp; CHANNEL_t *cp; char flags; int xmit_fifo_room; int i, count, wcount; rp = tty_softc(tp); cp = &rp->rp_channel; flags = rp->rp_channel.TxControl[3]; if(rp->rp_xmit_stopped) { sEnTransmit(cp); rp->rp_xmit_stopped = 0; } xmit_fifo_room = TXFIFO_SIZE - sGetTxCnt(cp); count = ttydisc_getc(tp, &rp->TxBuf, xmit_fifo_room); if(xmit_fifo_room > 0) { for( i = 0, wcount = count >> 1; wcount > 0; i += 2, wcount-- ) { rp_writech2(cp, sGetTxRxDataIO(cp), le16dec(&rp->TxBuf[i])); } if ( count & 1 ) { rp_writech1(cp, sGetTxRxDataIO(cp), rp->TxBuf[(count-1)]); } } } Index: head/sys/dev/rp/rp_isa.c =================================================================== --- head/sys/dev/rp/rp_isa.c (revision 327948) +++ head/sys/dev/rp/rp_isa.c (revision 327949) @@ -1,507 +1,509 @@ /*- * SPDX-License-Identifier: BSD-4-Clause * * Copyright (c) Comtrol Corporation * All rights reserved. * * ISA-specific part separated from: * sys/i386/isa/rp.c,v 1.33 1999/09/28 11:45:27 phk Exp * * Redistribution and use in source and binary forms, with or without * modification, are permitted prodived that the follwoing conditions * are met. * 1. Redistributions of source code must retain the above copyright * notive, this list of conditions and the following disclainer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials prodided with the distribution. * 3. 
All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed by Comtrol Corporation. * 4. The name of Comtrol Corporation may not be used to endorse or * promote products derived from this software without specific * prior written permission. * * THIS SOFTWARE IS PROVIDED BY COMTROL CORPORATION ``AS IS'' AND ANY * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL COMTROL CORPORATION BE LIABLE FOR * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, LIFE OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #define ROCKET_C #include #include #include /* ISA-specific part of CONTROLLER_t */ struct ISACONTROLLER_T { int MBaseIO; /* rid of the Mudbac controller for this controller */ int MReg0IO; /* offset0 of the Mudbac controller for this controller */ int MReg1IO; /* offset1 of the Mudbac controller for this controller */ int MReg2IO; /* offset2 of the Mudbac controller for this controller */ int MReg3IO; /* offset3 of the Mudbac controller for this controller */ Byte_t MReg2; Byte_t MReg3; }; typedef struct ISACONTROLLER_T ISACONTROLLER_t; #define ISACTL(ctlp) ((ISACONTROLLER_t *)((ctlp)->bus_ctlp)) /*************************************************************************** Function: sControllerEOI Purpose: Strobe the MUDBAC's End Of Interrupt bit. 
Call: sControllerEOI(MudbacCtlP,CtlP) CONTROLLER_T *MudbacCtlP; Ptr to Mudbac controller structure CONTROLLER_T *CtlP; Ptr to controller structure */ #define sControllerEOI(MudbacCtlP,CtlP) \ rp_writeio1(MudbacCtlP,ISACTL(CtlP)->MBaseIO,ISACTL(CtlP)->MReg2IO,ISACTL(CtlP)->MReg2 | INT_STROB) /*************************************************************************** Function: sDisAiop Purpose: Disable I/O access to an AIOP Call: sDisAiop(MudbacCtlP,CtlP) CONTROLLER_T *MudbacCtlP; Ptr to Mudbac controller structure CONTROLLER_T *CtlP; Ptr to controller structure int AiopNum; Number of AIOP on controller */ #define sDisAiop(MudbacCtlP,CtlP,AIOPNUM) \ { \ ISACTL(CtlP)->MReg3 &= rp_sBitMapClrTbl[AIOPNUM]; \ rp_writeio1(MudbacCtlP,ISACTL(CtlP)->MBaseIO,ISACTL(CtlP)->MReg3IO,ISACTL(CtlP)->MReg3); \ } /*************************************************************************** Function: sEnAiop Purpose: Enable I/O access to an AIOP Call: sEnAiop(MudbacCtlP,CtlP) CONTROLLER_T *MudbacCtlP; Ptr to Mudbac controller structure CONTROLLER_T *CtlP; Ptr to controller structure int AiopNum; Number of AIOP on controller */ #define sEnAiop(MudbacCtlP,CtlP,AIOPNUM) \ { \ ISACTL(CtlP)->MReg3 |= rp_sBitMapSetTbl[AIOPNUM]; \ rp_writeio1(MudbacCtlP,ISACTL(CtlP)->MBaseIO,ISACTL(CtlP)->MReg3IO,ISACTL(CtlP)->MReg3); \ } /*************************************************************************** Function: sGetControllerIntStatus Purpose: Get the controller interrupt status Call: sGetControllerIntStatus(MudbacCtlP,CtlP) CONTROLLER_T *MudbacCtlP; Ptr to Mudbac controller structure CONTROLLER_T *CtlP; Ptr to controller structure Return: Byte_t: The controller interrupt status in the lower 4 bits. Bits 0 through 3 represent AIOP's 0 through 3 respectively. If a bit is set that AIOP is interrupting. Bits 4 through 7 will always be cleared. */ #define sGetControllerIntStatus(MudbacCtlP,CtlP) \ (rp_readio1(MudbacCtlP,ISACTL(CtlP)->MBaseIO,ISACTL(CtlP)->MReg1IO) & 0x0f) static devclass_t rp_devclass; static CONTROLLER_t *rp_controller; static int rp_nisadevs; static int rp_probe(device_t dev); static int rp_attach(device_t dev); static void rp_isareleaseresource(CONTROLLER_t *ctlp); static int sInitController(CONTROLLER_T *CtlP, CONTROLLER_T *MudbacCtlP, int AiopNum, int IRQNum, Byte_t Frequency, int PeriodicOnly); static rp_aiop2rid_t rp_isa_aiop2rid; static rp_aiop2off_t rp_isa_aiop2off; static rp_ctlmask_t rp_isa_ctlmask; static int rp_probe(device_t dev) { int unit; CONTROLLER_t *controller; int num_aiops; CONTROLLER_t *ctlp; int retval; /* * We have no PnP RocketPort cards. * (At least according to LINT) */ if (isa_get_logicalid(dev) != 0) return (ENXIO); /* We need IO port resource to configure an ISA device. */ if (bus_get_resource_count(dev, SYS_RES_IOPORT, 0) == 0) return (ENXIO); unit = device_get_unit(dev); if (unit >= 4) { device_printf(dev, "rpprobe: unit number %d invalid.\n", unit); return (ENXIO); } device_printf(dev, "probing for RocketPort(ISA) unit %d.\n", unit); ctlp = device_get_softc(dev); bzero(ctlp, sizeof(*ctlp)); ctlp->dev = dev; ctlp->aiop2rid = rp_isa_aiop2rid; ctlp->aiop2off = rp_isa_aiop2off; ctlp->ctlmask = rp_isa_ctlmask; /* The IO ports of AIOPs for an ISA controller are discrete. 
*/ ctlp->io_num = 1; - ctlp->io_rid = malloc(sizeof(*(ctlp->io_rid)) * MAX_AIOPS_PER_BOARD, M_DEVBUF, M_NOWAIT | M_ZERO); - ctlp->io = malloc(sizeof(*(ctlp->io)) * MAX_AIOPS_PER_BOARD, M_DEVBUF, M_NOWAIT | M_ZERO); + ctlp->io_rid = mallocarray(MAX_AIOPS_PER_BOARD, sizeof(*(ctlp->io_rid)), + M_DEVBUF, M_NOWAIT | M_ZERO); + ctlp->io = mallocarray(MAX_AIOPS_PER_BOARD, sizeof(*(ctlp->io)), + M_DEVBUF, M_NOWAIT | M_ZERO); if (ctlp->io_rid == NULL || ctlp->io == NULL) { device_printf(dev, "rp_attach: Out of memory.\n"); retval = ENOMEM; goto nogo; } ctlp->bus_ctlp = malloc(sizeof(ISACONTROLLER_t) * 1, M_DEVBUF, M_NOWAIT | M_ZERO); if (ctlp->bus_ctlp == NULL) { device_printf(dev, "rp_attach: Out of memory.\n"); retval = ENOMEM; goto nogo; } ctlp->io_rid[0] = 0; if (rp_controller != NULL) { controller = rp_controller; ctlp->io[0] = bus_alloc_resource_anywhere(dev, SYS_RES_IOPORT, &ctlp->io_rid[0], 0x40, RF_ACTIVE); } else { controller = rp_controller = ctlp; ctlp->io[0] = bus_alloc_resource_anywhere(dev, SYS_RES_IOPORT, &ctlp->io_rid[0], 0x44, RF_ACTIVE); } if (ctlp->io[0] == NULL) { device_printf(dev, "rp_attach: Resource not available.\n"); retval = ENXIO; goto nogo; } num_aiops = sInitController(ctlp, controller, MAX_AIOPS_PER_BOARD, 0, FREQ_DIS, 0); if (num_aiops <= 0) { device_printf(dev, "board%d init failed.\n", unit); retval = ENXIO; goto nogo; } if (rp_controller == NULL) rp_controller = controller; rp_nisadevs++; device_set_desc(dev, "RocketPort ISA"); return (0); nogo: rp_isareleaseresource(ctlp); return (retval); } static int rp_attach(device_t dev) { int unit; int num_ports, num_aiops; int aiop; CONTROLLER_t *ctlp; int retval; unit = device_get_unit(dev); ctlp = device_get_softc(dev); #ifdef notdef num_aiops = sInitController(ctlp, rp_controller, MAX_AIOPS_PER_BOARD, 0, FREQ_DIS, 0); #else num_aiops = ctlp->NumAiop; #endif /* notdef */ num_ports = 0; for(aiop=0; aiop < num_aiops; aiop++) { sResetAiopByNum(ctlp, aiop); sEnAiop(rp_controller, ctlp, aiop); num_ports += sGetAiopNumChan(ctlp, aiop); } retval = rp_attachcommon(ctlp, num_aiops, num_ports); if (retval != 0) goto nogo; return (0); nogo: rp_isareleaseresource(ctlp); return (retval); } static void rp_isareleaseresource(CONTROLLER_t *ctlp) { int i; rp_releaseresource(ctlp); if (ctlp == rp_controller) rp_controller = NULL; if (ctlp->io != NULL) { for (i = 0 ; i < MAX_AIOPS_PER_BOARD ; i++) if (ctlp->io[i] != NULL) bus_release_resource(ctlp->dev, SYS_RES_IOPORT, ctlp->io_rid[i], ctlp->io[i]); free(ctlp->io, M_DEVBUF); } if (ctlp->io_rid != NULL) free(ctlp->io_rid, M_DEVBUF); if (rp_controller != NULL && rp_controller->io[ISACTL(ctlp)->MBaseIO] != NULL) { bus_release_resource(rp_controller->dev, SYS_RES_IOPORT, rp_controller->io_rid[ISACTL(ctlp)->MBaseIO], rp_controller->io[ISACTL(ctlp)->MBaseIO]); rp_controller->io[ISACTL(ctlp)->MBaseIO] = NULL; rp_controller->io_rid[ISACTL(ctlp)->MBaseIO] = 0; } if (ctlp->bus_ctlp != NULL) free(ctlp->bus_ctlp, M_DEVBUF); } /*************************************************************************** Function: sInitController Purpose: Initialization of controller global registers and controller structure. Call: sInitController(CtlP,MudbacCtlP,AiopNum, IRQNum,Frequency,PeriodicOnly) CONTROLLER_T *CtlP; Ptr to controller structure CONTROLLER_T *MudbacCtlP; Ptr to Mudbac controller structure int AiopNum; Number of Aiops int IRQNum; Interrupt Request number. 
Can be any of the following: 0: Disable global interrupts 3: IRQ 3 4: IRQ 4 5: IRQ 5 9: IRQ 9 10: IRQ 10 11: IRQ 11 12: IRQ 12 15: IRQ 15 Byte_t Frequency: A flag identifying the frequency of the periodic interrupt, can be any one of the following: FREQ_DIS - periodic interrupt disabled FREQ_137HZ - 137 Hertz FREQ_69HZ - 69 Hertz FREQ_34HZ - 34 Hertz FREQ_17HZ - 17 Hertz FREQ_9HZ - 9 Hertz FREQ_4HZ - 4 Hertz If IRQNum is set to 0 the Frequency parameter is overridden; it is forced to a value of FREQ_DIS. int PeriodicOnly: TRUE if all interrupts except the periodic interrupt are to be blocked. FALSE if both the periodic interrupt and other channel interrupts are allowed. If IRQNum is set to 0 the PeriodicOnly parameter is overridden; it is forced to a value of FALSE. Return: int: Number of AIOPs on the controller, or CTLID_NULL if controller initialization failed. Comments: If periodic interrupts are to be disabled but AIOP interrupts are allowed, set Frequency to FREQ_DIS and PeriodicOnly to FALSE. If interrupts are to be completely disabled set IRQNum to 0. Setting Frequency to FREQ_DIS and PeriodicOnly to TRUE is an invalid combination. This function performs initialization of global interrupt modes, but it does not actually enable global interrupts. To enable and disable global interrupts use functions sEnGlobalInt() and sDisGlobalInt(). Enabling of global interrupts is normally not done until all other initializations are complete. Even if interrupts are globally enabled, they must also be individually enabled for each channel that is to generate interrupts. Warnings: No range checking on any of the parameters is done. No context switches are allowed while executing this function. After this function all AIOPs on the controller are disabled; they can be enabled with sEnAiop(). 
*/ static int sInitController( CONTROLLER_T *CtlP, CONTROLLER_T *MudbacCtlP, int AiopNum, int IRQNum, Byte_t Frequency, int PeriodicOnly) { int i; int ctl_base, aiop_base, aiop_size; CtlP->CtlID = CTLID_0001; /* controller release 1 */ ISACTL(CtlP)->MBaseIO = rp_nisadevs; if (MudbacCtlP->io[ISACTL(CtlP)->MBaseIO] != NULL) { ISACTL(CtlP)->MReg0IO = 0x40 + 0; ISACTL(CtlP)->MReg1IO = 0x40 + 1; ISACTL(CtlP)->MReg2IO = 0x40 + 2; ISACTL(CtlP)->MReg3IO = 0x40 + 3; } else { MudbacCtlP->io_rid[ISACTL(CtlP)->MBaseIO] = ISACTL(CtlP)->MBaseIO; ctl_base = rman_get_start(MudbacCtlP->io[0]) + 0x40 + 0x400 * rp_nisadevs; MudbacCtlP->io[ISACTL(CtlP)->MBaseIO] = bus_alloc_resource(MudbacCtlP->dev, SYS_RES_IOPORT, &CtlP->io_rid[ISACTL(CtlP)->MBaseIO], ctl_base, ctl_base + 3, 4, RF_ACTIVE); ISACTL(CtlP)->MReg0IO = 0; ISACTL(CtlP)->MReg1IO = 1; ISACTL(CtlP)->MReg2IO = 2; ISACTL(CtlP)->MReg3IO = 3; } #if 1 ISACTL(CtlP)->MReg2 = 0; /* interrupt disable */ ISACTL(CtlP)->MReg3 = 0; /* no periodic interrupts */ #else if(sIRQMap[IRQNum] == 0) /* interrupts globally disabled */ { ISACTL(CtlP)->MReg2 = 0; /* interrupt disable */ ISACTL(CtlP)->MReg3 = 0; /* no periodic interrupts */ } else { ISACTL(CtlP)->MReg2 = sIRQMap[IRQNum]; /* set IRQ number */ ISACTL(CtlP)->MReg3 = Frequency; /* set frequency */ if(PeriodicOnly) /* periodic interrupt only */ { ISACTL(CtlP)->MReg3 |= PERIODIC_ONLY; } } #endif rp_writeio1(MudbacCtlP,ISACTL(CtlP)->MBaseIO,ISACTL(CtlP)->MReg2IO,ISACTL(CtlP)->MReg2); rp_writeio1(MudbacCtlP,ISACTL(CtlP)->MBaseIO,ISACTL(CtlP)->MReg3IO,ISACTL(CtlP)->MReg3); sControllerEOI(MudbacCtlP,CtlP); /* clear EOI if warm init */ /* Init AIOPs */ CtlP->NumAiop = 0; for(i=0; i < AiopNum; i++) { if (CtlP->io[i] == NULL) { CtlP->io_rid[i] = i; aiop_base = rman_get_start(CtlP->io[0]) + 0x400 * i; if (rp_nisadevs == 0) aiop_size = 0x44; else aiop_size = 0x40; CtlP->io[i] = bus_alloc_resource(CtlP->dev, SYS_RES_IOPORT, &CtlP->io_rid[i], aiop_base, aiop_base + aiop_size - 1, aiop_size, RF_ACTIVE); } else aiop_base = rman_get_start(CtlP->io[i]); rp_writeio1(MudbacCtlP,ISACTL(CtlP)->MBaseIO, ISACTL(CtlP)->MReg2IO, ISACTL(CtlP)->MReg2 | (i & 0x03)); /* AIOP index */ rp_writeio1(MudbacCtlP,ISACTL(CtlP)->MBaseIO, ISACTL(CtlP)->MReg0IO, (Byte_t)(aiop_base >> 6)); /* set up AIOP I/O in MUDBAC */ sEnAiop(MudbacCtlP,CtlP,i); /* enable the AIOP */ CtlP->AiopID[i] = sReadAiopID(CtlP, i); /* read AIOP ID */ if(CtlP->AiopID[i] == AIOPID_NULL) /* if AIOP does not exist */ { sDisAiop(MudbacCtlP,CtlP,i); /* disable AIOP */ bus_release_resource(CtlP->dev, SYS_RES_IOPORT, CtlP->io_rid[i], CtlP->io[i]); CtlP->io[i] = NULL; break; /* done looking for AIOPs */ } CtlP->AiopNumChan[i] = sReadAiopNumChan(CtlP, i); /* num channels in AIOP */ rp_writeaiop2(CtlP,i,_INDX_ADDR,_CLK_PRE); /* clock prescaler */ rp_writeaiop1(CtlP,i,_INDX_DATA,CLOCK_PRESC); CtlP->NumAiop++; /* bump count of AIOPs */ sDisAiop(MudbacCtlP,CtlP,i); /* disable AIOP */ } if(CtlP->NumAiop == 0) return(-1); else return(CtlP->NumAiop); } /* * ARGSUSED * Maps (aiop, offset) to rid. */ static int rp_isa_aiop2rid(int aiop, int offset) { /* rid equals to aiop for an ISA controller. */ return aiop; } /* * ARGSUSED * Maps (aiop, offset) to the offset of resource. */ static int rp_isa_aiop2off(int aiop, int offset) { /* Each aiop has its own resource. */ return offset; } /* Read the int status for an ISA controller. 
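For orientation, the I/O layout that sInitController() programs can be summarized in a few lines. This is an illustrative sketch only, with hypothetical names: the MUDBAC window for board N sits 0x40 above the first board's base plus 0x400 per board, each AIOP decodes a 0x400-spaced window above its own board's base, and MReg0 receives that AIOP base shifted right by 6 bits, exactly as in the loop above.

#include <stdio.h>

#define RP_AIOP_STRIDE	0x400	/* spacing between AIOP I/O windows */
#define RP_MUDBAC_OFF	0x40	/* MUDBAC registers above the base port */

static void
rp_isa_layout(unsigned mudbac_base, unsigned board_base, int board, int naiops)
{
	unsigned ctl_base, aiop_base;
	int i;

	/* Four MUDBAC registers for this board, relative to the first board. */
	ctl_base = mudbac_base + RP_MUDBAC_OFF + RP_AIOP_STRIDE * board;
	printf("board %d: MUDBAC at 0x%x-0x%x\n", board, ctl_base, ctl_base + 3);
	for (i = 0; i < naiops; i++) {
		aiop_base = board_base + RP_AIOP_STRIDE * i;
		/* sInitController() writes this value to MReg0. */
		printf("  AIOP %d at 0x%x, MReg0 = 0x%02x\n",
		    i, aiop_base, (aiop_base >> 6) & 0xff);
	}
}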
*/ static unsigned char rp_isa_ctlmask(CONTROLLER_t *ctlp) { return sGetControllerIntStatus(rp_controller,ctlp); } static device_method_t rp_methods[] = { /* Device interface */ DEVMETHOD(device_probe, rp_probe), DEVMETHOD(device_attach, rp_attach), { 0, 0 } }; static driver_t rp_driver = { "rp", rp_methods, sizeof(CONTROLLER_t), }; /* * rp can be attached to an isa bus. */ DRIVER_MODULE(rp, isa, rp_driver, rp_devclass, 0, 0); Index: head/sys/dev/rp/rp_pci.c =================================================================== --- head/sys/dev/rp/rp_pci.c (revision 327948) +++ head/sys/dev/rp/rp_pci.c (revision 327949) @@ -1,367 +1,369 @@ /*- * SPDX-License-Identifier: BSD-4-Clause * * Copyright (c) Comtrol Corporation * All rights reserved. * * PCI-specific part separated from: * sys/i386/isa/rp.c,v 1.33 1999/09/28 11:45:27 phk Exp * * Redistribution and use in source and binary forms, with or without * modification, are permitted prodived that the follwoing conditions * are met. * 1. Redistributions of source code must retain the above copyright * notive, this list of conditions and the following disclainer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials prodided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed by Comtrol Corporation. * 4. The name of Comtrol Corporation may not be used to endorse or * promote products derived from this software without specific * prior written permission. * * THIS SOFTWARE IS PROVIDED BY COMTROL CORPORATION ``AS IS'' AND ANY * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL COMTROL CORPORATION BE LIABLE FOR * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, LIFE OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #define ROCKET_C #include #include #include #include /* PCI IDs */ #define RP_VENDOR_ID 0x11FE #define RP_DEVICE_ID_32I 0x0001 #define RP_DEVICE_ID_8I 0x0002 #define RP_DEVICE_ID_16I 0x0003 #define RP_DEVICE_ID_4Q 0x0004 #define RP_DEVICE_ID_8O 0x0005 #define RP_DEVICE_ID_8J 0x0006 #define RP_DEVICE_ID_4J 0x0007 #define RP_DEVICE_ID_6M 0x000C #define RP_DEVICE_ID_4M 0x000D #define RP_DEVICE_ID_UPCI_32 0x0801 #define RP_DEVICE_ID_UPCI_16 0x0803 #define RP_DEVICE_ID_UPCI_8O 0x0805 /************************************************************************** MUDBAC remapped for PCI **************************************************************************/ #define _CFG_INT_PCI 0x40 #define _PCI_INT_FUNC 0x3A #define PCI_STROB 0x2000 #define INTR_EN_PCI 0x0010 /*************************************************************************** Function: sPCIControllerEOI Purpose: Strobe the MUDBAC's End Of Interrupt bit. 
Call: sPCIControllerEOI(CtlP) CONTROLLER_T *CtlP; Ptr to controller structure */ #define sPCIControllerEOI(CtlP) rp_writeio2(CtlP, 0, _PCI_INT_FUNC, PCI_STROB) /*************************************************************************** Function: sPCIGetControllerIntStatus Purpose: Get the controller interrupt status Call: sPCIGetControllerIntStatus(CtlP) CONTROLLER_T *CtlP; Ptr to controller structure Return: Byte_t: The controller interrupt status in the lower 4 bits. Bits 0 through 3 represent AIOP's 0 through 3 respectively. If a bit is set that AIOP is interrupting. Bits 4 through 7 will always be cleared. */ #define sPCIGetControllerIntStatus(CTLP) ((rp_readio2(CTLP, 0, _PCI_INT_FUNC) >> 8) & 0x1f) static devclass_t rp_devclass; static int rp_pciprobe(device_t dev); static int rp_pciattach(device_t dev); #ifdef notdef static int rp_pcidetach(device_t dev); static int rp_pcishutdown(device_t dev); #endif /* notdef */ static void rp_pcireleaseresource(CONTROLLER_t *ctlp); static int sPCIInitController( CONTROLLER_t *CtlP, int AiopNum, int IRQNum, Byte_t Frequency, int PeriodicOnly, int VendorDevice); static rp_aiop2rid_t rp_pci_aiop2rid; static rp_aiop2off_t rp_pci_aiop2off; static rp_ctlmask_t rp_pci_ctlmask; /* * The following functions are the pci-specific part * of rp driver. */ static int rp_pciprobe(device_t dev) { char *s; s = NULL; if (pci_get_vendor(dev) == RP_VENDOR_ID) s = "RocketPort PCI"; if (s != NULL) { device_set_desc(dev, s); return (BUS_PROBE_DEFAULT); } return (ENXIO); } static int rp_pciattach(device_t dev) { int num_ports, num_aiops; int aiop; CONTROLLER_t *ctlp; int unit; int retval; ctlp = device_get_softc(dev); bzero(ctlp, sizeof(*ctlp)); ctlp->dev = dev; unit = device_get_unit(dev); ctlp->aiop2rid = rp_pci_aiop2rid; ctlp->aiop2off = rp_pci_aiop2off; ctlp->ctlmask = rp_pci_ctlmask; /* The IO ports of AIOPs for a PCI controller are continuous. 
*/ ctlp->io_num = 1; - ctlp->io_rid = malloc(sizeof(*(ctlp->io_rid)) * ctlp->io_num, M_DEVBUF, M_NOWAIT | M_ZERO); - ctlp->io = malloc(sizeof(*(ctlp->io)) * ctlp->io_num, M_DEVBUF, M_NOWAIT | M_ZERO); + ctlp->io_rid = mallocarray(ctlp->io_num, sizeof(*(ctlp->io_rid)), + M_DEVBUF, M_NOWAIT | M_ZERO); + ctlp->io = mallocarray(ctlp->io_num, sizeof(*(ctlp->io)), M_DEVBUF, + M_NOWAIT | M_ZERO); if (ctlp->io_rid == NULL || ctlp->io == NULL) { device_printf(dev, "rp_pciattach: Out of memory.\n"); retval = ENOMEM; goto nogo; } ctlp->bus_ctlp = NULL; switch (pci_get_device(dev)) { case RP_DEVICE_ID_UPCI_16: case RP_DEVICE_ID_UPCI_32: case RP_DEVICE_ID_UPCI_8O: ctlp->io_rid[0] = PCIR_BAR(2); break; default: ctlp->io_rid[0] = PCIR_BAR(0); break; } ctlp->io[0] = bus_alloc_resource_any(dev, SYS_RES_IOPORT, &ctlp->io_rid[0], RF_ACTIVE); if(ctlp->io[0] == NULL) { device_printf(dev, "ioaddr mapping failed for RocketPort(PCI).\n"); retval = ENXIO; goto nogo; } num_aiops = sPCIInitController(ctlp, MAX_AIOPS_PER_BOARD, 0, FREQ_DIS, 0, pci_get_device(dev)); num_ports = 0; for(aiop=0; aiop < num_aiops; aiop++) { sResetAiopByNum(ctlp, aiop); num_ports += sGetAiopNumChan(ctlp, aiop); } retval = rp_attachcommon(ctlp, num_aiops, num_ports); if (retval != 0) goto nogo; return (0); nogo: rp_pcireleaseresource(ctlp); return (retval); } static int rp_pcidetach(device_t dev) { CONTROLLER_t *ctlp; ctlp = device_get_softc(dev); rp_pcireleaseresource(ctlp); return (0); } static int rp_pcishutdown(device_t dev) { CONTROLLER_t *ctlp; ctlp = device_get_softc(dev); rp_pcireleaseresource(ctlp); return (0); } static void rp_pcireleaseresource(CONTROLLER_t *ctlp) { rp_releaseresource(ctlp); if (ctlp->io != NULL) { if (ctlp->io[0] != NULL) bus_release_resource(ctlp->dev, SYS_RES_IOPORT, ctlp->io_rid[0], ctlp->io[0]); free(ctlp->io, M_DEVBUF); ctlp->io = NULL; } if (ctlp->io_rid != NULL) { free(ctlp->io_rid, M_DEVBUF); ctlp->io = NULL; } } static int sPCIInitController( CONTROLLER_t *CtlP, int AiopNum, int IRQNum, Byte_t Frequency, int PeriodicOnly, int VendorDevice) { int i; CtlP->CtlID = CTLID_0001; /* controller release 1 */ sPCIControllerEOI(CtlP); /* Init AIOPs */ CtlP->NumAiop = 0; for(i=0; i < AiopNum; i++) { /*device_printf(CtlP->dev, "aiop %d.\n", i);*/ CtlP->AiopID[i] = sReadAiopID(CtlP, i); /* read AIOP ID */ /*device_printf(CtlP->dev, "ID = %d.\n", CtlP->AiopID[i]);*/ if(CtlP->AiopID[i] == AIOPID_NULL) /* if AIOP does not exist */ { break; /* done looking for AIOPs */ } switch( VendorDevice ) { case RP_DEVICE_ID_4Q: case RP_DEVICE_ID_4J: case RP_DEVICE_ID_4M: CtlP->AiopNumChan[i] = 4; break; case RP_DEVICE_ID_6M: CtlP->AiopNumChan[i] = 6; break; case RP_DEVICE_ID_8O: case RP_DEVICE_ID_8J: case RP_DEVICE_ID_8I: case RP_DEVICE_ID_16I: case RP_DEVICE_ID_32I: CtlP->AiopNumChan[i] = 8; break; default: #ifdef notdef CtlP->AiopNumChan[i] = 8; #else CtlP->AiopNumChan[i] = sReadAiopNumChan(CtlP, i); #endif /* notdef */ break; } /*device_printf(CtlP->dev, "%d channels.\n", CtlP->AiopNumChan[i]);*/ rp_writeaiop2(CtlP, i, _INDX_ADDR,_CLK_PRE); /* clock prescaler */ /*device_printf(CtlP->dev, "configuring clock prescaler.\n");*/ rp_writeaiop1(CtlP, i, _INDX_DATA,CLOCK_PRESC); /*device_printf(CtlP->dev, "configured clock prescaler.\n");*/ CtlP->NumAiop++; /* bump count of AIOPs */ } if(CtlP->NumAiop == 0) return(-1); else return(CtlP->NumAiop); } /* * ARGSUSED * Maps (aiop, offset) to rid. */ static int rp_pci_aiop2rid(int aiop, int offset) { /* Always return zero for a PCI controller. 
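Like the ISA hunk, this converts the two per-controller arrays to mallocarray(9). The surrounding attach/release pairing is the part worth noting: allocate with M_NOWAIT | M_ZERO, bail out with ENOMEM on a NULL return, and let a single teardown path free whatever was actually allocated. A minimal sketch of that shape, with a hypothetical malloc type and softc (not this driver's), might look like:

#include <sys/param.h>
#include <sys/errno.h>
#include <sys/kernel.h>
#include <sys/malloc.h>

static MALLOC_DEFINE(M_EXAMPLE, "example", "example driver allocations");

struct example_softc {
	int	*rids;
	int	 nrids;
};

static int
example_alloc(struct example_softc *sc, int count)
{
	/* Overflow-checked, zeroed array of resource IDs. */
	sc->rids = mallocarray(count, sizeof(*sc->rids), M_EXAMPLE,
	    M_NOWAIT | M_ZERO);
	if (sc->rids == NULL)
		return (ENOMEM);
	sc->nrids = count;
	return (0);
}

static void
example_free(struct example_softc *sc)
{
	if (sc->rids != NULL) {
		free(sc->rids, M_EXAMPLE);
		sc->rids = NULL;	/* avoid a stale pointer on re-entry */
	}
	sc->nrids = 0;
}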
*/ return 0; } /* * ARGSUSED * Maps (aiop, offset) to the offset of resource. */ static int rp_pci_aiop2off(int aiop, int offset) { /* Each AIOP reserves 0x40 bytes. */ return aiop * 0x40 + offset; } /* Read the int status for a PCI controller. */ static unsigned char rp_pci_ctlmask(CONTROLLER_t *ctlp) { return sPCIGetControllerIntStatus(ctlp); } static device_method_t rp_pcimethods[] = { /* Device interface */ DEVMETHOD(device_probe, rp_pciprobe), DEVMETHOD(device_attach, rp_pciattach), DEVMETHOD(device_detach, rp_pcidetach), DEVMETHOD(device_shutdown, rp_pcishutdown), { 0, 0 } }; static driver_t rp_pcidriver = { "rp", rp_pcimethods, sizeof(CONTROLLER_t), }; /* * rp can be attached to a pci bus. */ DRIVER_MODULE(rp, pci, rp_pcidriver, rp_devclass, 0, 0); Index: head/sys/dev/sound/midi/midi.c =================================================================== --- head/sys/dev/sound/midi/midi.c (revision 327948) +++ head/sys/dev/sound/midi/midi.c (revision 327949) @@ -1,1542 +1,1543 @@ /*- * SPDX-License-Identifier: BSD-2-Clause-NetBSD * * Copyright (c) 2003 Mathew Kanner * Copyright (c) 1998 The NetBSD Foundation, Inc. * All rights reserved. * * This code is derived from software contributed to The NetBSD Foundation * by Lennart Augustsson (augustss@netbsd.org). * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. */ /* * Parts of this file started out as NetBSD: midi.c 1.31 * They are mostly gone. 
Still the most obvious will be the state * machine midi_in */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #ifdef HAVE_KERNEL_OPTION_HEADERS #include "opt_snd.h" #endif #include #include "mpu_if.h" #include #include "synth_if.h" MALLOC_DEFINE(M_MIDI, "midi buffers", "Midi data allocation area"); #ifndef KOBJMETHOD_END #define KOBJMETHOD_END { NULL, NULL } #endif #define PCMMKMINOR(u, d, c) ((((c) & 0xff) << 16) | (((u) & 0x0f) << 4) | ((d) & 0x0f)) #define MIDIMKMINOR(u, d, c) PCMMKMINOR(u, d, c) #define MIDI_DEV_RAW 2 #define MIDI_DEV_MIDICTL 12 enum midi_states { MIDI_IN_START, MIDI_IN_SYSEX, MIDI_IN_DATA }; /* * The MPU interface current has init() uninit() inqsize() outqsize() * callback() : fiddle with the tx|rx status. */ #include "mpu_if.h" /* * /dev/rmidi Structure definitions */ #define MIDI_NAMELEN 16 struct snd_midi { KOBJ_FIELDS; struct mtx lock; /* Protects all but queues */ void *cookie; int unit; /* Should only be used in midistat */ int channel; /* Should only be used in midistat */ int busy; int flags; /* File flags */ char name[MIDI_NAMELEN]; struct mtx qlock; /* Protects inq, outq and flags */ MIDIQ_HEAD(, char) inq, outq; int rchan, wchan; struct selinfo rsel, wsel; int hiwat; /* QLEN(outq)>High-water -> disable * writes from userland */ enum midi_states inq_state; int inq_status, inq_left; /* Variables for the state machine in * Midi_in, this is to provide that * signals only get issued only * complete command packets. */ struct proc *async; struct cdev *dev; struct synth_midi *synth; int synth_flags; TAILQ_ENTRY(snd_midi) link; }; struct synth_midi { KOBJ_FIELDS; struct snd_midi *m; }; static synth_open_t midisynth_open; static synth_close_t midisynth_close; static synth_writeraw_t midisynth_writeraw; static synth_killnote_t midisynth_killnote; static synth_startnote_t midisynth_startnote; static synth_setinstr_t midisynth_setinstr; static synth_alloc_t midisynth_alloc; static synth_controller_t midisynth_controller; static synth_bender_t midisynth_bender; static kobj_method_t midisynth_methods[] = { KOBJMETHOD(synth_open, midisynth_open), KOBJMETHOD(synth_close, midisynth_close), KOBJMETHOD(synth_writeraw, midisynth_writeraw), KOBJMETHOD(synth_setinstr, midisynth_setinstr), KOBJMETHOD(synth_startnote, midisynth_startnote), KOBJMETHOD(synth_killnote, midisynth_killnote), KOBJMETHOD(synth_alloc, midisynth_alloc), KOBJMETHOD(synth_controller, midisynth_controller), KOBJMETHOD(synth_bender, midisynth_bender), KOBJMETHOD_END }; DEFINE_CLASS(midisynth, midisynth_methods, 0); /* * Module Exports & Interface * * struct midi_chan *midi_init(MPU_CLASS cls, int unit, int chan, * void *cookie) * int midi_uninit(struct snd_midi *) * * 0 == no error * EBUSY or other error * * int midi_in(struct snd_midi *, char *buf, int count) * int midi_out(struct snd_midi *, char *buf, int count) * * midi_{in,out} return actual size transfered * */ /* * midi_devs tailq, holder of all rmidi instances protected by midistat_lock */ TAILQ_HEAD(, snd_midi) midi_devs; /* * /dev/midistat variables and declarations, protected by midistat_lock */ static struct mtx midistat_lock; static int midistat_isopen = 0; static struct sbuf midistat_sbuf; static int midistat_bufptr; static struct cdev *midistat_dev; /* * /dev/midistat dev_t declarations */ static d_open_t midistat_open; static d_close_t midistat_close; static 
d_read_t midistat_read; static struct cdevsw midistat_cdevsw = { .d_version = D_VERSION, .d_open = midistat_open, .d_close = midistat_close, .d_read = midistat_read, .d_name = "midistat", }; /* * /dev/rmidi dev_t declarations, struct variable access is protected by * locks contained within the structure. */ static d_open_t midi_open; static d_close_t midi_close; static d_ioctl_t midi_ioctl; static d_read_t midi_read; static d_write_t midi_write; static d_poll_t midi_poll; static struct cdevsw midi_cdevsw = { .d_version = D_VERSION, .d_open = midi_open, .d_close = midi_close, .d_read = midi_read, .d_write = midi_write, .d_ioctl = midi_ioctl, .d_poll = midi_poll, .d_name = "rmidi", }; /* * Prototypes of library functions */ static int midi_destroy(struct snd_midi *, int); static int midistat_prepare(struct sbuf * s); static int midi_load(void); static int midi_unload(void); /* * Misc declr. */ SYSCTL_NODE(_hw, OID_AUTO, midi, CTLFLAG_RD, 0, "Midi driver"); static SYSCTL_NODE(_hw_midi, OID_AUTO, stat, CTLFLAG_RD, 0, "Status device"); int midi_debug; /* XXX: should this be moved into debug.midi? */ SYSCTL_INT(_hw_midi, OID_AUTO, debug, CTLFLAG_RW, &midi_debug, 0, ""); int midi_dumpraw; SYSCTL_INT(_hw_midi, OID_AUTO, dumpraw, CTLFLAG_RW, &midi_dumpraw, 0, ""); int midi_instroff; SYSCTL_INT(_hw_midi, OID_AUTO, instroff, CTLFLAG_RW, &midi_instroff, 0, ""); int midistat_verbose; SYSCTL_INT(_hw_midi_stat, OID_AUTO, verbose, CTLFLAG_RW, &midistat_verbose, 0, ""); #define MIDI_DEBUG(l,a) if(midi_debug>=l) a /* * CODE START */ /* * Register a new rmidi device. cls midi_if interface unit == 0 means * auto-assign new unit number unit != 0 already assigned a unit number, eg. * not the first channel provided by this device. channel, sub-unit * cookie is passed back on MPU calls Typical device drivers will call with * unit=0, channel=1..(number of channels) and cookie=soft_c and won't care * what unit number is used. * * It is an error to call midi_init with an already used unit/channel combo. * * Returns NULL on error * */ struct snd_midi * midi_init(kobj_class_t cls, int unit, int channel, void *cookie) { struct snd_midi *m; int i; int inqsize, outqsize; MIDI_TYPE *buf; MIDI_DEBUG(1, printf("midiinit: unit %d/%d.\n", unit, channel)); mtx_lock(&midistat_lock); /* * Protect against call with existing unit/channel or auto-allocate a * new unit number. 
*/ i = -1; TAILQ_FOREACH(m, &midi_devs, link) { mtx_lock(&m->lock); if (unit != 0) { if (m->unit == unit && m->channel == channel) { mtx_unlock(&m->lock); goto err0; } } else { /* * Find a better unit number */ if (m->unit > i) i = m->unit; } mtx_unlock(&m->lock); } if (unit == 0) unit = i + 1; MIDI_DEBUG(1, printf("midiinit #2: unit %d/%d.\n", unit, channel)); m = malloc(sizeof(*m), M_MIDI, M_NOWAIT | M_ZERO); if (m == NULL) goto err0; m->synth = malloc(sizeof(*m->synth), M_MIDI, M_NOWAIT | M_ZERO); if (m->synth == NULL) goto err1; kobj_init((kobj_t)m->synth, &midisynth_class); m->synth->m = m; kobj_init((kobj_t)m, cls); inqsize = MPU_INQSIZE(m, cookie); outqsize = MPU_OUTQSIZE(m, cookie); MIDI_DEBUG(1, printf("midiinit queues %d/%d.\n", inqsize, outqsize)); if (!inqsize && !outqsize) goto err2; mtx_init(&m->lock, "raw midi", NULL, 0); mtx_init(&m->qlock, "q raw midi", NULL, 0); mtx_lock(&m->lock); mtx_lock(&m->qlock); if (inqsize) - buf = malloc(sizeof(MIDI_TYPE) * inqsize, M_MIDI, M_NOWAIT); + buf = mallocarray(inqsize, sizeof(MIDI_TYPE), M_MIDI, M_NOWAIT); else buf = NULL; MIDIQ_INIT(m->inq, buf, inqsize); if (outqsize) - buf = malloc(sizeof(MIDI_TYPE) * outqsize, M_MIDI, M_NOWAIT); + buf = mallocarray(outqsize, sizeof(MIDI_TYPE), M_MIDI, + M_NOWAIT); else buf = NULL; m->hiwat = outqsize / 2; MIDIQ_INIT(m->outq, buf, outqsize); if ((inqsize && !MIDIQ_BUF(m->inq)) || (outqsize && !MIDIQ_BUF(m->outq))) goto err3; m->busy = 0; m->flags = 0; m->unit = unit; m->channel = channel; m->cookie = cookie; if (MPU_INIT(m, cookie)) goto err3; mtx_unlock(&m->lock); mtx_unlock(&m->qlock); TAILQ_INSERT_TAIL(&midi_devs, m, link); mtx_unlock(&midistat_lock); m->dev = make_dev(&midi_cdevsw, MIDIMKMINOR(unit, MIDI_DEV_RAW, channel), UID_ROOT, GID_WHEEL, 0666, "midi%d.%d", unit, channel); m->dev->si_drv1 = m; return m; err3: mtx_destroy(&m->qlock); mtx_destroy(&m->lock); if (MIDIQ_BUF(m->inq)) free(MIDIQ_BUF(m->inq), M_MIDI); if (MIDIQ_BUF(m->outq)) free(MIDIQ_BUF(m->outq), M_MIDI); err2: free(m->synth, M_MIDI); err1: free(m, M_MIDI); err0: mtx_unlock(&midistat_lock); MIDI_DEBUG(1, printf("midi_init ended in error\n")); return NULL; } /* * midi_uninit does not call MIDI_UNINIT, as since this is the implementors * entry point. midi_uninit if fact, does not send any methods. A call to * midi_uninit is a defacto promise that you won't manipulate ch anymore * */ int midi_uninit(struct snd_midi *m) { int err; err = EBUSY; mtx_lock(&midistat_lock); mtx_lock(&m->lock); if (m->busy) { if (!(m->rchan || m->wchan)) goto err; if (m->rchan) { wakeup(&m->rchan); m->rchan = 0; } if (m->wchan) { wakeup(&m->wchan); m->wchan = 0; } } err = midi_destroy(m, 0); if (!err) goto exit; err: mtx_unlock(&m->lock); exit: mtx_unlock(&midistat_lock); return err; } /* * midi_in: process all data until the queue is full, then discards the rest. * Since midi_in is a state machine, data discards can cause it to get out of * whack. Process as much as possible. It calls, wakeup, selnotify and * psignal at most once. 
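The converted allocations above feed MIDIQ_INIT(), and the out-queue's high-water mark is set to half its size. A minimal userland sketch of the idea follows (hypothetical names, not the MIDIQ macros themselves): writers block while the FIFO is full and are only woken once free space climbs back above hiwat, so wakeups are batched rather than issued for every byte drained.

#include <stdbool.h>
#include <stddef.h>

struct byteq {
	unsigned char	*buf;
	size_t		 size;	/* capacity */
	size_t		 len;	/* bytes currently queued */
	size_t		 hiwat;	/* wake writers above this much free space */
};

static void
byteq_init(struct byteq *q, unsigned char *buf, size_t size)
{
	q->buf = buf;
	q->size = size;
	q->len = 0;
	q->hiwat = size / 2;
}

static size_t
byteq_avail(const struct byteq *q)
{
	return (q->size - q->len);
}

static bool
byteq_should_wake_writers(const struct byteq *q, size_t just_drained)
{
	/* Mirrors the "used && avail > hiwat" test in the drain path. */
	return (just_drained > 0 && byteq_avail(q) > q->hiwat);
}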
*/ #ifdef notdef static int midi_lengths[] = {2, 2, 2, 2, 1, 1, 2, 0}; #endif /* notdef */ /* Number of bytes in a MIDI command */ #define MIDI_LENGTH(d) (midi_lengths[((d) >> 4) & 7]) #define MIDI_ACK 0xfe #define MIDI_IS_STATUS(d) ((d) >= 0x80) #define MIDI_IS_COMMON(d) ((d) >= 0xf0) #define MIDI_SYSEX_START 0xF0 #define MIDI_SYSEX_END 0xF7 int midi_in(struct snd_midi *m, MIDI_TYPE *buf, int size) { /* int i, sig, enq; */ int used; /* MIDI_TYPE data; */ MIDI_DEBUG(5, printf("midi_in: m=%p size=%d\n", m, size)); /* * XXX: locking flub */ if (!(m->flags & M_RX)) return size; used = 0; mtx_lock(&m->qlock); #if 0 /* * Don't bother queuing if not in read mode. Discard everything and * return size so the caller doesn't freak out. */ if (!(m->flags & M_RX)) return size; for (i = sig = 0; i < size; i++) { data = buf[i]; enq = 0; if (data == MIDI_ACK) continue; switch (m->inq_state) { case MIDI_IN_START: if (MIDI_IS_STATUS(data)) { switch (data) { case 0xf0: /* Sysex */ m->inq_state = MIDI_IN_SYSEX; break; case 0xf1: /* MTC quarter frame */ case 0xf3: /* Song select */ m->inq_state = MIDI_IN_DATA; enq = 1; m->inq_left = 1; break; case 0xf2: /* Song position pointer */ m->inq_state = MIDI_IN_DATA; enq = 1; m->inq_left = 2; break; default: if (MIDI_IS_COMMON(data)) { enq = 1; sig = 1; } else { m->inq_state = MIDI_IN_DATA; enq = 1; m->inq_status = data; m->inq_left = MIDI_LENGTH(data); } break; } } else if (MIDI_IS_STATUS(m->inq_status)) { m->inq_state = MIDI_IN_DATA; if (!MIDIQ_FULL(m->inq)) { used++; MIDIQ_ENQ(m->inq, &m->inq_status, 1); } enq = 1; m->inq_left = MIDI_LENGTH(m->inq_status) - 1; } break; /* * End of case MIDI_IN_START: */ case MIDI_IN_DATA: enq = 1; if (--m->inq_left <= 0) sig = 1;/* deliver data */ break; case MIDI_IN_SYSEX: if (data == MIDI_SYSEX_END) m->inq_state = MIDI_IN_START; break; } if (enq) if (!MIDIQ_FULL(m->inq)) { MIDIQ_ENQ(m->inq, &data, 1); used++; } /* * End of the state machines main "for loop" */ } if (sig) { #endif MIDI_DEBUG(6, printf("midi_in: len %jd avail %jd\n", (intmax_t)MIDIQ_LEN(m->inq), (intmax_t)MIDIQ_AVAIL(m->inq))); if (MIDIQ_AVAIL(m->inq) > size) { used = size; MIDIQ_ENQ(m->inq, buf, size); } else { MIDI_DEBUG(4, printf("midi_in: Discarding data qu\n")); mtx_unlock(&m->qlock); return 0; } if (m->rchan) { wakeup(&m->rchan); m->rchan = 0; } selwakeup(&m->rsel); if (m->async) { PROC_LOCK(m->async); kern_psignal(m->async, SIGIO); PROC_UNLOCK(m->async); } #if 0 } #endif mtx_unlock(&m->qlock); return used; } /* * midi_out: The only clearer of the M_TXEN flag. */ int midi_out(struct snd_midi *m, MIDI_TYPE *buf, int size) { int used; /* * XXX: locking flub */ if (!(m->flags & M_TXEN)) return 0; MIDI_DEBUG(2, printf("midi_out: %p\n", m)); mtx_lock(&m->qlock); used = MIN(size, MIDIQ_LEN(m->outq)); MIDI_DEBUG(3, printf("midi_out: used %d\n", used)); if (used) MIDIQ_DEQ(m->outq, buf, used); if (MIDIQ_EMPTY(m->outq)) { m->flags &= ~M_TXEN; MPU_CALLBACKP(m, m->cookie, m->flags); } if (used && MIDIQ_AVAIL(m->outq) > m->hiwat) { if (m->wchan) { wakeup(&m->wchan); m->wchan = 0; } selwakeup(&m->wsel); if (m->async) { PROC_LOCK(m->async); kern_psignal(m->async, SIGIO); PROC_UNLOCK(m->async); } } mtx_unlock(&m->qlock); return used; } /* * /dev/rmidi#.# device access functions */ int midi_open(struct cdev *i_dev, int flags, int mode, struct thread *td) { struct snd_midi *m = i_dev->si_drv1; int retval; MIDI_DEBUG(1, printf("midiopen %p %s %s\n", td, flags & FREAD ? "M_RX" : "", flags & FWRITE ? 
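midi_in()'s state machine (shown in the #if 0 block above) relies on the fact that a MIDI status byte fixes how many data bytes follow it. As a standalone illustration of the MIDI_LENGTH() table, with midi_data_bytes() being a hypothetical helper rather than part of this driver:

static int
midi_data_bytes(unsigned char status)
{
	/* Data bytes following 0x8n..0xFn, indexed by the status nibble. */
	static const int lengths[8] = { 2, 2, 2, 2, 1, 1, 2, 0 };

	if (status < 0x80)	/* not a status byte */
		return (-1);
	return (lengths[(status >> 4) & 7]);
}

Channel messages 0x8n-0xBn and 0xEn carry two data bytes, 0xCn (program change) and 0xDn (channel pressure) carry one, and 0xFn system messages are handled separately by the sysex/common branches of the state machine.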
"M_TX" : "")); if (m == NULL) return ENXIO; mtx_lock(&m->lock); mtx_lock(&m->qlock); retval = 0; if (flags & FREAD) { if (MIDIQ_SIZE(m->inq) == 0) retval = ENXIO; else if (m->flags & M_RX) retval = EBUSY; if (retval) goto err; } if (flags & FWRITE) { if (MIDIQ_SIZE(m->outq) == 0) retval = ENXIO; else if (m->flags & M_TX) retval = EBUSY; if (retval) goto err; } m->busy++; m->rchan = 0; m->wchan = 0; m->async = 0; if (flags & FREAD) { m->flags |= M_RX | M_RXEN; /* * Only clear the inq, the outq might still have data to drain * from a previous session */ MIDIQ_CLEAR(m->inq); } if (flags & FWRITE) m->flags |= M_TX; MPU_CALLBACK(m, m->cookie, m->flags); MIDI_DEBUG(2, printf("midi_open: opened.\n")); err: mtx_unlock(&m->qlock); mtx_unlock(&m->lock); return retval; } int midi_close(struct cdev *i_dev, int flags, int mode, struct thread *td) { struct snd_midi *m = i_dev->si_drv1; int retval; int oldflags; MIDI_DEBUG(1, printf("midi_close %p %s %s\n", td, flags & FREAD ? "M_RX" : "", flags & FWRITE ? "M_TX" : "")); if (m == NULL) return ENXIO; mtx_lock(&m->lock); mtx_lock(&m->qlock); if ((flags & FREAD && !(m->flags & M_RX)) || (flags & FWRITE && !(m->flags & M_TX))) { retval = ENXIO; goto err; } m->busy--; oldflags = m->flags; if (flags & FREAD) m->flags &= ~(M_RX | M_RXEN); if (flags & FWRITE) m->flags &= ~M_TX; if ((m->flags & (M_TXEN | M_RXEN)) != (oldflags & (M_RXEN | M_TXEN))) MPU_CALLBACK(m, m->cookie, m->flags); MIDI_DEBUG(1, printf("midi_close: closed, busy = %d.\n", m->busy)); mtx_unlock(&m->qlock); mtx_unlock(&m->lock); retval = 0; err: return retval; } /* * TODO: midi_read, per oss programmer's guide pg. 42 should return as soon * as data is available. */ int midi_read(struct cdev *i_dev, struct uio *uio, int ioflag) { #define MIDI_RSIZE 32 struct snd_midi *m = i_dev->si_drv1; int retval; int used; char buf[MIDI_RSIZE]; MIDI_DEBUG(5, printf("midiread: count=%lu\n", (unsigned long)uio->uio_resid)); retval = EIO; if (m == NULL) goto err0; mtx_lock(&m->lock); mtx_lock(&m->qlock); if (!(m->flags & M_RX)) goto err1; while (uio->uio_resid > 0) { while (MIDIQ_EMPTY(m->inq)) { retval = EWOULDBLOCK; if (ioflag & O_NONBLOCK) goto err1; mtx_unlock(&m->lock); m->rchan = 1; retval = msleep(&m->rchan, &m->qlock, PCATCH | PDROP, "midi RX", 0); /* * We slept, maybe things have changed since last * dying check */ if (retval == EINTR) goto err0; if (m != i_dev->si_drv1) retval = ENXIO; /* if (retval && retval != ERESTART) */ if (retval) goto err0; mtx_lock(&m->lock); mtx_lock(&m->qlock); m->rchan = 0; if (!m->busy) goto err1; } MIDI_DEBUG(6, printf("midi_read start\n")); /* * At this point, it is certain that m->inq has data */ used = MIN(MIDIQ_LEN(m->inq), uio->uio_resid); used = MIN(used, MIDI_RSIZE); MIDI_DEBUG(6, printf("midiread: uiomove cc=%d\n", used)); MIDIQ_DEQ(m->inq, buf, used); retval = uiomove(buf, used, uio); if (retval) goto err1; } /* * If we Made it here then transfer is good */ retval = 0; err1: mtx_unlock(&m->qlock); mtx_unlock(&m->lock); err0: MIDI_DEBUG(4, printf("midi_read: ret %d\n", retval)); return retval; } /* * midi_write: The only setter of M_TXEN */ int midi_write(struct cdev *i_dev, struct uio *uio, int ioflag) { #define MIDI_WSIZE 32 struct snd_midi *m = i_dev->si_drv1; int retval; int used; char buf[MIDI_WSIZE]; MIDI_DEBUG(4, printf("midi_write\n")); retval = 0; if (m == NULL) goto err0; mtx_lock(&m->lock); mtx_lock(&m->qlock); if (!(m->flags & M_TX)) goto err1; while (uio->uio_resid > 0) { while (MIDIQ_AVAIL(m->outq) == 0) { retval = EWOULDBLOCK; if (ioflag & O_NONBLOCK) 
goto err1; mtx_unlock(&m->lock); m->wchan = 1; MIDI_DEBUG(3, printf("midi_write msleep\n")); retval = msleep(&m->wchan, &m->qlock, PCATCH | PDROP, "midi TX", 0); /* * We slept, maybe things have changed since last * dying check */ if (retval == EINTR) goto err0; if (m != i_dev->si_drv1) retval = ENXIO; if (retval) goto err0; mtx_lock(&m->lock); mtx_lock(&m->qlock); m->wchan = 0; if (!m->busy) goto err1; } /* * We are certain than data can be placed on the queue */ used = MIN(MIDIQ_AVAIL(m->outq), uio->uio_resid); used = MIN(used, MIDI_WSIZE); MIDI_DEBUG(5, printf("midiout: resid %zd len %jd avail %jd\n", uio->uio_resid, (intmax_t)MIDIQ_LEN(m->outq), (intmax_t)MIDIQ_AVAIL(m->outq))); MIDI_DEBUG(5, printf("midi_write: uiomove cc=%d\n", used)); retval = uiomove(buf, used, uio); if (retval) goto err1; MIDIQ_ENQ(m->outq, buf, used); /* * Inform the bottom half that data can be written */ if (!(m->flags & M_TXEN)) { m->flags |= M_TXEN; MPU_CALLBACK(m, m->cookie, m->flags); } } /* * If we Made it here then transfer is good */ retval = 0; err1: mtx_unlock(&m->qlock); mtx_unlock(&m->lock); err0: return retval; } int midi_ioctl(struct cdev *i_dev, u_long cmd, caddr_t arg, int mode, struct thread *td) { return ENXIO; } int midi_poll(struct cdev *i_dev, int events, struct thread *td) { struct snd_midi *m = i_dev->si_drv1; int revents; if (m == NULL) return 0; revents = 0; mtx_lock(&m->lock); mtx_lock(&m->qlock); if (events & (POLLIN | POLLRDNORM)) if (!MIDIQ_EMPTY(m->inq)) events |= events & (POLLIN | POLLRDNORM); if (events & (POLLOUT | POLLWRNORM)) if (MIDIQ_AVAIL(m->outq) < m->hiwat) events |= events & (POLLOUT | POLLWRNORM); if (revents == 0) { if (events & (POLLIN | POLLRDNORM)) selrecord(td, &m->rsel); if (events & (POLLOUT | POLLWRNORM)) selrecord(td, &m->wsel); } mtx_unlock(&m->lock); mtx_unlock(&m->qlock); return (revents); } /* * /dev/midistat device functions * */ static int midistat_open(struct cdev *i_dev, int flags, int mode, struct thread *td) { int error; MIDI_DEBUG(1, printf("midistat_open\n")); mtx_lock(&midistat_lock); if (midistat_isopen) { mtx_unlock(&midistat_lock); return EBUSY; } midistat_isopen = 1; mtx_unlock(&midistat_lock); if (sbuf_new(&midistat_sbuf, NULL, 4096, SBUF_AUTOEXTEND) == NULL) { error = ENXIO; mtx_lock(&midistat_lock); goto out; } mtx_lock(&midistat_lock); midistat_bufptr = 0; error = (midistat_prepare(&midistat_sbuf) > 0) ? 
0 : ENOMEM; out: if (error) midistat_isopen = 0; mtx_unlock(&midistat_lock); return error; } static int midistat_close(struct cdev *i_dev, int flags, int mode, struct thread *td) { MIDI_DEBUG(1, printf("midistat_close\n")); mtx_lock(&midistat_lock); if (!midistat_isopen) { mtx_unlock(&midistat_lock); return EBADF; } sbuf_delete(&midistat_sbuf); midistat_isopen = 0; mtx_unlock(&midistat_lock); return 0; } static int midistat_read(struct cdev *i_dev, struct uio *buf, int flag) { int l, err; MIDI_DEBUG(4, printf("midistat_read\n")); mtx_lock(&midistat_lock); if (!midistat_isopen) { mtx_unlock(&midistat_lock); return EBADF; } l = min(buf->uio_resid, sbuf_len(&midistat_sbuf) - midistat_bufptr); err = 0; if (l > 0) { mtx_unlock(&midistat_lock); err = uiomove(sbuf_data(&midistat_sbuf) + midistat_bufptr, l, buf); mtx_lock(&midistat_lock); } else l = 0; midistat_bufptr += l; mtx_unlock(&midistat_lock); return err; } /* * Module library functions */ static int midistat_prepare(struct sbuf *s) { struct snd_midi *m; mtx_assert(&midistat_lock, MA_OWNED); sbuf_printf(s, "FreeBSD Midi Driver (midi2)\n"); if (TAILQ_EMPTY(&midi_devs)) { sbuf_printf(s, "No devices installed.\n"); sbuf_finish(s); return sbuf_len(s); } sbuf_printf(s, "Installed devices:\n"); TAILQ_FOREACH(m, &midi_devs, link) { mtx_lock(&m->lock); sbuf_printf(s, "%s [%d/%d:%s]", m->name, m->unit, m->channel, MPU_PROVIDER(m, m->cookie)); sbuf_printf(s, "%s", MPU_DESCR(m, m->cookie, midistat_verbose)); sbuf_printf(s, "\n"); mtx_unlock(&m->lock); } sbuf_finish(s); return sbuf_len(s); } #ifdef notdef /* * Convert IOCTL command to string for debugging */ static char * midi_cmdname(int cmd) { static struct { int cmd; char *name; } *tab, cmdtab_midiioctl[] = { #define A(x) {x, ## x} /* * Once we have some real IOCTLs define, the following will * be relavant. * * A(SNDCTL_MIDI_PRETIME), A(SNDCTL_MIDI_MPUMODE), * A(SNDCTL_MIDI_MPUCMD), A(SNDCTL_SYNTH_INFO), * A(SNDCTL_MIDI_INFO), A(SNDCTL_SYNTH_MEMAVL), * A(SNDCTL_FM_LOAD_INSTR), A(SNDCTL_FM_4OP_ENABLE), * A(MIOSPASSTHRU), A(MIOGPASSTHRU), A(AIONWRITE), * A(AIOGSIZE), A(AIOSSIZE), A(AIOGFMT), A(AIOSFMT), * A(AIOGMIX), A(AIOSMIX), A(AIOSTOP), A(AIOSYNC), * A(AIOGCAP), */ #undef A { -1, "unknown" }, }; for (tab = cmdtab_midiioctl; tab->cmd != cmd && tab->cmd != -1; tab++); return tab->name; } #endif /* notdef */ /* * midisynth */ int midisynth_open(void *n, void *arg, int flags) { struct snd_midi *m = ((struct synth_midi *)n)->m; int retval; MIDI_DEBUG(1, printf("midisynth_open %s %s\n", flags & FREAD ? "M_RX" : "", flags & FWRITE ? 
"M_TX" : "")); if (m == NULL) return ENXIO; mtx_lock(&m->lock); mtx_lock(&m->qlock); retval = 0; if (flags & FREAD) { if (MIDIQ_SIZE(m->inq) == 0) retval = ENXIO; else if (m->flags & M_RX) retval = EBUSY; if (retval) goto err; } if (flags & FWRITE) { if (MIDIQ_SIZE(m->outq) == 0) retval = ENXIO; else if (m->flags & M_TX) retval = EBUSY; if (retval) goto err; } m->busy++; /* * TODO: Consider m->async = 0; */ if (flags & FREAD) { m->flags |= M_RX | M_RXEN; /* * Only clear the inq, the outq might still have data to drain * from a previous session */ MIDIQ_CLEAR(m->inq); m->rchan = 0; } if (flags & FWRITE) { m->flags |= M_TX; m->wchan = 0; } m->synth_flags = flags & (FREAD | FWRITE); MPU_CALLBACK(m, m->cookie, m->flags); err: mtx_unlock(&m->qlock); mtx_unlock(&m->lock); MIDI_DEBUG(2, printf("midisynth_open: return %d.\n", retval)); return retval; } int midisynth_close(void *n) { struct snd_midi *m = ((struct synth_midi *)n)->m; int retval; int oldflags; MIDI_DEBUG(1, printf("midisynth_close %s %s\n", m->synth_flags & FREAD ? "M_RX" : "", m->synth_flags & FWRITE ? "M_TX" : "")); if (m == NULL) return ENXIO; mtx_lock(&m->lock); mtx_lock(&m->qlock); if ((m->synth_flags & FREAD && !(m->flags & M_RX)) || (m->synth_flags & FWRITE && !(m->flags & M_TX))) { retval = ENXIO; goto err; } m->busy--; oldflags = m->flags; if (m->synth_flags & FREAD) m->flags &= ~(M_RX | M_RXEN); if (m->synth_flags & FWRITE) m->flags &= ~M_TX; if ((m->flags & (M_TXEN | M_RXEN)) != (oldflags & (M_RXEN | M_TXEN))) MPU_CALLBACK(m, m->cookie, m->flags); MIDI_DEBUG(1, printf("midi_close: closed, busy = %d.\n", m->busy)); mtx_unlock(&m->qlock); mtx_unlock(&m->lock); retval = 0; err: return retval; } /* * Always blocking. */ int midisynth_writeraw(void *n, uint8_t *buf, size_t len) { struct snd_midi *m = ((struct synth_midi *)n)->m; int retval; int used; int i; MIDI_DEBUG(4, printf("midisynth_writeraw\n")); retval = 0; if (m == NULL) return ENXIO; mtx_lock(&m->lock); mtx_lock(&m->qlock); if (!(m->flags & M_TX)) goto err1; if (midi_dumpraw) printf("midi dump: "); while (len > 0) { while (MIDIQ_AVAIL(m->outq) == 0) { if (!(m->flags & M_TXEN)) { m->flags |= M_TXEN; MPU_CALLBACK(m, m->cookie, m->flags); } mtx_unlock(&m->lock); m->wchan = 1; MIDI_DEBUG(3, printf("midisynth_writeraw msleep\n")); retval = msleep(&m->wchan, &m->qlock, PCATCH | PDROP, "midi TX", 0); /* * We slept, maybe things have changed since last * dying check */ if (retval == EINTR) goto err0; if (retval) goto err0; mtx_lock(&m->lock); mtx_lock(&m->qlock); m->wchan = 0; if (!m->busy) goto err1; } /* * We are certain than data can be placed on the queue */ used = MIN(MIDIQ_AVAIL(m->outq), len); used = MIN(used, MIDI_WSIZE); MIDI_DEBUG(5, printf("midi_synth: resid %zu len %jd avail %jd\n", len, (intmax_t)MIDIQ_LEN(m->outq), (intmax_t)MIDIQ_AVAIL(m->outq))); if (midi_dumpraw) for (i = 0; i < used; i++) printf("%x ", buf[i]); MIDIQ_ENQ(m->outq, buf, used); len -= used; /* * Inform the bottom half that data can be written */ if (!(m->flags & M_TXEN)) { m->flags |= M_TXEN; MPU_CALLBACK(m, m->cookie, m->flags); } } /* * If we Made it here then transfer is good */ if (midi_dumpraw) printf("\n"); retval = 0; err1: mtx_unlock(&m->qlock); mtx_unlock(&m->lock); err0: return retval; } static int midisynth_killnote(void *n, uint8_t chn, uint8_t note, uint8_t vel) { u_char c[3]; if (note > 127 || chn > 15) return (EINVAL); if (vel > 127) vel = 127; if (vel == 64) { c[0] = 0x90 | (chn & 0x0f); /* Note on. 
*/ c[1] = (u_char)note; c[2] = 0; } else { c[0] = 0x80 | (chn & 0x0f); /* Note off. */ c[1] = (u_char)note; c[2] = (u_char)vel; } return midisynth_writeraw(n, c, 3); } static int midisynth_setinstr(void *n, uint8_t chn, uint16_t instr) { u_char c[2]; if (instr > 127 || chn > 15) return EINVAL; c[0] = 0xc0 | (chn & 0x0f); /* Progamme change. */ c[1] = instr + midi_instroff; return midisynth_writeraw(n, c, 2); } static int midisynth_startnote(void *n, uint8_t chn, uint8_t note, uint8_t vel) { u_char c[3]; if (note > 127 || chn > 15) return EINVAL; if (vel > 127) vel = 127; c[0] = 0x90 | (chn & 0x0f); /* Note on. */ c[1] = (u_char)note; c[2] = (u_char)vel; return midisynth_writeraw(n, c, 3); } static int midisynth_alloc(void *n, uint8_t chan, uint8_t note) { return chan; } static int midisynth_controller(void *n, uint8_t chn, uint8_t ctrlnum, uint16_t val) { u_char c[3]; if (ctrlnum > 127 || chn > 15) return EINVAL; c[0] = 0xb0 | (chn & 0x0f); /* Control Message. */ c[1] = ctrlnum; c[2] = val; return midisynth_writeraw(n, c, 3); } static int midisynth_bender(void *n, uint8_t chn, uint16_t val) { u_char c[3]; if (val > 16383 || chn > 15) return EINVAL; c[0] = 0xe0 | (chn & 0x0f); /* Pitch bend. */ c[1] = (u_char)val & 0x7f; c[2] = (u_char)(val >> 7) & 0x7f; return midisynth_writeraw(n, c, 3); } /* * Single point of midi destructions. */ static int midi_destroy(struct snd_midi *m, int midiuninit) { mtx_assert(&midistat_lock, MA_OWNED); mtx_assert(&m->lock, MA_OWNED); MIDI_DEBUG(3, printf("midi_destroy\n")); m->dev->si_drv1 = NULL; mtx_unlock(&m->lock); /* XXX */ destroy_dev(m->dev); TAILQ_REMOVE(&midi_devs, m, link); if (midiuninit) MPU_UNINIT(m, m->cookie); free(MIDIQ_BUF(m->inq), M_MIDI); free(MIDIQ_BUF(m->outq), M_MIDI); mtx_destroy(&m->qlock); mtx_destroy(&m->lock); free(m->synth, M_MIDI); free(m, M_MIDI); return 0; } /* * Load and unload functions, creates the /dev/midistat device */ static int midi_load(void) { mtx_init(&midistat_lock, "midistat lock", NULL, 0); TAILQ_INIT(&midi_devs); /* Initialize the queue. 
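The midisynth_* helpers above build raw channel messages by hand: a status byte carrying the channel number in its low nibble, followed by one or two 7-bit data bytes, with 14-bit values such as pitch bend split low byte first. A small sketch with hypothetical builder names:

#include <stddef.h>
#include <stdint.h>

static size_t
build_note_on(uint8_t *out, uint8_t chn, uint8_t note, uint8_t vel)
{
	out[0] = 0x90 | (chn & 0x0f);	/* note on, channel 0-15 */
	out[1] = note & 0x7f;
	out[2] = vel & 0x7f;
	return (3);
}

static size_t
build_pitch_bend(uint8_t *out, uint8_t chn, uint16_t val)	/* val 0-16383 */
{
	out[0] = 0xe0 | (chn & 0x0f);	/* pitch bend */
	out[1] = val & 0x7f;		/* low 7 bits */
	out[2] = (val >> 7) & 0x7f;	/* high 7 bits */
	return (3);
}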
*/ midistat_dev = make_dev(&midistat_cdevsw, MIDIMKMINOR(0, MIDI_DEV_MIDICTL, 0), UID_ROOT, GID_WHEEL, 0666, "midistat"); return 0; } static int midi_unload(void) { struct snd_midi *m, *tmp; int retval; MIDI_DEBUG(1, printf("midi_unload()\n")); retval = EBUSY; mtx_lock(&midistat_lock); if (midistat_isopen) goto exit0; TAILQ_FOREACH_SAFE(m, &midi_devs, link, tmp) { mtx_lock(&m->lock); if (m->busy) retval = EBUSY; else retval = midi_destroy(m, 1); if (retval) goto exit1; } mtx_unlock(&midistat_lock); /* XXX */ destroy_dev(midistat_dev); /* * Made it here then unload is complete */ mtx_destroy(&midistat_lock); return 0; exit1: mtx_unlock(&m->lock); exit0: mtx_unlock(&midistat_lock); if (retval) MIDI_DEBUG(2, printf("midi_unload: failed\n")); return retval; } extern int seq_modevent(module_t mod, int type, void *data); static int midi_modevent(module_t mod, int type, void *data) { int retval; retval = 0; switch (type) { case MOD_LOAD: retval = midi_load(); #if 0 if (retval == 0) retval = seq_modevent(mod, type, data); #endif break; case MOD_UNLOAD: retval = midi_unload(); #if 0 if (retval == 0) retval = seq_modevent(mod, type, data); #endif break; default: break; } return retval; } kobj_t midimapper_addseq(void *arg1, int *unit, void **cookie) { unit = NULL; return (kobj_t)arg1; } int midimapper_open(void *arg1, void **cookie) { int retval = 0; struct snd_midi *m; mtx_lock(&midistat_lock); TAILQ_FOREACH(m, &midi_devs, link) { retval++; } mtx_unlock(&midistat_lock); return retval; } int midimapper_close(void *arg1, void *cookie) { return 0; } kobj_t midimapper_fetch_synth(void *arg, void *cookie, int unit) { struct snd_midi *m; int retval = 0; mtx_lock(&midistat_lock); TAILQ_FOREACH(m, &midi_devs, link) { if (unit == retval) { mtx_unlock(&midistat_lock); return (kobj_t)m->synth; } retval++; } mtx_unlock(&midistat_lock); return NULL; } DEV_MODULE(midi, midi_modevent, NULL); MODULE_VERSION(midi, 1); Index: head/sys/dev/sound/pci/hda/hdaa.c =================================================================== --- head/sys/dev/sound/pci/hda/hdaa.c (revision 327948) +++ head/sys/dev/sound/pci/hda/hdaa.c (revision 327949) @@ -1,7152 +1,7152 @@ /*- * SPDX-License-Identifier: BSD-2-Clause-FreeBSD * * Copyright (c) 2006 Stephane E. Potvin * Copyright (c) 2006 Ariff Abdullah * Copyright (c) 2008-2012 Alexander Motin * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ /* * Intel High Definition Audio (Audio function) driver for FreeBSD. */ #ifdef HAVE_KERNEL_OPTION_HEADERS #include "opt_snd.h" #endif #include #include #include #include #include #include #include "mixer_if.h" SND_DECLARE_FILE("$FreeBSD$"); #define hdaa_lock(devinfo) snd_mtxlock((devinfo)->lock) #define hdaa_unlock(devinfo) snd_mtxunlock((devinfo)->lock) #define hdaa_lockassert(devinfo) snd_mtxassert((devinfo)->lock) #define hdaa_lockowned(devinfo) mtx_owned((devinfo)->lock) static const struct { const char *key; uint32_t value; } hdaa_quirks_tab[] = { { "softpcmvol", HDAA_QUIRK_SOFTPCMVOL }, { "fixedrate", HDAA_QUIRK_FIXEDRATE }, { "forcestereo", HDAA_QUIRK_FORCESTEREO }, { "eapdinv", HDAA_QUIRK_EAPDINV }, { "senseinv", HDAA_QUIRK_SENSEINV }, { "ivref50", HDAA_QUIRK_IVREF50 }, { "ivref80", HDAA_QUIRK_IVREF80 }, { "ivref100", HDAA_QUIRK_IVREF100 }, { "ovref50", HDAA_QUIRK_OVREF50 }, { "ovref80", HDAA_QUIRK_OVREF80 }, { "ovref100", HDAA_QUIRK_OVREF100 }, { "ivref", HDAA_QUIRK_IVREF }, { "ovref", HDAA_QUIRK_OVREF }, { "vref", HDAA_QUIRK_VREF }, }; #define HDA_PARSE_MAXDEPTH 10 MALLOC_DEFINE(M_HDAA, "hdaa", "HDA Audio"); static const char *HDA_COLORS[16] = {"Unknown", "Black", "Grey", "Blue", "Green", "Red", "Orange", "Yellow", "Purple", "Pink", "Res.A", "Res.B", "Res.C", "Res.D", "White", "Other"}; static const char *HDA_DEVS[16] = {"Line-out", "Speaker", "Headphones", "CD", "SPDIF-out", "Digital-out", "Modem-line", "Modem-handset", "Line-in", "AUX", "Mic", "Telephony", "SPDIF-in", "Digital-in", "Res.E", "Other"}; static const char *HDA_CONNS[4] = {"Jack", "None", "Fixed", "Both"}; static const char *HDA_CONNECTORS[16] = { "Unknown", "1/8", "1/4", "ATAPI", "RCA", "Optical", "Digital", "Analog", "DIN", "XLR", "RJ-11", "Combo", "0xc", "0xd", "0xe", "Other" }; static const char *HDA_LOCS[64] = { "0x00", "Rear", "Front", "Left", "Right", "Top", "Bottom", "Rear-panel", "Drive-bay", "0x09", "0x0a", "0x0b", "0x0c", "0x0d", "0x0e", "0x0f", "Internal", "0x11", "0x12", "0x13", "0x14", "0x15", "0x16", "Riser", "0x18", "Onboard", "0x1a", "0x1b", "0x1c", "0x1d", "0x1e", "0x1f", "External", "Ext-Rear", "Ext-Front", "Ext-Left", "Ext-Right", "Ext-Top", "Ext-Bottom", "0x07", "0x28", "0x29", "0x2a", "0x2b", "0x2c", "0x2d", "0x2e", "0x2f", "Other", "0x31", "0x32", "0x33", "0x34", "0x35", "Other-Bott", "Lid-In", "Lid-Out", "0x39", "0x3a", "0x3b", "0x3c", "0x3d", "0x3e", "0x3f" }; static const char *HDA_GPIO_ACTIONS[8] = { "keep", "set", "clear", "disable", "input", "0x05", "0x06", "0x07"}; static const char *HDA_HDMI_CODING_TYPES[18] = { "undefined", "LPCM", "AC-3", "MPEG1", "MP3", "MPEG2", "AAC-LC", "DTS", "ATRAC", "DSD", "E-AC-3", "DTS-HD", "MLP", "DST", "WMAPro", "HE-AAC", "HE-AACv2", "MPEG-Surround" }; /* Default */ static uint32_t hdaa_fmt[] = { SND_FORMAT(AFMT_S16_LE, 2, 0), 0 }; static struct pcmchan_caps hdaa_caps = {48000, 48000, hdaa_fmt, 0}; static const struct { uint32_t rate; int valid; uint16_t base; uint16_t mul; uint16_t div; } hda_rate_tab[] = { { 8000, 1, 0x0000, 0x0000, 0x0500 }, 
/* (48000 * 1) / 6 */ { 9600, 0, 0x0000, 0x0000, 0x0400 }, /* (48000 * 1) / 5 */ { 12000, 0, 0x0000, 0x0000, 0x0300 }, /* (48000 * 1) / 4 */ { 16000, 1, 0x0000, 0x0000, 0x0200 }, /* (48000 * 1) / 3 */ { 18000, 0, 0x0000, 0x1000, 0x0700 }, /* (48000 * 3) / 8 */ { 19200, 0, 0x0000, 0x0800, 0x0400 }, /* (48000 * 2) / 5 */ { 24000, 0, 0x0000, 0x0000, 0x0100 }, /* (48000 * 1) / 2 */ { 28800, 0, 0x0000, 0x1000, 0x0400 }, /* (48000 * 3) / 5 */ { 32000, 1, 0x0000, 0x0800, 0x0200 }, /* (48000 * 2) / 3 */ { 36000, 0, 0x0000, 0x1000, 0x0300 }, /* (48000 * 3) / 4 */ { 38400, 0, 0x0000, 0x1800, 0x0400 }, /* (48000 * 4) / 5 */ { 48000, 1, 0x0000, 0x0000, 0x0000 }, /* (48000 * 1) / 1 */ { 64000, 0, 0x0000, 0x1800, 0x0200 }, /* (48000 * 4) / 3 */ { 72000, 0, 0x0000, 0x1000, 0x0100 }, /* (48000 * 3) / 2 */ { 96000, 1, 0x0000, 0x0800, 0x0000 }, /* (48000 * 2) / 1 */ { 144000, 0, 0x0000, 0x1000, 0x0000 }, /* (48000 * 3) / 1 */ { 192000, 1, 0x0000, 0x1800, 0x0000 }, /* (48000 * 4) / 1 */ { 8820, 0, 0x4000, 0x0000, 0x0400 }, /* (44100 * 1) / 5 */ { 11025, 1, 0x4000, 0x0000, 0x0300 }, /* (44100 * 1) / 4 */ { 12600, 0, 0x4000, 0x0800, 0x0600 }, /* (44100 * 2) / 7 */ { 14700, 0, 0x4000, 0x0000, 0x0200 }, /* (44100 * 1) / 3 */ { 17640, 0, 0x4000, 0x0800, 0x0400 }, /* (44100 * 2) / 5 */ { 18900, 0, 0x4000, 0x1000, 0x0600 }, /* (44100 * 3) / 7 */ { 22050, 1, 0x4000, 0x0000, 0x0100 }, /* (44100 * 1) / 2 */ { 25200, 0, 0x4000, 0x1800, 0x0600 }, /* (44100 * 4) / 7 */ { 26460, 0, 0x4000, 0x1000, 0x0400 }, /* (44100 * 3) / 5 */ { 29400, 0, 0x4000, 0x0800, 0x0200 }, /* (44100 * 2) / 3 */ { 33075, 0, 0x4000, 0x1000, 0x0300 }, /* (44100 * 3) / 4 */ { 35280, 0, 0x4000, 0x1800, 0x0400 }, /* (44100 * 4) / 5 */ { 44100, 1, 0x4000, 0x0000, 0x0000 }, /* (44100 * 1) / 1 */ { 58800, 0, 0x4000, 0x1800, 0x0200 }, /* (44100 * 4) / 3 */ { 66150, 0, 0x4000, 0x1000, 0x0100 }, /* (44100 * 3) / 2 */ { 88200, 1, 0x4000, 0x0800, 0x0000 }, /* (44100 * 2) / 1 */ { 132300, 0, 0x4000, 0x1000, 0x0000 }, /* (44100 * 3) / 1 */ { 176400, 1, 0x4000, 0x1800, 0x0000 }, /* (44100 * 4) / 1 */ }; #define HDA_RATE_TAB_LEN (sizeof(hda_rate_tab) / sizeof(hda_rate_tab[0])) const static char *ossnames[] = SOUND_DEVICE_NAMES; /**************************************************************************** * Function prototypes ****************************************************************************/ static int hdaa_pcmchannel_setup(struct hdaa_chan *); static void hdaa_widget_connection_select(struct hdaa_widget *, uint8_t); static void hdaa_audio_ctl_amp_set(struct hdaa_audio_ctl *, uint32_t, int, int); static struct hdaa_audio_ctl *hdaa_audio_ctl_amp_get(struct hdaa_devinfo *, nid_t, int, int, int); static void hdaa_audio_ctl_amp_set_internal(struct hdaa_devinfo *, nid_t, int, int, int, int, int, int); static void hdaa_dump_pin_config(struct hdaa_widget *w, uint32_t conf); static char * hdaa_audio_ctl_ossmixer_mask2allname(uint32_t mask, char *buf, size_t len) { int i, first = 1; bzero(buf, len); for (i = 0; i < SOUND_MIXER_NRDEVICES; i++) { if (mask & (1 << i)) { if (first == 0) strlcat(buf, ", ", len); strlcat(buf, ossnames[i], len); first = 0; } } return (buf); } static struct hdaa_audio_ctl * hdaa_audio_ctl_each(struct hdaa_devinfo *devinfo, int *index) { if (devinfo == NULL || index == NULL || devinfo->ctl == NULL || devinfo->ctlcnt < 1 || *index < 0 || *index >= devinfo->ctlcnt) return (NULL); return (&devinfo->ctl[(*index)++]); } static struct hdaa_audio_ctl * hdaa_audio_ctl_amp_get(struct hdaa_devinfo *devinfo, nid_t nid, int dir, int index, int cnt) 
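Each hda_rate_tab entry encodes its rate as a base clock plus the multiplier and divisor fields of the stream-format word, as the inline comments show: bit 14 selects the 48.0 or 44.1 kHz base, bits 13:11 hold (multiplier - 1) and bits 10:8 hold (divisor - 1). A short sketch of the decoding, assuming only these three fields are passed in:

#include <stdint.h>

static unsigned
hda_decode_rate(uint16_t base, uint16_t mul, uint16_t div)
{
	unsigned rate = (base & 0x4000) ? 44100 : 48000;

	rate *= ((mul >> 11) & 0x7) + 1;	/* multiplier, 1..4 in the table */
	rate /= ((div >> 8) & 0x7) + 1;		/* divisor, 1..8 */
	return (rate);
}

For example, 0x4000 | 0x1800 | 0x0000 decodes to 44100 * 4 / 1 = 176400 Hz, matching the last table entry.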
{ struct hdaa_audio_ctl *ctl; int i, found = 0; if (devinfo == NULL || devinfo->ctl == NULL) return (NULL); i = 0; while ((ctl = hdaa_audio_ctl_each(devinfo, &i)) != NULL) { if (ctl->enable == 0) continue; if (ctl->widget->nid != nid) continue; if (dir && ctl->ndir != dir) continue; if (index >= 0 && ctl->ndir == HDAA_CTL_IN && ctl->dir == ctl->ndir && ctl->index != index) continue; found++; if (found == cnt || cnt <= 0) return (ctl); } return (NULL); } static const struct matrix { struct pcmchan_matrix m; int analog; } matrixes[] = { { SND_CHN_MATRIX_MAP_1_0, 1 }, { SND_CHN_MATRIX_MAP_2_0, 1 }, { SND_CHN_MATRIX_MAP_2_1, 0 }, { SND_CHN_MATRIX_MAP_3_0, 0 }, { SND_CHN_MATRIX_MAP_3_1, 0 }, { SND_CHN_MATRIX_MAP_4_0, 1 }, { SND_CHN_MATRIX_MAP_4_1, 0 }, { SND_CHN_MATRIX_MAP_5_0, 0 }, { SND_CHN_MATRIX_MAP_5_1, 1 }, { SND_CHN_MATRIX_MAP_6_0, 0 }, { SND_CHN_MATRIX_MAP_6_1, 0 }, { SND_CHN_MATRIX_MAP_7_0, 0 }, { SND_CHN_MATRIX_MAP_7_1, 1 }, }; static const char *channel_names[] = SND_CHN_T_NAMES; /* * Connected channels change handler. */ static void hdaa_channels_handler(struct hdaa_audio_as *as) { struct hdaa_pcm_devinfo *pdevinfo = as->pdevinfo; struct hdaa_devinfo *devinfo = pdevinfo->devinfo; struct hdaa_chan *ch = &devinfo->chans[as->chans[0]]; struct hdaa_widget *w; uint8_t *eld; int i, total, sub, assume, channels; uint16_t cpins, upins, tpins; cpins = upins = 0; eld = NULL; for (i = 0; i < 16; i++) { if (as->pins[i] <= 0) continue; w = hdaa_widget_get(devinfo, as->pins[i]); if (w == NULL) continue; if (w->wclass.pin.connected == 1) cpins |= (1 << i); else if (w->wclass.pin.connected != 0) upins |= (1 << i); if (w->eld != NULL && w->eld_len >= 8) eld = w->eld; } tpins = cpins | upins; if (as->hpredir >= 0) tpins &= 0x7fff; if (tpins == 0) tpins = as->pinset; total = sub = assume = channels = 0; if (eld) { /* Map CEA speakers to sound(4) channels. */ if (eld[7] & 0x01) /* Front Left/Right */ channels |= SND_CHN_T_MASK_FL | SND_CHN_T_MASK_FR; if (eld[7] & 0x02) /* Low Frequency Effect */ channels |= SND_CHN_T_MASK_LF; if (eld[7] & 0x04) /* Front Center */ channels |= SND_CHN_T_MASK_FC; if (eld[7] & 0x08) { /* Rear Left/Right */ /* If we have both RLR and RLRC, report RLR as side. */ if (eld[7] & 0x40) /* Rear Left/Right Center */ channels |= SND_CHN_T_MASK_SL | SND_CHN_T_MASK_SR; else channels |= SND_CHN_T_MASK_BL | SND_CHN_T_MASK_BR; } if (eld[7] & 0x10) /* Rear center */ channels |= SND_CHN_T_MASK_BC; if (eld[7] & 0x20) /* Front Left/Right Center */ channels |= SND_CHN_T_MASK_FLC | SND_CHN_T_MASK_FRC; if (eld[7] & 0x40) /* Rear Left/Right Center */ channels |= SND_CHN_T_MASK_BL | SND_CHN_T_MASK_BR; } else if (as->pinset != 0 && (tpins & 0xffe0) == 0) { /* Map UAA speakers to sound(4) channels. */ if (tpins & 0x0001) channels |= SND_CHN_T_MASK_FL | SND_CHN_T_MASK_FR; if (tpins & 0x0002) channels |= SND_CHN_T_MASK_FC | SND_CHN_T_MASK_LF; if (tpins & 0x0004) channels |= SND_CHN_T_MASK_BL | SND_CHN_T_MASK_BR; if (tpins & 0x0008) channels |= SND_CHN_T_MASK_FLC | SND_CHN_T_MASK_FRC; if (tpins & 0x0010) { /* If there is no back pin, report side as back. */ if ((as->pinset & 0x0004) == 0) channels |= SND_CHN_T_MASK_BL | SND_CHN_T_MASK_BR; else channels |= SND_CHN_T_MASK_SL | SND_CHN_T_MASK_SR; } } else if (as->mixed) { /* Mixed assoc can be only stereo or theoretically mono. */ if (ch->channels == 1) channels |= SND_CHN_T_MASK_FC; else channels |= SND_CHN_T_MASK_FL | SND_CHN_T_MASK_FR; } if (channels) { /* We have some usable channels info. 
*/ HDA_BOOTVERBOSE( device_printf(pdevinfo->dev, "%s channel set is: ", as->dir == HDAA_CTL_OUT ? "Playback" : "Recording"); for (i = 0; i < SND_CHN_T_MAX; i++) if (channels & (1 << i)) printf("%s, ", channel_names[i]); printf("\n"); ); /* Look for maximal fitting matrix. */ for (i = 0; i < sizeof(matrixes) / sizeof(struct matrix); i++) { if (as->pinset != 0 && matrixes[i].analog == 0) continue; if ((matrixes[i].m.mask & ~channels) == 0) { total = matrixes[i].m.channels; sub = matrixes[i].m.ext; } } } if (total == 0) { assume = 1; total = ch->channels; sub = (total == 6 || total == 8) ? 1 : 0; } HDA_BOOTVERBOSE( device_printf(pdevinfo->dev, "%s channel matrix is: %s%d.%d (%s)\n", as->dir == HDAA_CTL_OUT ? "Playback" : "Recording", assume ? "unknown, assuming " : "", total - sub, sub, cpins != 0 ? "connected" : (upins != 0 ? "unknown" : "disconnected")); ); } /* * Headphones redirection change handler. */ static void hdaa_hpredir_handler(struct hdaa_widget *w) { struct hdaa_devinfo *devinfo = w->devinfo; struct hdaa_audio_as *as = &devinfo->as[w->bindas]; struct hdaa_widget *w1; struct hdaa_audio_ctl *ctl; uint32_t val; int j, connected = w->wclass.pin.connected; HDA_BOOTVERBOSE( device_printf((as->pdevinfo && as->pdevinfo->dev) ? as->pdevinfo->dev : devinfo->dev, "Redirect output to: %s\n", connected ? "headphones": "main"); ); /* (Un)Mute headphone pin. */ ctl = hdaa_audio_ctl_amp_get(devinfo, w->nid, HDAA_CTL_IN, -1, 1); if (ctl != NULL && ctl->mute) { /* If pin has muter - use it. */ val = connected ? 0 : 1; if (val != ctl->forcemute) { ctl->forcemute = val; hdaa_audio_ctl_amp_set(ctl, HDAA_AMP_MUTE_DEFAULT, HDAA_AMP_VOL_DEFAULT, HDAA_AMP_VOL_DEFAULT); } } else { /* If there is no muter - disable pin output. */ if (connected) val = w->wclass.pin.ctrl | HDA_CMD_SET_PIN_WIDGET_CTRL_OUT_ENABLE; else val = w->wclass.pin.ctrl & ~HDA_CMD_SET_PIN_WIDGET_CTRL_OUT_ENABLE; if (val != w->wclass.pin.ctrl) { w->wclass.pin.ctrl = val; hda_command(devinfo->dev, HDA_CMD_SET_PIN_WIDGET_CTRL(0, w->nid, w->wclass.pin.ctrl)); } } /* (Un)Mute other pins. */ for (j = 0; j < 15; j++) { if (as->pins[j] <= 0) continue; ctl = hdaa_audio_ctl_amp_get(devinfo, as->pins[j], HDAA_CTL_IN, -1, 1); if (ctl != NULL && ctl->mute) { /* If pin has muter - use it. */ val = connected ? 1 : 0; if (val == ctl->forcemute) continue; ctl->forcemute = val; hdaa_audio_ctl_amp_set(ctl, HDAA_AMP_MUTE_DEFAULT, HDAA_AMP_VOL_DEFAULT, HDAA_AMP_VOL_DEFAULT); continue; } /* If there is no muter - disable pin output. */ w1 = hdaa_widget_get(devinfo, as->pins[j]); if (w1 != NULL) { if (connected) val = w1->wclass.pin.ctrl & ~HDA_CMD_SET_PIN_WIDGET_CTRL_OUT_ENABLE; else val = w1->wclass.pin.ctrl | HDA_CMD_SET_PIN_WIDGET_CTRL_OUT_ENABLE; if (val != w1->wclass.pin.ctrl) { w1->wclass.pin.ctrl = val; hda_command(devinfo->dev, HDA_CMD_SET_PIN_WIDGET_CTRL(0, w1->nid, w1->wclass.pin.ctrl)); } } } } /* * Recording source change handler. */ static void hdaa_autorecsrc_handler(struct hdaa_audio_as *as, struct hdaa_widget *w) { struct hdaa_pcm_devinfo *pdevinfo = as->pdevinfo; struct hdaa_devinfo *devinfo; struct hdaa_widget *w1; int i, mask, fullmask, prio, bestprio; char buf[128]; if (!as->mixed || pdevinfo == NULL || pdevinfo->mixer == NULL) return; /* Don't touch anything if we asked not to. */ if (pdevinfo->autorecsrc == 0 || (pdevinfo->autorecsrc == 1 && w != NULL)) return; /* Don't touch anything if "mix" or "speaker" selected. */ if (pdevinfo->recsrc & (SOUND_MASK_IMIX | SOUND_MASK_SPEAKER)) return; /* Don't touch anything if several selected. 
*/ if (ffs(pdevinfo->recsrc) != fls(pdevinfo->recsrc)) return; devinfo = pdevinfo->devinfo; mask = fullmask = 0; bestprio = 0; for (i = 0; i < 16; i++) { if (as->pins[i] <= 0) continue; w1 = hdaa_widget_get(devinfo, as->pins[i]); if (w1 == NULL || w1->enable == 0) continue; if (w1->wclass.pin.connected == 0) continue; prio = (w1->wclass.pin.connected == 1) ? 2 : 1; if (prio < bestprio) continue; if (prio > bestprio) { mask = 0; bestprio = prio; } mask |= (1 << w1->ossdev); fullmask |= (1 << w1->ossdev); } if (mask == 0) return; /* Prefer newly connected input. */ if (w != NULL && (mask & (1 << w->ossdev))) mask = (1 << w->ossdev); /* Prefer previously selected input */ if (mask & pdevinfo->recsrc) mask &= pdevinfo->recsrc; /* Prefer mic. */ if (mask & SOUND_MASK_MIC) mask = SOUND_MASK_MIC; /* Prefer monitor (2nd mic). */ if (mask & SOUND_MASK_MONITOR) mask = SOUND_MASK_MONITOR; /* Just take first one. */ mask = (1 << (ffs(mask) - 1)); HDA_BOOTVERBOSE( hdaa_audio_ctl_ossmixer_mask2allname(mask, buf, sizeof(buf)); device_printf(pdevinfo->dev, "Automatically set rec source to: %s\n", buf); ); hdaa_unlock(devinfo); mix_setrecsrc(pdevinfo->mixer, mask); hdaa_lock(devinfo); } /* * Jack presence detection event handler. */ static void hdaa_presence_handler(struct hdaa_widget *w) { struct hdaa_devinfo *devinfo = w->devinfo; struct hdaa_audio_as *as; uint32_t res; int connected, old; if (w->enable == 0 || w->type != HDA_PARAM_AUDIO_WIDGET_CAP_TYPE_PIN_COMPLEX) return; if (HDA_PARAM_PIN_CAP_PRESENCE_DETECT_CAP(w->wclass.pin.cap) == 0 || (HDA_CONFIG_DEFAULTCONF_MISC(w->wclass.pin.config) & 1) != 0) return; res = hda_command(devinfo->dev, HDA_CMD_GET_PIN_SENSE(0, w->nid)); connected = (res & HDA_CMD_GET_PIN_SENSE_PRESENCE_DETECT) != 0; if (devinfo->quirks & HDAA_QUIRK_SENSEINV) connected = !connected; old = w->wclass.pin.connected; if (connected == old) return; w->wclass.pin.connected = connected; HDA_BOOTVERBOSE( if (connected || old != 2) { device_printf(devinfo->dev, "Pin sense: nid=%d sense=0x%08x (%sconnected)\n", w->nid, res, !connected ? "dis" : ""); } ); as = &devinfo->as[w->bindas]; if (as->hpredir >= 0 && as->pins[15] == w->nid) hdaa_hpredir_handler(w); if (as->dir == HDAA_CTL_IN && old != 2) hdaa_autorecsrc_handler(as, w); if (old != 2) hdaa_channels_handler(as); } /* * Callback for poll based presence detection. 
*/ static void hdaa_jack_poll_callback(void *arg) { struct hdaa_devinfo *devinfo = arg; struct hdaa_widget *w; int i; hdaa_lock(devinfo); if (devinfo->poll_ival == 0) { hdaa_unlock(devinfo); return; } for (i = 0; i < devinfo->ascnt; i++) { if (devinfo->as[i].hpredir < 0) continue; w = hdaa_widget_get(devinfo, devinfo->as[i].pins[15]); if (w == NULL || w->enable == 0 || w->type != HDA_PARAM_AUDIO_WIDGET_CAP_TYPE_PIN_COMPLEX) continue; hdaa_presence_handler(w); } callout_reset(&devinfo->poll_jack, devinfo->poll_ival, hdaa_jack_poll_callback, devinfo); hdaa_unlock(devinfo); } static void hdaa_eld_dump(struct hdaa_widget *w) { struct hdaa_devinfo *devinfo = w->devinfo; device_t dev = devinfo->dev; uint8_t *sad; int len, mnl, i, sadc, fmt; if (w->eld == NULL || w->eld_len < 4) return; device_printf(dev, "ELD nid=%d: ELD_Ver=%u Baseline_ELD_Len=%u\n", w->nid, w->eld[0] >> 3, w->eld[2]); if ((w->eld[0] >> 3) != 0x02) return; len = min(w->eld_len, (u_int)w->eld[2] * 4); mnl = w->eld[4] & 0x1f; device_printf(dev, "ELD nid=%d: CEA_EDID_Ver=%u MNL=%u\n", w->nid, w->eld[4] >> 5, mnl); sadc = w->eld[5] >> 4; device_printf(dev, "ELD nid=%d: SAD_Count=%u Conn_Type=%u S_AI=%u HDCP=%u\n", w->nid, sadc, (w->eld[5] >> 2) & 0x3, (w->eld[5] >> 1) & 0x1, w->eld[5] & 0x1); device_printf(dev, "ELD nid=%d: Aud_Synch_Delay=%ums\n", w->nid, w->eld[6] * 2); device_printf(dev, "ELD nid=%d: Channels=0x%b\n", w->nid, w->eld[7], "\020\07RLRC\06FLRC\05RC\04RLR\03FC\02LFE\01FLR"); device_printf(dev, "ELD nid=%d: Port_ID=0x%02x%02x%02x%02x%02x%02x%02x%02x\n", w->nid, w->eld[8], w->eld[9], w->eld[10], w->eld[11], w->eld[12], w->eld[13], w->eld[14], w->eld[15]); device_printf(dev, "ELD nid=%d: Manufacturer_Name=0x%02x%02x\n", w->nid, w->eld[16], w->eld[17]); device_printf(dev, "ELD nid=%d: Product_Code=0x%02x%02x\n", w->nid, w->eld[18], w->eld[19]); device_printf(dev, "ELD nid=%d: Monitor_Name_String='%.*s'\n", w->nid, mnl, &w->eld[20]); for (i = 0; i < sadc; i++) { sad = &w->eld[20 + mnl + i * 3]; fmt = (sad[0] >> 3) & 0x0f; if (fmt == HDA_HDMI_CODING_TYPE_REF_CTX) { fmt = (sad[2] >> 3) & 0x1f; if (fmt < 1 || fmt > 3) fmt = 0; else fmt += 14; } device_printf(dev, "ELD nid=%d: %s %dch freqs=0x%b", w->nid, HDA_HDMI_CODING_TYPES[fmt], (sad[0] & 0x07) + 1, sad[1], "\020\007192\006176\00596\00488\00348\00244\00132"); switch (fmt) { case HDA_HDMI_CODING_TYPE_LPCM: printf(" sizes=0x%b", sad[2] & 0x07, "\020\00324\00220\00116"); break; case HDA_HDMI_CODING_TYPE_AC3: case HDA_HDMI_CODING_TYPE_MPEG1: case HDA_HDMI_CODING_TYPE_MP3: case HDA_HDMI_CODING_TYPE_MPEG2: case HDA_HDMI_CODING_TYPE_AACLC: case HDA_HDMI_CODING_TYPE_DTS: case HDA_HDMI_CODING_TYPE_ATRAC: printf(" max_bitrate=%d", sad[2] * 8000); break; case HDA_HDMI_CODING_TYPE_WMAPRO: printf(" profile=%d", sad[2] & 0x07); break; } printf("\n"); } } static void hdaa_eld_handler(struct hdaa_widget *w) { struct hdaa_devinfo *devinfo = w->devinfo; uint32_t res; int i; if (w->enable == 0 || w->type != HDA_PARAM_AUDIO_WIDGET_CAP_TYPE_PIN_COMPLEX) return; if (HDA_PARAM_PIN_CAP_PRESENCE_DETECT_CAP(w->wclass.pin.cap) == 0 || (HDA_CONFIG_DEFAULTCONF_MISC(w->wclass.pin.config) & 1) != 0) return; res = hda_command(devinfo->dev, HDA_CMD_GET_PIN_SENSE(0, w->nid)); if ((w->eld != 0) == ((res & HDA_CMD_GET_PIN_SENSE_ELD_VALID) != 0)) return; if (w->eld != NULL) { w->eld_len = 0; free(w->eld, M_HDAA); w->eld = NULL; } HDA_BOOTVERBOSE( device_printf(devinfo->dev, "Pin sense: nid=%d sense=0x%08x " "(%sconnected, ELD %svalid)\n", w->nid, res, (res & HDA_CMD_GET_PIN_SENSE_PRESENCE_DETECT) ? 
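/*
 * Editor's note: illustrative standalone sketch, not part of this
 * driver.  A CEA Short Audio Descriptor, as walked by hdaa_eld_dump()
 * above, is three bytes: byte 0 carries the coding type (bits 6:3) and
 * max channels - 1 (bits 2:0), byte 1 is a sample-rate bitmap
 * (32...192 kHz), and byte 2 is either a bit-depth bitmap (LPCM) or
 * max bitrate / 8000 for most compressed formats.
 */
#include <stdint.h>

struct cea_sad {
	unsigned coding_type;	/* 1 = LPCM, 2 = AC-3, ... */
	unsigned channels;	/* 1..8 */
	unsigned rate_bits;	/* bit 0 = 32 kHz ... bit 6 = 192 kHz */
	unsigned byte2;		/* bit depths (LPCM) or max bitrate / 8000 */
};

static void
cea_sad_decode(const uint8_t sad[3], struct cea_sad *out)
{
	out->coding_type = (sad[0] >> 3) & 0x0f;
	out->channels = (sad[0] & 0x07) + 1;
	out->rate_bits = sad[1] & 0x7f;
	out->byte2 = sad[2];
}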
"" : "dis", (res & HDA_CMD_GET_PIN_SENSE_ELD_VALID) ? "" : "in"); ); if ((res & HDA_CMD_GET_PIN_SENSE_ELD_VALID) == 0) return; res = hda_command(devinfo->dev, HDA_CMD_GET_HDMI_DIP_SIZE(0, w->nid, 0x08)); if (res == HDA_INVALID) return; w->eld_len = res & 0xff; if (w->eld_len != 0) w->eld = malloc(w->eld_len, M_HDAA, M_ZERO | M_NOWAIT); if (w->eld == NULL) { w->eld_len = 0; return; } for (i = 0; i < w->eld_len; i++) { res = hda_command(devinfo->dev, HDA_CMD_GET_HDMI_ELDD(0, w->nid, i)); if (res & 0x80000000) w->eld[i] = res & 0xff; } HDA_BOOTVERBOSE( hdaa_eld_dump(w); ); hdaa_channels_handler(&devinfo->as[w->bindas]); } /* * Pin sense initializer. */ static void hdaa_sense_init(struct hdaa_devinfo *devinfo) { struct hdaa_audio_as *as; struct hdaa_widget *w; int i, poll = 0; for (i = devinfo->startnode; i < devinfo->endnode; i++) { w = hdaa_widget_get(devinfo, i); if (w == NULL || w->enable == 0 || w->type != HDA_PARAM_AUDIO_WIDGET_CAP_TYPE_PIN_COMPLEX) continue; if (HDA_PARAM_AUDIO_WIDGET_CAP_UNSOL_CAP(w->param.widget_cap)) { if (w->unsol < 0) w->unsol = HDAC_UNSOL_ALLOC( device_get_parent(devinfo->dev), devinfo->dev, w->nid); hda_command(devinfo->dev, HDA_CMD_SET_UNSOLICITED_RESPONSE(0, w->nid, HDA_CMD_SET_UNSOLICITED_RESPONSE_ENABLE | w->unsol)); } as = &devinfo->as[w->bindas]; if (as->hpredir >= 0 && as->pins[15] == w->nid) { if (HDA_PARAM_PIN_CAP_PRESENCE_DETECT_CAP(w->wclass.pin.cap) == 0 || (HDA_CONFIG_DEFAULTCONF_MISC(w->wclass.pin.config) & 1) != 0) { device_printf(devinfo->dev, "No presence detection support at nid %d\n", w->nid); } else { if (w->unsol < 0) poll = 1; HDA_BOOTVERBOSE( device_printf(devinfo->dev, "Headphones redirection for " "association %d nid=%d using %s.\n", w->bindas, w->nid, (w->unsol < 0) ? "polling" : "unsolicited responses"); ); } } hdaa_presence_handler(w); if (!HDA_PARAM_PIN_CAP_DP(w->wclass.pin.cap) && !HDA_PARAM_PIN_CAP_HDMI(w->wclass.pin.cap)) continue; hdaa_eld_handler(w); } if (poll) { callout_reset(&devinfo->poll_jack, 1, hdaa_jack_poll_callback, devinfo); } } static void hdaa_sense_deinit(struct hdaa_devinfo *devinfo) { struct hdaa_widget *w; int i; callout_stop(&devinfo->poll_jack); for (i = devinfo->startnode; i < devinfo->endnode; i++) { w = hdaa_widget_get(devinfo, i); if (w == NULL || w->enable == 0 || w->type != HDA_PARAM_AUDIO_WIDGET_CAP_TYPE_PIN_COMPLEX) continue; if (w->unsol < 0) continue; hda_command(devinfo->dev, HDA_CMD_SET_UNSOLICITED_RESPONSE(0, w->nid, 0)); HDAC_UNSOL_FREE( device_get_parent(devinfo->dev), devinfo->dev, w->unsol); w->unsol = -1; } } uint32_t hdaa_widget_pin_patch(uint32_t config, const char *str) { char buf[256]; char *key, *value, *rest, *bad; int ival, i; strlcpy(buf, str, sizeof(buf)); rest = buf; while ((key = strsep(&rest, "=")) != NULL) { value = strsep(&rest, " \t"); if (value == NULL) break; ival = strtol(value, &bad, 10); if (strcmp(key, "seq") == 0) { config &= ~HDA_CONFIG_DEFAULTCONF_SEQUENCE_MASK; config |= ((ival << HDA_CONFIG_DEFAULTCONF_SEQUENCE_SHIFT) & HDA_CONFIG_DEFAULTCONF_SEQUENCE_MASK); } else if (strcmp(key, "as") == 0) { config &= ~HDA_CONFIG_DEFAULTCONF_ASSOCIATION_MASK; config |= ((ival << HDA_CONFIG_DEFAULTCONF_ASSOCIATION_SHIFT) & HDA_CONFIG_DEFAULTCONF_ASSOCIATION_MASK); } else if (strcmp(key, "misc") == 0) { config &= ~HDA_CONFIG_DEFAULTCONF_MISC_MASK; config |= ((ival << HDA_CONFIG_DEFAULTCONF_MISC_SHIFT) & HDA_CONFIG_DEFAULTCONF_MISC_MASK); } else if (strcmp(key, "color") == 0) { config &= ~HDA_CONFIG_DEFAULTCONF_COLOR_MASK; if (bad[0] == 0) { config |= ((ival << 
HDA_CONFIG_DEFAULTCONF_COLOR_SHIFT) & HDA_CONFIG_DEFAULTCONF_COLOR_MASK); } for (i = 0; i < 16; i++) { if (strcasecmp(HDA_COLORS[i], value) == 0) { config |= (i << HDA_CONFIG_DEFAULTCONF_COLOR_SHIFT); break; } } } else if (strcmp(key, "ctype") == 0) { config &= ~HDA_CONFIG_DEFAULTCONF_CONNECTION_TYPE_MASK; if (bad[0] == 0) { config |= ((ival << HDA_CONFIG_DEFAULTCONF_CONNECTION_TYPE_SHIFT) & HDA_CONFIG_DEFAULTCONF_CONNECTION_TYPE_MASK); } for (i = 0; i < 16; i++) { if (strcasecmp(HDA_CONNECTORS[i], value) == 0) { config |= (i << HDA_CONFIG_DEFAULTCONF_CONNECTION_TYPE_SHIFT); break; } } } else if (strcmp(key, "device") == 0) { config &= ~HDA_CONFIG_DEFAULTCONF_DEVICE_MASK; if (bad[0] == 0) { config |= ((ival << HDA_CONFIG_DEFAULTCONF_DEVICE_SHIFT) & HDA_CONFIG_DEFAULTCONF_DEVICE_MASK); continue; } for (i = 0; i < 16; i++) { if (strcasecmp(HDA_DEVS[i], value) == 0) { config |= (i << HDA_CONFIG_DEFAULTCONF_DEVICE_SHIFT); break; } } } else if (strcmp(key, "loc") == 0) { config &= ~HDA_CONFIG_DEFAULTCONF_LOCATION_MASK; if (bad[0] == 0) { config |= ((ival << HDA_CONFIG_DEFAULTCONF_LOCATION_SHIFT) & HDA_CONFIG_DEFAULTCONF_LOCATION_MASK); continue; } for (i = 0; i < 64; i++) { if (strcasecmp(HDA_LOCS[i], value) == 0) { config |= (i << HDA_CONFIG_DEFAULTCONF_LOCATION_SHIFT); break; } } } else if (strcmp(key, "conn") == 0) { config &= ~HDA_CONFIG_DEFAULTCONF_CONNECTIVITY_MASK; if (bad[0] == 0) { config |= ((ival << HDA_CONFIG_DEFAULTCONF_CONNECTIVITY_SHIFT) & HDA_CONFIG_DEFAULTCONF_CONNECTIVITY_MASK); continue; } for (i = 0; i < 4; i++) { if (strcasecmp(HDA_CONNS[i], value) == 0) { config |= (i << HDA_CONFIG_DEFAULTCONF_CONNECTIVITY_SHIFT); break; } } } } return (config); } uint32_t hdaa_gpio_patch(uint32_t gpio, const char *str) { char buf[256]; char *key, *value, *rest; int ikey, i; strlcpy(buf, str, sizeof(buf)); rest = buf; while ((key = strsep(&rest, "=")) != NULL) { value = strsep(&rest, " \t"); if (value == NULL) break; ikey = strtol(key, NULL, 10); if (ikey < 0 || ikey > 7) continue; for (i = 0; i < 7; i++) { if (strcasecmp(HDA_GPIO_ACTIONS[i], value) == 0) { gpio &= ~HDAA_GPIO_MASK(ikey); gpio |= i << HDAA_GPIO_SHIFT(ikey); break; } } } return (gpio); } static void hdaa_local_patch_pin(struct hdaa_widget *w) { device_t dev = w->devinfo->dev; const char *res = NULL; uint32_t config, orig; char buf[32]; config = orig = w->wclass.pin.config; snprintf(buf, sizeof(buf), "cad%u.nid%u.config", hda_get_codec_id(dev), w->nid); if (resource_string_value(device_get_name( device_get_parent(device_get_parent(dev))), device_get_unit(device_get_parent(device_get_parent(dev))), buf, &res) == 0) { if (strncmp(res, "0x", 2) == 0) { config = strtol(res + 2, NULL, 16); } else { config = hdaa_widget_pin_patch(config, res); } } snprintf(buf, sizeof(buf), "nid%u.config", w->nid); if (resource_string_value(device_get_name(dev), device_get_unit(dev), buf, &res) == 0) { if (strncmp(res, "0x", 2) == 0) { config = strtol(res + 2, NULL, 16); } else { config = hdaa_widget_pin_patch(config, res); } } HDA_BOOTVERBOSE( if (config != orig) device_printf(w->devinfo->dev, "Patching pin config nid=%u 0x%08x -> 0x%08x\n", w->nid, orig, config); ); w->wclass.pin.newconf = w->wclass.pin.config = config; } static void hdaa_dump_audio_formats_sb(struct sbuf *sb, uint32_t fcap, uint32_t pcmcap) { uint32_t cap; cap = fcap; if (cap != 0) { sbuf_printf(sb, " Stream cap: 0x%08x", cap); if (HDA_PARAM_SUPP_STREAM_FORMATS_AC3(cap)) sbuf_printf(sb, " AC3"); if (HDA_PARAM_SUPP_STREAM_FORMATS_FLOAT32(cap)) sbuf_printf(sb, " FLOAT32"); if 
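/*
 * Editor's note: illustrative usage, not part of this driver.  The
 * pin patch parser above accepts either a raw "0x%08x" value or a list
 * of "key=value" pairs (seq, as, misc, color, ctype, device, loc,
 * conn), where values may be numeric or one of the names from the
 * HDA_* string tables.  A hypothetical device.hints override could
 * look like:
 *
 *   hint.hdaa.0.nid26.config="as=1 seq=15 device=Headphones"
 *   hint.hdac.0.cad0.nid20.config="as=1 seq=0 conn=Fixed"
 *
 * (the exact hint paths depend on how the hdac/hdaa devices are
 * numbered on a given system).
 */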
(HDA_PARAM_SUPP_STREAM_FORMATS_PCM(cap)) sbuf_printf(sb, " PCM"); sbuf_printf(sb, "\n"); } cap = pcmcap; if (cap != 0) { sbuf_printf(sb, " PCM cap: 0x%08x", cap); if (HDA_PARAM_SUPP_PCM_SIZE_RATE_8BIT(cap)) sbuf_printf(sb, " 8"); if (HDA_PARAM_SUPP_PCM_SIZE_RATE_16BIT(cap)) sbuf_printf(sb, " 16"); if (HDA_PARAM_SUPP_PCM_SIZE_RATE_20BIT(cap)) sbuf_printf(sb, " 20"); if (HDA_PARAM_SUPP_PCM_SIZE_RATE_24BIT(cap)) sbuf_printf(sb, " 24"); if (HDA_PARAM_SUPP_PCM_SIZE_RATE_32BIT(cap)) sbuf_printf(sb, " 32"); sbuf_printf(sb, " bits,"); if (HDA_PARAM_SUPP_PCM_SIZE_RATE_8KHZ(cap)) sbuf_printf(sb, " 8"); if (HDA_PARAM_SUPP_PCM_SIZE_RATE_11KHZ(cap)) sbuf_printf(sb, " 11"); if (HDA_PARAM_SUPP_PCM_SIZE_RATE_16KHZ(cap)) sbuf_printf(sb, " 16"); if (HDA_PARAM_SUPP_PCM_SIZE_RATE_22KHZ(cap)) sbuf_printf(sb, " 22"); if (HDA_PARAM_SUPP_PCM_SIZE_RATE_32KHZ(cap)) sbuf_printf(sb, " 32"); if (HDA_PARAM_SUPP_PCM_SIZE_RATE_44KHZ(cap)) sbuf_printf(sb, " 44"); sbuf_printf(sb, " 48"); if (HDA_PARAM_SUPP_PCM_SIZE_RATE_88KHZ(cap)) sbuf_printf(sb, " 88"); if (HDA_PARAM_SUPP_PCM_SIZE_RATE_96KHZ(cap)) sbuf_printf(sb, " 96"); if (HDA_PARAM_SUPP_PCM_SIZE_RATE_176KHZ(cap)) sbuf_printf(sb, " 176"); if (HDA_PARAM_SUPP_PCM_SIZE_RATE_192KHZ(cap)) sbuf_printf(sb, " 192"); sbuf_printf(sb, " KHz\n"); } } static void hdaa_dump_pin_sb(struct sbuf *sb, struct hdaa_widget *w) { uint32_t pincap, conf; pincap = w->wclass.pin.cap; sbuf_printf(sb, " Pin cap: 0x%08x", pincap); if (HDA_PARAM_PIN_CAP_IMP_SENSE_CAP(pincap)) sbuf_printf(sb, " ISC"); if (HDA_PARAM_PIN_CAP_TRIGGER_REQD(pincap)) sbuf_printf(sb, " TRQD"); if (HDA_PARAM_PIN_CAP_PRESENCE_DETECT_CAP(pincap)) sbuf_printf(sb, " PDC"); if (HDA_PARAM_PIN_CAP_HEADPHONE_CAP(pincap)) sbuf_printf(sb, " HP"); if (HDA_PARAM_PIN_CAP_OUTPUT_CAP(pincap)) sbuf_printf(sb, " OUT"); if (HDA_PARAM_PIN_CAP_INPUT_CAP(pincap)) sbuf_printf(sb, " IN"); if (HDA_PARAM_PIN_CAP_BALANCED_IO_PINS(pincap)) sbuf_printf(sb, " BAL"); if (HDA_PARAM_PIN_CAP_HDMI(pincap)) sbuf_printf(sb, " HDMI"); if (HDA_PARAM_PIN_CAP_VREF_CTRL(pincap)) { sbuf_printf(sb, " VREF["); if (HDA_PARAM_PIN_CAP_VREF_CTRL_50(pincap)) sbuf_printf(sb, " 50"); if (HDA_PARAM_PIN_CAP_VREF_CTRL_80(pincap)) sbuf_printf(sb, " 80"); if (HDA_PARAM_PIN_CAP_VREF_CTRL_100(pincap)) sbuf_printf(sb, " 100"); if (HDA_PARAM_PIN_CAP_VREF_CTRL_GROUND(pincap)) sbuf_printf(sb, " GROUND"); if (HDA_PARAM_PIN_CAP_VREF_CTRL_HIZ(pincap)) sbuf_printf(sb, " HIZ"); sbuf_printf(sb, " ]"); } if (HDA_PARAM_PIN_CAP_EAPD_CAP(pincap)) sbuf_printf(sb, " EAPD"); if (HDA_PARAM_PIN_CAP_DP(pincap)) sbuf_printf(sb, " DP"); if (HDA_PARAM_PIN_CAP_HBR(pincap)) sbuf_printf(sb, " HBR"); sbuf_printf(sb, "\n"); conf = w->wclass.pin.config; sbuf_printf(sb, " Pin config: 0x%08x", conf); sbuf_printf(sb, " as=%d seq=%d " "device=%s conn=%s ctype=%s loc=%s color=%s misc=%d\n", HDA_CONFIG_DEFAULTCONF_ASSOCIATION(conf), HDA_CONFIG_DEFAULTCONF_SEQUENCE(conf), HDA_DEVS[HDA_CONFIG_DEFAULTCONF_DEVICE(conf)], HDA_CONNS[HDA_CONFIG_DEFAULTCONF_CONNECTIVITY(conf)], HDA_CONNECTORS[HDA_CONFIG_DEFAULTCONF_CONNECTION_TYPE(conf)], HDA_LOCS[HDA_CONFIG_DEFAULTCONF_LOCATION(conf)], HDA_COLORS[HDA_CONFIG_DEFAULTCONF_COLOR(conf)], HDA_CONFIG_DEFAULTCONF_MISC(conf)); sbuf_printf(sb, " Pin control: 0x%08x", w->wclass.pin.ctrl); if (w->wclass.pin.ctrl & HDA_CMD_SET_PIN_WIDGET_CTRL_HPHN_ENABLE) sbuf_printf(sb, " HP"); if (w->wclass.pin.ctrl & HDA_CMD_SET_PIN_WIDGET_CTRL_IN_ENABLE) sbuf_printf(sb, " IN"); if (w->wclass.pin.ctrl & HDA_CMD_SET_PIN_WIDGET_CTRL_OUT_ENABLE) sbuf_printf(sb, " OUT"); if 
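/*
 * Editor's note: illustrative standalone sketch, not part of this
 * driver.  The "Pin config" value dumped above is the 32-bit
 * Configuration Default register; the field layout assumed here
 * follows the HDA spec: 31:30 connectivity, 29:24 location,
 * 23:20 device, 19:16 connection type, 15:12 color, 11:8 misc,
 * 7:4 association, 3:0 sequence.
 */
#include <stdint.h>

struct pin_defconf {
	unsigned conn, loc, device, ctype, color, misc, as, seq;
};

static void
pin_defconf_decode(uint32_t conf, struct pin_defconf *p)
{
	p->conn   = (conf >> 30) & 0x03;
	p->loc    = (conf >> 24) & 0x3f;
	p->device = (conf >> 20) & 0x0f;
	p->ctype  = (conf >> 16) & 0x0f;
	p->color  = (conf >> 12) & 0x0f;
	p->misc   = (conf >>  8) & 0x0f;
	p->as     = (conf >>  4) & 0x0f;
	p->seq    = conf & 0x0f;
}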
(HDA_PARAM_AUDIO_WIDGET_CAP_DIGITAL(w->param.widget_cap)) { if ((w->wclass.pin.ctrl & HDA_CMD_SET_PIN_WIDGET_CTRL_VREF_ENABLE_MASK) == 0x03) sbuf_printf(sb, " HBR"); else if ((w->wclass.pin.ctrl & HDA_CMD_SET_PIN_WIDGET_CTRL_VREF_ENABLE_MASK) != 0) sbuf_printf(sb, " EPTs"); } else { if ((w->wclass.pin.ctrl & HDA_CMD_SET_PIN_WIDGET_CTRL_VREF_ENABLE_MASK) != 0) sbuf_printf(sb, " VREFs"); } sbuf_printf(sb, "\n"); } static void hdaa_dump_amp_sb(struct sbuf *sb, uint32_t cap, const char *banner) { int offset, size, step; offset = HDA_PARAM_OUTPUT_AMP_CAP_OFFSET(cap); size = HDA_PARAM_OUTPUT_AMP_CAP_STEPSIZE(cap); step = HDA_PARAM_OUTPUT_AMP_CAP_NUMSTEPS(cap); sbuf_printf(sb, " %s amp: 0x%08x " "mute=%d step=%d size=%d offset=%d (%+d/%+ddB)\n", banner, cap, HDA_PARAM_OUTPUT_AMP_CAP_MUTE_CAP(cap), step, size, offset, ((0 - offset) * (size + 1)) / 4, ((step - offset) * (size + 1)) / 4); } static int hdaa_sysctl_caps(SYSCTL_HANDLER_ARGS) { struct hdaa_devinfo *devinfo; struct hdaa_widget *w, *cw; struct sbuf sb; char buf[64]; int error, j; w = (struct hdaa_widget *)oidp->oid_arg1; devinfo = w->devinfo; sbuf_new_for_sysctl(&sb, NULL, 256, req); sbuf_printf(&sb, "%s%s\n", w->name, (w->enable == 0) ? " [DISABLED]" : ""); sbuf_printf(&sb, " Widget cap: 0x%08x", w->param.widget_cap); if (w->param.widget_cap & 0x0ee1) { if (HDA_PARAM_AUDIO_WIDGET_CAP_LR_SWAP(w->param.widget_cap)) sbuf_printf(&sb, " LRSWAP"); if (HDA_PARAM_AUDIO_WIDGET_CAP_POWER_CTRL(w->param.widget_cap)) sbuf_printf(&sb, " PWR"); if (HDA_PARAM_AUDIO_WIDGET_CAP_DIGITAL(w->param.widget_cap)) sbuf_printf(&sb, " DIGITAL"); if (HDA_PARAM_AUDIO_WIDGET_CAP_UNSOL_CAP(w->param.widget_cap)) sbuf_printf(&sb, " UNSOL"); if (HDA_PARAM_AUDIO_WIDGET_CAP_PROC_WIDGET(w->param.widget_cap)) sbuf_printf(&sb, " PROC"); if (HDA_PARAM_AUDIO_WIDGET_CAP_STRIPE(w->param.widget_cap)) sbuf_printf(&sb, " STRIPE(x%d)", 1 << (fls(w->wclass.conv.stripecap) - 1)); j = HDA_PARAM_AUDIO_WIDGET_CAP_CC(w->param.widget_cap); if (j == 1) sbuf_printf(&sb, " STEREO"); else if (j > 1) sbuf_printf(&sb, " %dCH", j + 1); } sbuf_printf(&sb, "\n"); if (w->bindas != -1) { sbuf_printf(&sb, " Association: %d (0x%04x)\n", w->bindas, w->bindseqmask); } if (w->ossmask != 0 || w->ossdev >= 0) { sbuf_printf(&sb, " OSS: %s", hdaa_audio_ctl_ossmixer_mask2allname(w->ossmask, buf, sizeof(buf))); if (w->ossdev >= 0) sbuf_printf(&sb, " (%s)", ossnames[w->ossdev]); sbuf_printf(&sb, "\n"); } if (w->type == HDA_PARAM_AUDIO_WIDGET_CAP_TYPE_AUDIO_OUTPUT || w->type == HDA_PARAM_AUDIO_WIDGET_CAP_TYPE_AUDIO_INPUT) { hdaa_dump_audio_formats_sb(&sb, w->param.supp_stream_formats, w->param.supp_pcm_size_rate); } else if (w->type == HDA_PARAM_AUDIO_WIDGET_CAP_TYPE_PIN_COMPLEX || w->waspin) hdaa_dump_pin_sb(&sb, w); if (w->param.eapdbtl != HDA_INVALID) { sbuf_printf(&sb, " EAPD: 0x%08x%s%s%s\n", w->param.eapdbtl, (w->param.eapdbtl & HDA_CMD_SET_EAPD_BTL_ENABLE_LR_SWAP) ? " LRSWAP" : "", (w->param.eapdbtl & HDA_CMD_SET_EAPD_BTL_ENABLE_EAPD) ? " EAPD" : "", (w->param.eapdbtl & HDA_CMD_SET_EAPD_BTL_ENABLE_BTL) ? 
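/*
 * Editor's note: illustrative standalone sketch, not part of this
 * driver.  The amplifier capability fields dumped above define a gain
 * scale where every step is (stepsize + 1) * 0.25 dB and the raw value
 * "offset" corresponds to 0 dB; that is exactly how the (%+d/%+ddB)
 * range is computed.  A hypothetical helper returning the range in
 * quarter-dB units:
 */
static void
amp_range_qdb(int numsteps, int stepsize, int offset, int *min, int *max)
{
	*min = (0 - offset) * (stepsize + 1);
	*max = (numsteps - offset) * (stepsize + 1);
}
/* Example: numsteps=31, stepsize=3, offset=23 -> -92..+32, i.e. -23..+8 dB. */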
" BTL" : ""); } if (HDA_PARAM_AUDIO_WIDGET_CAP_OUT_AMP(w->param.widget_cap) && w->param.outamp_cap != 0) hdaa_dump_amp_sb(&sb, w->param.outamp_cap, "Output"); if (HDA_PARAM_AUDIO_WIDGET_CAP_IN_AMP(w->param.widget_cap) && w->param.inamp_cap != 0) hdaa_dump_amp_sb(&sb, w->param.inamp_cap, " Input"); if (w->nconns > 0) sbuf_printf(&sb, " Connections: %d\n", w->nconns); for (j = 0; j < w->nconns; j++) { cw = hdaa_widget_get(devinfo, w->conns[j]); sbuf_printf(&sb, " + %s<- nid=%d [%s]", (w->connsenable[j] == 0)?"[DISABLED] ":"", w->conns[j], (cw == NULL) ? "GHOST!" : cw->name); if (cw == NULL) sbuf_printf(&sb, " [UNKNOWN]"); else if (cw->enable == 0) sbuf_printf(&sb, " [DISABLED]"); if (w->nconns > 1 && w->selconn == j && w->type != HDA_PARAM_AUDIO_WIDGET_CAP_TYPE_AUDIO_MIXER) sbuf_printf(&sb, " (selected)"); sbuf_printf(&sb, "\n"); } error = sbuf_finish(&sb); sbuf_delete(&sb); return (error); } static int hdaa_sysctl_config(SYSCTL_HANDLER_ARGS) { char buf[256]; int error; uint32_t conf; conf = *(uint32_t *)oidp->oid_arg1; snprintf(buf, sizeof(buf), "0x%08x as=%d seq=%d " "device=%s conn=%s ctype=%s loc=%s color=%s misc=%d", conf, HDA_CONFIG_DEFAULTCONF_ASSOCIATION(conf), HDA_CONFIG_DEFAULTCONF_SEQUENCE(conf), HDA_DEVS[HDA_CONFIG_DEFAULTCONF_DEVICE(conf)], HDA_CONNS[HDA_CONFIG_DEFAULTCONF_CONNECTIVITY(conf)], HDA_CONNECTORS[HDA_CONFIG_DEFAULTCONF_CONNECTION_TYPE(conf)], HDA_LOCS[HDA_CONFIG_DEFAULTCONF_LOCATION(conf)], HDA_COLORS[HDA_CONFIG_DEFAULTCONF_COLOR(conf)], HDA_CONFIG_DEFAULTCONF_MISC(conf)); error = sysctl_handle_string(oidp, buf, sizeof(buf), req); if (error != 0 || req->newptr == NULL) return (error); if (strncmp(buf, "0x", 2) == 0) conf = strtol(buf + 2, NULL, 16); else conf = hdaa_widget_pin_patch(conf, buf); *(uint32_t *)oidp->oid_arg1 = conf; return (0); } static void hdaa_config_fetch(const char *str, uint32_t *on, uint32_t *off) { int i = 0, j, k, len, inv; for (;;) { while (str[i] != '\0' && (str[i] == ',' || isspace(str[i]) != 0)) i++; if (str[i] == '\0') return; j = i; while (str[j] != '\0' && !(str[j] == ',' || isspace(str[j]) != 0)) j++; len = j - i; if (len > 2 && strncmp(str + i, "no", 2) == 0) inv = 2; else inv = 0; for (k = 0; len > inv && k < nitems(hdaa_quirks_tab); k++) { if (strncmp(str + i + inv, hdaa_quirks_tab[k].key, len - inv) != 0) continue; if (len - inv != strlen(hdaa_quirks_tab[k].key)) continue; if (inv == 0) { *on |= hdaa_quirks_tab[k].value; *off &= ~hdaa_quirks_tab[k].value; } else { *off |= hdaa_quirks_tab[k].value; *on &= ~hdaa_quirks_tab[k].value; } break; } i = j; } } static int hdaa_sysctl_quirks(SYSCTL_HANDLER_ARGS) { char buf[256]; int error, n = 0, i; uint32_t quirks, quirks_off; quirks = *(uint32_t *)oidp->oid_arg1; buf[0] = 0; for (i = 0; i < nitems(hdaa_quirks_tab); i++) { if ((quirks & hdaa_quirks_tab[i].value) != 0) n += snprintf(buf + n, sizeof(buf) - n, "%s%s", n != 0 ? 
"," : "", hdaa_quirks_tab[i].key); } error = sysctl_handle_string(oidp, buf, sizeof(buf), req); if (error != 0 || req->newptr == NULL) return (error); if (strncmp(buf, "0x", 2) == 0) quirks = strtol(buf + 2, NULL, 16); else { quirks = 0; hdaa_config_fetch(buf, &quirks, &quirks_off); } *(uint32_t *)oidp->oid_arg1 = quirks; return (0); } static void hdaa_local_patch(struct hdaa_devinfo *devinfo) { struct hdaa_widget *w; const char *res = NULL; uint32_t quirks_on = 0, quirks_off = 0, x; int i; for (i = devinfo->startnode; i < devinfo->endnode; i++) { w = hdaa_widget_get(devinfo, i); if (w == NULL) continue; if (w->type == HDA_PARAM_AUDIO_WIDGET_CAP_TYPE_PIN_COMPLEX) hdaa_local_patch_pin(w); } if (resource_string_value(device_get_name(devinfo->dev), device_get_unit(devinfo->dev), "config", &res) == 0) { if (res != NULL && strlen(res) > 0) hdaa_config_fetch(res, &quirks_on, &quirks_off); devinfo->quirks |= quirks_on; devinfo->quirks &= ~quirks_off; } if (devinfo->newquirks == -1) devinfo->newquirks = devinfo->quirks; else devinfo->quirks = devinfo->newquirks; HDA_BOOTHVERBOSE( device_printf(devinfo->dev, "Config options: 0x%08x\n", devinfo->quirks); ); if (resource_string_value(device_get_name(devinfo->dev), device_get_unit(devinfo->dev), "gpio_config", &res) == 0) { if (strncmp(res, "0x", 2) == 0) { devinfo->gpio = strtol(res + 2, NULL, 16); } else { devinfo->gpio = hdaa_gpio_patch(devinfo->gpio, res); } } if (devinfo->newgpio == -1) devinfo->newgpio = devinfo->gpio; else devinfo->gpio = devinfo->newgpio; if (devinfo->newgpo == -1) devinfo->newgpo = devinfo->gpo; else devinfo->gpo = devinfo->newgpo; HDA_BOOTHVERBOSE( device_printf(devinfo->dev, "GPIO config options:"); for (i = 0; i < 7; i++) { x = (devinfo->gpio & HDAA_GPIO_MASK(i)) >> HDAA_GPIO_SHIFT(i); if (x != 0) printf(" %d=%s", i, HDA_GPIO_ACTIONS[x]); } printf("\n"); ); } static void hdaa_widget_connection_parse(struct hdaa_widget *w) { uint32_t res; int i, j, max, ents, entnum; nid_t nid = w->nid; nid_t cnid, addcnid, prevcnid; w->nconns = 0; res = hda_command(w->devinfo->dev, HDA_CMD_GET_PARAMETER(0, nid, HDA_PARAM_CONN_LIST_LENGTH)); ents = HDA_PARAM_CONN_LIST_LENGTH_LIST_LENGTH(res); if (ents < 1) return; entnum = HDA_PARAM_CONN_LIST_LENGTH_LONG_FORM(res) ? 
2 : 4; max = (sizeof(w->conns) / sizeof(w->conns[0])) - 1; prevcnid = 0; #define CONN_RMASK(e) (1 << ((32 / (e)) - 1)) #define CONN_NMASK(e) (CONN_RMASK(e) - 1) #define CONN_RESVAL(r, e, n) ((r) >> ((32 / (e)) * (n))) #define CONN_RANGE(r, e, n) (CONN_RESVAL(r, e, n) & CONN_RMASK(e)) #define CONN_CNID(r, e, n) (CONN_RESVAL(r, e, n) & CONN_NMASK(e)) for (i = 0; i < ents; i += entnum) { res = hda_command(w->devinfo->dev, HDA_CMD_GET_CONN_LIST_ENTRY(0, nid, i)); for (j = 0; j < entnum; j++) { cnid = CONN_CNID(res, entnum, j); if (cnid == 0) { if (w->nconns < ents) device_printf(w->devinfo->dev, "WARNING: nid=%d has zero cnid " "entnum=%d j=%d index=%d " "entries=%d found=%d res=0x%08x\n", nid, entnum, j, i, ents, w->nconns, res); else goto getconns_out; } if (cnid < w->devinfo->startnode || cnid >= w->devinfo->endnode) { HDA_BOOTVERBOSE( device_printf(w->devinfo->dev, "WARNING: nid=%d has cnid outside " "of the AFG range j=%d " "entnum=%d index=%d res=0x%08x\n", nid, j, entnum, i, res); ); } if (CONN_RANGE(res, entnum, j) == 0) addcnid = cnid; else if (prevcnid == 0 || prevcnid >= cnid) { device_printf(w->devinfo->dev, "WARNING: Invalid child range " "nid=%d index=%d j=%d entnum=%d " "prevcnid=%d cnid=%d res=0x%08x\n", nid, i, j, entnum, prevcnid, cnid, res); addcnid = cnid; } else addcnid = prevcnid + 1; while (addcnid <= cnid) { if (w->nconns > max) { device_printf(w->devinfo->dev, "Adding %d (nid=%d): " "Max connection reached! max=%d\n", addcnid, nid, max + 1); goto getconns_out; } w->connsenable[w->nconns] = 1; w->conns[w->nconns++] = addcnid++; } prevcnid = cnid; } } getconns_out: return; } static void hdaa_widget_parse(struct hdaa_widget *w) { device_t dev = w->devinfo->dev; uint32_t wcap, cap; nid_t nid = w->nid; char buf[64]; w->param.widget_cap = wcap = hda_command(dev, HDA_CMD_GET_PARAMETER(0, nid, HDA_PARAM_AUDIO_WIDGET_CAP)); w->type = HDA_PARAM_AUDIO_WIDGET_CAP_TYPE(wcap); hdaa_widget_connection_parse(w); if (HDA_PARAM_AUDIO_WIDGET_CAP_OUT_AMP(wcap)) { if (HDA_PARAM_AUDIO_WIDGET_CAP_AMP_OVR(wcap)) w->param.outamp_cap = hda_command(dev, HDA_CMD_GET_PARAMETER(0, nid, HDA_PARAM_OUTPUT_AMP_CAP)); else w->param.outamp_cap = w->devinfo->outamp_cap; } else w->param.outamp_cap = 0; if (HDA_PARAM_AUDIO_WIDGET_CAP_IN_AMP(wcap)) { if (HDA_PARAM_AUDIO_WIDGET_CAP_AMP_OVR(wcap)) w->param.inamp_cap = hda_command(dev, HDA_CMD_GET_PARAMETER(0, nid, HDA_PARAM_INPUT_AMP_CAP)); else w->param.inamp_cap = w->devinfo->inamp_cap; } else w->param.inamp_cap = 0; if (w->type == HDA_PARAM_AUDIO_WIDGET_CAP_TYPE_AUDIO_OUTPUT || w->type == HDA_PARAM_AUDIO_WIDGET_CAP_TYPE_AUDIO_INPUT) { if (HDA_PARAM_AUDIO_WIDGET_CAP_FORMAT_OVR(wcap)) { cap = hda_command(dev, HDA_CMD_GET_PARAMETER(0, nid, HDA_PARAM_SUPP_STREAM_FORMATS)); w->param.supp_stream_formats = (cap != 0) ? cap : w->devinfo->supp_stream_formats; cap = hda_command(dev, HDA_CMD_GET_PARAMETER(0, nid, HDA_PARAM_SUPP_PCM_SIZE_RATE)); w->param.supp_pcm_size_rate = (cap != 0) ? 
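/*
 * Editor's note: illustrative standalone sketch, not part of this
 * driver.  The CONN_* macros above slice one 32-bit GET_CONN_LIST_ENTRY
 * response into "entnum" equal entries (4 x 8 bits in short form,
 * 2 x 16 bits in long form); the top bit of each entry is the "range"
 * flag and the remaining bits are the connected nid.  Equivalent
 * standalone helpers:
 */
#include <stdint.h>

static unsigned
conn_entry_nid(uint32_t res, int entnum, int n)
{
	int bits = 32 / entnum;			/* 8 or 16 */

	return ((res >> (bits * n)) & ((1u << (bits - 1)) - 1));
}

static int
conn_entry_is_range(uint32_t res, int entnum, int n)
{
	int bits = 32 / entnum;

	return (((res >> (bits * n)) & (1u << (bits - 1))) != 0);
}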
cap : w->devinfo->supp_pcm_size_rate; } else { w->param.supp_stream_formats = w->devinfo->supp_stream_formats; w->param.supp_pcm_size_rate = w->devinfo->supp_pcm_size_rate; } if (HDA_PARAM_AUDIO_WIDGET_CAP_STRIPE(w->param.widget_cap)) { w->wclass.conv.stripecap = hda_command(dev, HDA_CMD_GET_STRIPE_CONTROL(0, w->nid)) >> 20; } else w->wclass.conv.stripecap = 1; } else { w->param.supp_stream_formats = 0; w->param.supp_pcm_size_rate = 0; } if (w->type == HDA_PARAM_AUDIO_WIDGET_CAP_TYPE_PIN_COMPLEX) { w->wclass.pin.original = w->wclass.pin.newconf = w->wclass.pin.config = hda_command(dev, HDA_CMD_GET_CONFIGURATION_DEFAULT(0, w->nid)); w->wclass.pin.cap = hda_command(dev, HDA_CMD_GET_PARAMETER(0, w->nid, HDA_PARAM_PIN_CAP)); w->wclass.pin.ctrl = hda_command(dev, HDA_CMD_GET_PIN_WIDGET_CTRL(0, nid)); w->wclass.pin.connected = 2; if (HDA_PARAM_PIN_CAP_EAPD_CAP(w->wclass.pin.cap)) { w->param.eapdbtl = hda_command(dev, HDA_CMD_GET_EAPD_BTL_ENABLE(0, nid)); w->param.eapdbtl &= 0x7; w->param.eapdbtl |= HDA_CMD_SET_EAPD_BTL_ENABLE_EAPD; } else w->param.eapdbtl = HDA_INVALID; } w->unsol = -1; hdaa_unlock(w->devinfo); snprintf(buf, sizeof(buf), "nid%d", w->nid); SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev), SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, buf, CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, w, 0, hdaa_sysctl_caps, "A", "Node capabilities"); if (w->type == HDA_PARAM_AUDIO_WIDGET_CAP_TYPE_PIN_COMPLEX) { snprintf(buf, sizeof(buf), "nid%d_config", w->nid); SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev), SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, buf, CTLTYPE_STRING | CTLFLAG_RW | CTLFLAG_MPSAFE, &w->wclass.pin.newconf, 0, hdaa_sysctl_config, "A", "Current pin configuration"); snprintf(buf, sizeof(buf), "nid%d_original", w->nid); SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev), SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, buf, CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, &w->wclass.pin.original, 0, hdaa_sysctl_config, "A", "Original pin configuration"); } hdaa_lock(w->devinfo); } static void hdaa_widget_postprocess(struct hdaa_widget *w) { const char *typestr; w->type = HDA_PARAM_AUDIO_WIDGET_CAP_TYPE(w->param.widget_cap); switch (w->type) { case HDA_PARAM_AUDIO_WIDGET_CAP_TYPE_AUDIO_OUTPUT: typestr = "audio output"; break; case HDA_PARAM_AUDIO_WIDGET_CAP_TYPE_AUDIO_INPUT: typestr = "audio input"; break; case HDA_PARAM_AUDIO_WIDGET_CAP_TYPE_AUDIO_MIXER: typestr = "audio mixer"; break; case HDA_PARAM_AUDIO_WIDGET_CAP_TYPE_AUDIO_SELECTOR: typestr = "audio selector"; break; case HDA_PARAM_AUDIO_WIDGET_CAP_TYPE_PIN_COMPLEX: typestr = "pin"; break; case HDA_PARAM_AUDIO_WIDGET_CAP_TYPE_POWER_WIDGET: typestr = "power widget"; break; case HDA_PARAM_AUDIO_WIDGET_CAP_TYPE_VOLUME_WIDGET: typestr = "volume widget"; break; case HDA_PARAM_AUDIO_WIDGET_CAP_TYPE_BEEP_WIDGET: typestr = "beep widget"; break; case HDA_PARAM_AUDIO_WIDGET_CAP_TYPE_VENDOR_WIDGET: typestr = "vendor widget"; break; default: typestr = "unknown type"; break; } strlcpy(w->name, typestr, sizeof(w->name)); if (w->type == HDA_PARAM_AUDIO_WIDGET_CAP_TYPE_PIN_COMPLEX) { uint32_t config; const char *devstr; int conn, color; config = w->wclass.pin.config; devstr = HDA_DEVS[(config & HDA_CONFIG_DEFAULTCONF_DEVICE_MASK) >> HDA_CONFIG_DEFAULTCONF_DEVICE_SHIFT]; conn = (config & HDA_CONFIG_DEFAULTCONF_CONNECTIVITY_MASK) >> HDA_CONFIG_DEFAULTCONF_CONNECTIVITY_SHIFT; color = (config & HDA_CONFIG_DEFAULTCONF_COLOR_MASK) >> HDA_CONFIG_DEFAULTCONF_COLOR_SHIFT; strlcat(w->name, ": ", sizeof(w->name)); strlcat(w->name, devstr, 
sizeof(w->name)); strlcat(w->name, " (", sizeof(w->name)); if (conn == 0 && color != 0 && color != 15) { strlcat(w->name, HDA_COLORS[color], sizeof(w->name)); strlcat(w->name, " ", sizeof(w->name)); } strlcat(w->name, HDA_CONNS[conn], sizeof(w->name)); strlcat(w->name, ")", sizeof(w->name)); } } struct hdaa_widget * hdaa_widget_get(struct hdaa_devinfo *devinfo, nid_t nid) { if (devinfo == NULL || devinfo->widget == NULL || nid < devinfo->startnode || nid >= devinfo->endnode) return (NULL); return (&devinfo->widget[nid - devinfo->startnode]); } static void hdaa_audio_ctl_amp_set_internal(struct hdaa_devinfo *devinfo, nid_t nid, int index, int lmute, int rmute, int left, int right, int dir) { uint16_t v = 0; HDA_BOOTHVERBOSE( device_printf(devinfo->dev, "Setting amplifier nid=%d index=%d %s mute=%d/%d vol=%d/%d\n", nid,index,dir ? "in" : "out",lmute,rmute,left,right); ); if (left != right || lmute != rmute) { v = (1 << (15 - dir)) | (1 << 13) | (index << 8) | (lmute << 7) | left; hda_command(devinfo->dev, HDA_CMD_SET_AMP_GAIN_MUTE(0, nid, v)); v = (1 << (15 - dir)) | (1 << 12) | (index << 8) | (rmute << 7) | right; } else v = (1 << (15 - dir)) | (3 << 12) | (index << 8) | (lmute << 7) | left; hda_command(devinfo->dev, HDA_CMD_SET_AMP_GAIN_MUTE(0, nid, v)); } static void hdaa_audio_ctl_amp_set(struct hdaa_audio_ctl *ctl, uint32_t mute, int left, int right) { nid_t nid; int lmute, rmute; nid = ctl->widget->nid; /* Save new values if valid. */ if (mute != HDAA_AMP_MUTE_DEFAULT) ctl->muted = mute; if (left != HDAA_AMP_VOL_DEFAULT) ctl->left = left; if (right != HDAA_AMP_VOL_DEFAULT) ctl->right = right; /* Prepare effective values */ if (ctl->forcemute) { lmute = 1; rmute = 1; left = 0; right = 0; } else { lmute = HDAA_AMP_LEFT_MUTED(ctl->muted); rmute = HDAA_AMP_RIGHT_MUTED(ctl->muted); left = ctl->left; right = ctl->right; } /* Apply effective values */ if (ctl->dir & HDAA_CTL_OUT) hdaa_audio_ctl_amp_set_internal(ctl->widget->devinfo, nid, ctl->index, lmute, rmute, left, right, 0); if (ctl->dir & HDAA_CTL_IN) hdaa_audio_ctl_amp_set_internal(ctl->widget->devinfo, nid, ctl->index, lmute, rmute, left, right, 1); } static void hdaa_widget_connection_select(struct hdaa_widget *w, uint8_t index) { if (w == NULL || w->nconns < 1 || index > (w->nconns - 1)) return; HDA_BOOTHVERBOSE( device_printf(w->devinfo->dev, "Setting selector nid=%d index=%d\n", w->nid, index); ); hda_command(w->devinfo->dev, HDA_CMD_SET_CONNECTION_SELECT_CONTROL(0, w->nid, index)); w->selconn = index; } /**************************************************************************** * Device Methods ****************************************************************************/ static void * hdaa_channel_init(kobj_t obj, void *data, struct snd_dbuf *b, struct pcm_channel *c, int dir) { struct hdaa_chan *ch = data; struct hdaa_pcm_devinfo *pdevinfo = ch->pdevinfo; struct hdaa_devinfo *devinfo = pdevinfo->devinfo; hdaa_lock(devinfo); if (devinfo->quirks & HDAA_QUIRK_FIXEDRATE) { ch->caps.minspeed = ch->caps.maxspeed = 48000; ch->pcmrates[0] = 48000; ch->pcmrates[1] = 0; } ch->dir = dir; ch->b = b; ch->c = c; ch->blksz = pdevinfo->chan_size / pdevinfo->chan_blkcnt; ch->blkcnt = pdevinfo->chan_blkcnt; hdaa_unlock(devinfo); if (sndbuf_alloc(ch->b, bus_get_dma_tag(devinfo->dev), hda_get_dma_nocache(devinfo->dev) ? 
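/*
 * Editor's note: illustrative standalone sketch, not part of this
 * driver.  The 16-bit payload built in hdaa_audio_ctl_amp_set_internal()
 * follows the Set Amplifier Gain/Mute verb layout: bit 15 = output amp,
 * bit 14 = input amp, bit 13 = set left, bit 12 = set right,
 * bits 11:8 = index, bit 7 = mute, bits 6:0 = gain.  A hypothetical
 * builder for one update:
 */
#include <stdint.h>

static uint16_t
amp_payload(int input, int left, int right, unsigned index,
    int mute, unsigned gain)
{
	uint16_t v = 0;

	v |= input ? (1 << 14) : (1 << 15);
	v |= (left ? (1 << 13) : 0) | (right ? (1 << 12) : 0);
	v |= (index & 0xf) << 8;
	v |= (mute ? 1 : 0) << 7;
	v |= gain & 0x7f;
	return (v);
}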
BUS_DMA_NOCACHE : 0, pdevinfo->chan_size) != 0) return (NULL); return (ch); } static int hdaa_channel_setformat(kobj_t obj, void *data, uint32_t format) { struct hdaa_chan *ch = data; int i; for (i = 0; ch->caps.fmtlist[i] != 0; i++) { if (format == ch->caps.fmtlist[i]) { ch->fmt = format; return (0); } } return (EINVAL); } static uint32_t hdaa_channel_setspeed(kobj_t obj, void *data, uint32_t speed) { struct hdaa_chan *ch = data; uint32_t spd = 0, threshold; int i; /* First look for equal or multiple frequency. */ for (i = 0; ch->pcmrates[i] != 0; i++) { spd = ch->pcmrates[i]; if (speed != 0 && spd / speed * speed == spd) { ch->spd = spd; return (spd); } } /* If no match, just find nearest. */ for (i = 0; ch->pcmrates[i] != 0; i++) { spd = ch->pcmrates[i]; threshold = spd + ((ch->pcmrates[i + 1] != 0) ? ((ch->pcmrates[i + 1] - spd) >> 1) : 0); if (speed < threshold) break; } ch->spd = spd; return (spd); } static uint16_t hdaa_stream_format(struct hdaa_chan *ch) { int i; uint16_t fmt; fmt = 0; if (ch->fmt & AFMT_S16_LE) fmt |= ch->bit16 << 4; else if (ch->fmt & AFMT_S32_LE) fmt |= ch->bit32 << 4; else fmt |= 1 << 4; for (i = 0; i < HDA_RATE_TAB_LEN; i++) { if (hda_rate_tab[i].valid && ch->spd == hda_rate_tab[i].rate) { fmt |= hda_rate_tab[i].base; fmt |= hda_rate_tab[i].mul; fmt |= hda_rate_tab[i].div; break; } } fmt |= (AFMT_CHANNEL(ch->fmt) - 1); return (fmt); } static int hdaa_allowed_stripes(uint16_t fmt) { static const int bits[8] = { 8, 16, 20, 24, 32, 32, 32, 32 }; int size; size = bits[(fmt >> 4) & 0x03]; size *= (fmt & 0x0f) + 1; size *= ((fmt >> 11) & 0x07) + 1; return (0xffffffffU >> (32 - fls(size / 8))); } static void hdaa_audio_setup(struct hdaa_chan *ch) { struct hdaa_audio_as *as = &ch->devinfo->as[ch->as]; struct hdaa_widget *w, *wp; int i, j, k, chn, cchn, totalchn, totalextchn, c; uint16_t fmt, dfmt; /* Mapping channel pairs to codec pins/converters. */ const static uint16_t convmap[2][5] = /* 1.0 2.0 4.0 5.1 7.1 */ {{ 0x0010, 0x0001, 0x0201, 0x0231, 0x4231 }, /* no dup. */ { 0x0010, 0x0001, 0x2201, 0x2231, 0x4231 }}; /* side dup. */ /* Mapping formats to HDMI channel allocations. */ const static uint8_t hdmica[2][8] = /* 1 2 3 4 5 6 7 8 */ {{ 0x02, 0x00, 0x04, 0x08, 0x0a, 0x0e, 0x12, 0x12 }, /* x.0 */ { 0x01, 0x03, 0x01, 0x03, 0x09, 0x0b, 0x0f, 0x13 }}; /* x.1 */ /* Mapping formats to HDMI channels order. */ const static uint32_t hdmich[2][8] = /* 1 / 5 2 / 6 3 / 7 4 / 8 */ {{ 0xFFFF0F00, 0xFFFFFF10, 0xFFF2FF10, 0xFF32FF10, 0xFF324F10, 0xF5324F10, 0x54326F10, 0x54326F10 }, /* x.0 */ { 0xFFFFF000, 0xFFFF0100, 0xFFFFF210, 0xFFFF2310, 0xFF32F410, 0xFF324510, 0xF6324510, 0x76325410 }}; /* x.1 */ int convmapid = -1; nid_t nid; uint8_t csum; totalchn = AFMT_CHANNEL(ch->fmt); totalextchn = AFMT_EXTCHANNEL(ch->fmt); HDA_BOOTHVERBOSE( device_printf(ch->pdevinfo->dev, "PCMDIR_%s: Stream setup fmt=%08x (%d.%d) speed=%d\n", (ch->dir == PCMDIR_PLAY) ? "PLAY" : "REC", ch->fmt, totalchn - totalextchn, totalextchn, ch->spd); ); fmt = hdaa_stream_format(ch); /* Set channels to I/O converters mapping for known speaker setups. 
*/ if ((as->pinset == 0x0007 || as->pinset == 0x0013) || /* Standard 5.1 */ (as->pinset == 0x0017)) /* Standard 7.1 */ convmapid = (ch->dir == PCMDIR_PLAY); dfmt = HDA_CMD_SET_DIGITAL_CONV_FMT1_DIGEN; if (ch->fmt & AFMT_AC3) dfmt |= HDA_CMD_SET_DIGITAL_CONV_FMT1_NAUDIO; chn = 0; for (i = 0; ch->io[i] != -1; i++) { w = hdaa_widget_get(ch->devinfo, ch->io[i]); if (w == NULL) continue; /* If HP redirection is enabled, but failed to use same DAC, make last DAC to duplicate first one. */ if (as->fakeredir && i == (as->pincnt - 1)) { c = (ch->sid << 4); } else { /* Map channels to I/O converters, if set. */ if (convmapid >= 0) chn = (((convmap[convmapid][totalchn / 2] >> i * 4) & 0xf) - 1) * 2; if (chn < 0 || chn >= totalchn) { c = 0; } else { c = (ch->sid << 4) | chn; } } hda_command(ch->devinfo->dev, HDA_CMD_SET_CONV_FMT(0, ch->io[i], fmt)); if (HDA_PARAM_AUDIO_WIDGET_CAP_DIGITAL(w->param.widget_cap)) { hda_command(ch->devinfo->dev, HDA_CMD_SET_DIGITAL_CONV_FMT1(0, ch->io[i], dfmt)); } hda_command(ch->devinfo->dev, HDA_CMD_SET_CONV_STREAM_CHAN(0, ch->io[i], c)); if (HDA_PARAM_AUDIO_WIDGET_CAP_STRIPE(w->param.widget_cap)) { hda_command(ch->devinfo->dev, HDA_CMD_SET_STRIPE_CONTROL(0, w->nid, ch->stripectl)); } cchn = HDA_PARAM_AUDIO_WIDGET_CAP_CC(w->param.widget_cap); if (cchn > 1 && chn < totalchn) { cchn = min(cchn, totalchn - chn - 1); hda_command(ch->devinfo->dev, HDA_CMD_SET_CONV_CHAN_COUNT(0, ch->io[i], cchn)); } HDA_BOOTHVERBOSE( device_printf(ch->pdevinfo->dev, "PCMDIR_%s: Stream setup nid=%d: " "fmt=0x%04x, dfmt=0x%04x, chan=0x%04x, " "chan_count=0x%02x, stripe=%d\n", (ch->dir == PCMDIR_PLAY) ? "PLAY" : "REC", ch->io[i], fmt, dfmt, c, cchn, ch->stripectl); ); for (j = 0; j < 16; j++) { if (as->dacs[ch->asindex][j] != ch->io[i]) continue; nid = as->pins[j]; wp = hdaa_widget_get(ch->devinfo, nid); if (wp == NULL) continue; if (!HDA_PARAM_PIN_CAP_DP(wp->wclass.pin.cap) && !HDA_PARAM_PIN_CAP_HDMI(wp->wclass.pin.cap)) continue; /* Set channel mapping. */ for (k = 0; k < 8; k++) { hda_command(ch->devinfo->dev, HDA_CMD_SET_HDMI_CHAN_SLOT(0, nid, (((hdmich[totalextchn == 0 ? 0 : 1][totalchn - 1] >> (k * 4)) & 0xf) << 4) | k)); } /* * Enable High Bit Rate (HBR) Encoded Packet Type * (EPT), if supported and needed (8ch data). */ if (HDA_PARAM_PIN_CAP_HDMI(wp->wclass.pin.cap) && HDA_PARAM_PIN_CAP_HBR(wp->wclass.pin.cap)) { wp->wclass.pin.ctrl &= ~HDA_CMD_SET_PIN_WIDGET_CTRL_VREF_ENABLE_MASK; if ((ch->fmt & AFMT_AC3) && (cchn == 7)) wp->wclass.pin.ctrl |= 0x03; hda_command(ch->devinfo->dev, HDA_CMD_SET_PIN_WIDGET_CTRL(0, nid, wp->wclass.pin.ctrl)); } /* Stop audio infoframe transmission. */ hda_command(ch->devinfo->dev, HDA_CMD_SET_HDMI_DIP_INDEX(0, nid, 0x00)); hda_command(ch->devinfo->dev, HDA_CMD_SET_HDMI_DIP_XMIT(0, nid, 0x00)); /* Clear audio infoframe buffer. */ hda_command(ch->devinfo->dev, HDA_CMD_SET_HDMI_DIP_INDEX(0, nid, 0x00)); for (k = 0; k < 32; k++) hda_command(ch->devinfo->dev, HDA_CMD_SET_HDMI_DIP_DATA(0, nid, 0x00)); /* Write HDMI/DisplayPort audio infoframe. 
*/ hda_command(ch->devinfo->dev, HDA_CMD_SET_HDMI_DIP_INDEX(0, nid, 0x00)); if (w->eld != NULL && w->eld_len >= 6 && ((w->eld[5] >> 2) & 0x3) == 1) { /* DisplayPort */ hda_command(ch->devinfo->dev, HDA_CMD_SET_HDMI_DIP_DATA(0, nid, 0x84)); hda_command(ch->devinfo->dev, HDA_CMD_SET_HDMI_DIP_DATA(0, nid, 0x1b)); hda_command(ch->devinfo->dev, HDA_CMD_SET_HDMI_DIP_DATA(0, nid, 0x44)); } else { /* HDMI */ hda_command(ch->devinfo->dev, HDA_CMD_SET_HDMI_DIP_DATA(0, nid, 0x84)); hda_command(ch->devinfo->dev, HDA_CMD_SET_HDMI_DIP_DATA(0, nid, 0x01)); hda_command(ch->devinfo->dev, HDA_CMD_SET_HDMI_DIP_DATA(0, nid, 0x0a)); csum = 0; csum -= 0x84 + 0x01 + 0x0a + (totalchn - 1) + hdmica[totalextchn == 0 ? 0 : 1][totalchn - 1]; hda_command(ch->devinfo->dev, HDA_CMD_SET_HDMI_DIP_DATA(0, nid, csum)); } hda_command(ch->devinfo->dev, HDA_CMD_SET_HDMI_DIP_DATA(0, nid, totalchn - 1)); hda_command(ch->devinfo->dev, HDA_CMD_SET_HDMI_DIP_DATA(0, nid, 0x00)); hda_command(ch->devinfo->dev, HDA_CMD_SET_HDMI_DIP_DATA(0, nid, 0x00)); hda_command(ch->devinfo->dev, HDA_CMD_SET_HDMI_DIP_DATA(0, nid, hdmica[totalextchn == 0 ? 0 : 1][totalchn - 1])); /* Start audio infoframe transmission. */ hda_command(ch->devinfo->dev, HDA_CMD_SET_HDMI_DIP_INDEX(0, nid, 0x00)); hda_command(ch->devinfo->dev, HDA_CMD_SET_HDMI_DIP_XMIT(0, nid, 0xc0)); } chn += cchn + 1; } } /* * Greatest Common Divisor. */ static unsigned gcd(unsigned a, unsigned b) { u_int c; while (b != 0) { c = a; a = b; b = (c % b); } return (a); } /* * Least Common Multiple. */ static unsigned lcm(unsigned a, unsigned b) { return ((a * b) / gcd(a, b)); } static int hdaa_channel_setfragments(kobj_t obj, void *data, uint32_t blksz, uint32_t blkcnt) { struct hdaa_chan *ch = data; blksz -= blksz % lcm(HDA_DMA_ALIGNMENT, sndbuf_getalign(ch->b)); if (blksz > (sndbuf_getmaxsize(ch->b) / HDA_BDL_MIN)) blksz = sndbuf_getmaxsize(ch->b) / HDA_BDL_MIN; if (blksz < HDA_BLK_MIN) blksz = HDA_BLK_MIN; if (blkcnt > HDA_BDL_MAX) blkcnt = HDA_BDL_MAX; if (blkcnt < HDA_BDL_MIN) blkcnt = HDA_BDL_MIN; while ((blksz * blkcnt) > sndbuf_getmaxsize(ch->b)) { if ((blkcnt >> 1) >= HDA_BDL_MIN) blkcnt >>= 1; else if ((blksz >> 1) >= HDA_BLK_MIN) blksz >>= 1; else break; } if ((sndbuf_getblksz(ch->b) != blksz || sndbuf_getblkcnt(ch->b) != blkcnt) && sndbuf_resize(ch->b, blkcnt, blksz) != 0) device_printf(ch->devinfo->dev, "%s: failed blksz=%u blkcnt=%u\n", __func__, blksz, blkcnt); ch->blksz = sndbuf_getblksz(ch->b); ch->blkcnt = sndbuf_getblkcnt(ch->b); return (0); } static uint32_t hdaa_channel_setblocksize(kobj_t obj, void *data, uint32_t blksz) { struct hdaa_chan *ch = data; hdaa_channel_setfragments(obj, data, blksz, ch->pdevinfo->chan_blkcnt); return (ch->blksz); } static void hdaa_channel_stop(struct hdaa_chan *ch) { struct hdaa_devinfo *devinfo = ch->devinfo; struct hdaa_widget *w; int i; if ((ch->flags & HDAA_CHN_RUNNING) == 0) return; ch->flags &= ~HDAA_CHN_RUNNING; HDAC_STREAM_STOP(device_get_parent(devinfo->dev), devinfo->dev, ch->dir == PCMDIR_PLAY ? 1 : 0, ch->sid); for (i = 0; ch->io[i] != -1; i++) { w = hdaa_widget_get(ch->devinfo, ch->io[i]); if (w == NULL) continue; if (HDA_PARAM_AUDIO_WIDGET_CAP_DIGITAL(w->param.widget_cap)) { hda_command(devinfo->dev, HDA_CMD_SET_DIGITAL_CONV_FMT1(0, ch->io[i], 0)); } hda_command(devinfo->dev, HDA_CMD_SET_CONV_STREAM_CHAN(0, ch->io[i], 0)); } HDAC_STREAM_FREE(device_get_parent(devinfo->dev), devinfo->dev, ch->dir == PCMDIR_PLAY ? 
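/*
 * Editor's note: illustrative standalone sketch, not part of this
 * driver.  The HDMI audio infoframe written above (type 0x84, version
 * 0x01, length 0x0a) carries a checksum byte chosen so that all header
 * and payload bytes sum to zero modulo 256, which is what the
 * "csum -= ..." line computes.  A standalone equivalent:
 */
#include <stdint.h>
#include <stddef.h>

static uint8_t
infoframe_checksum(const uint8_t *bytes, size_t len)
{
	uint8_t sum = 0;
	size_t i;

	for (i = 0; i < len; i++)
		sum += bytes[i];
	return ((uint8_t)(0 - sum));	/* makes the total wrap to zero */
}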
1 : 0, ch->sid); } static int hdaa_channel_start(struct hdaa_chan *ch) { struct hdaa_devinfo *devinfo = ch->devinfo; uint32_t fmt; fmt = hdaa_stream_format(ch); ch->stripectl = fls(ch->stripecap & hdaa_allowed_stripes(fmt) & hda_get_stripes_mask(devinfo->dev)) - 1; ch->sid = HDAC_STREAM_ALLOC(device_get_parent(devinfo->dev), devinfo->dev, ch->dir == PCMDIR_PLAY ? 1 : 0, fmt, ch->stripectl, &ch->dmapos); if (ch->sid <= 0) return (EBUSY); hdaa_audio_setup(ch); HDAC_STREAM_RESET(device_get_parent(devinfo->dev), devinfo->dev, ch->dir == PCMDIR_PLAY ? 1 : 0, ch->sid); HDAC_STREAM_START(device_get_parent(devinfo->dev), devinfo->dev, ch->dir == PCMDIR_PLAY ? 1 : 0, ch->sid, sndbuf_getbufaddr(ch->b), ch->blksz, ch->blkcnt); ch->flags |= HDAA_CHN_RUNNING; return (0); } static int hdaa_channel_trigger(kobj_t obj, void *data, int go) { struct hdaa_chan *ch = data; int error = 0; if (!PCMTRIG_COMMON(go)) return (0); hdaa_lock(ch->devinfo); switch (go) { case PCMTRIG_START: error = hdaa_channel_start(ch); break; case PCMTRIG_STOP: case PCMTRIG_ABORT: hdaa_channel_stop(ch); break; default: break; } hdaa_unlock(ch->devinfo); return (error); } static uint32_t hdaa_channel_getptr(kobj_t obj, void *data) { struct hdaa_chan *ch = data; struct hdaa_devinfo *devinfo = ch->devinfo; uint32_t ptr; hdaa_lock(devinfo); if (ch->dmapos != NULL) { ptr = *(ch->dmapos); } else { ptr = HDAC_STREAM_GETPTR( device_get_parent(devinfo->dev), devinfo->dev, ch->dir == PCMDIR_PLAY ? 1 : 0, ch->sid); } hdaa_unlock(devinfo); /* * Round to available space and force 128 bytes aligment. */ ptr %= ch->blksz * ch->blkcnt; ptr &= HDA_BLK_ALIGN; return (ptr); } static struct pcmchan_caps * hdaa_channel_getcaps(kobj_t obj, void *data) { return (&((struct hdaa_chan *)data)->caps); } static kobj_method_t hdaa_channel_methods[] = { KOBJMETHOD(channel_init, hdaa_channel_init), KOBJMETHOD(channel_setformat, hdaa_channel_setformat), KOBJMETHOD(channel_setspeed, hdaa_channel_setspeed), KOBJMETHOD(channel_setblocksize, hdaa_channel_setblocksize), KOBJMETHOD(channel_setfragments, hdaa_channel_setfragments), KOBJMETHOD(channel_trigger, hdaa_channel_trigger), KOBJMETHOD(channel_getptr, hdaa_channel_getptr), KOBJMETHOD(channel_getcaps, hdaa_channel_getcaps), KOBJMETHOD_END }; CHANNEL_DECLARE(hdaa_channel); static int hdaa_audio_ctl_ossmixer_init(struct snd_mixer *m) { struct hdaa_pcm_devinfo *pdevinfo = mix_getdevinfo(m); struct hdaa_devinfo *devinfo = pdevinfo->devinfo; struct hdaa_widget *w, *cw; uint32_t mask, recmask; int i, j; hdaa_lock(devinfo); pdevinfo->mixer = m; /* Make sure that in case of soft volume it won't stay muted. */ for (i = 0; i < SOUND_MIXER_NRDEVICES; i++) { pdevinfo->left[i] = 100; pdevinfo->right[i] = 100; } /* Declare volume controls assigned to this association. */ mask = pdevinfo->ossmask; if (pdevinfo->playas >= 0) { /* Declate EAPD as ogain control. */ for (i = devinfo->startnode; i < devinfo->endnode; i++) { w = hdaa_widget_get(devinfo, i); if (w == NULL || w->enable == 0) continue; if (w->type != HDA_PARAM_AUDIO_WIDGET_CAP_TYPE_PIN_COMPLEX || w->param.eapdbtl == HDA_INVALID || w->bindas != pdevinfo->playas) continue; mask |= SOUND_MASK_OGAIN; break; } /* Declare soft PCM volume if needed. 
*/ if ((mask & SOUND_MASK_PCM) == 0 || (devinfo->quirks & HDAA_QUIRK_SOFTPCMVOL) || pdevinfo->minamp[SOUND_MIXER_PCM] == pdevinfo->maxamp[SOUND_MIXER_PCM]) { mask |= SOUND_MASK_PCM; pcm_setflags(pdevinfo->dev, pcm_getflags(pdevinfo->dev) | SD_F_SOFTPCMVOL); HDA_BOOTHVERBOSE( device_printf(pdevinfo->dev, "Forcing Soft PCM volume\n"); ); } /* Declare master volume if needed. */ if ((mask & SOUND_MASK_VOLUME) == 0) { mask |= SOUND_MASK_VOLUME; mix_setparentchild(m, SOUND_MIXER_VOLUME, SOUND_MASK_PCM); mix_setrealdev(m, SOUND_MIXER_VOLUME, SOUND_MIXER_NONE); HDA_BOOTHVERBOSE( device_printf(pdevinfo->dev, "Forcing master volume with PCM\n"); ); } } /* Declare record sources available to this association. */ recmask = 0; if (pdevinfo->recas >= 0) { for (i = 0; i < 16; i++) { if (devinfo->as[pdevinfo->recas].dacs[0][i] < 0) continue; w = hdaa_widget_get(devinfo, devinfo->as[pdevinfo->recas].dacs[0][i]); if (w == NULL || w->enable == 0) continue; for (j = 0; j < w->nconns; j++) { if (w->connsenable[j] == 0) continue; cw = hdaa_widget_get(devinfo, w->conns[j]); if (cw == NULL || cw->enable == 0) continue; if (cw->bindas != pdevinfo->recas && cw->bindas != -2) continue; recmask |= cw->ossmask; } } } recmask &= (1 << SOUND_MIXER_NRDEVICES) - 1; mask &= (1 << SOUND_MIXER_NRDEVICES) - 1; pdevinfo->ossmask = mask; mix_setrecdevs(m, recmask); mix_setdevs(m, mask); hdaa_unlock(devinfo); return (0); } /* * Update amplification per pdevinfo per ossdev, calculate summary coefficient * and write it to codec, update *left and *right to reflect remaining error. */ static void hdaa_audio_ctl_dev_set(struct hdaa_audio_ctl *ctl, int ossdev, int mute, int *left, int *right) { int i, zleft, zright, sleft, sright, smute, lval, rval; ctl->devleft[ossdev] = *left; ctl->devright[ossdev] = *right; ctl->devmute[ossdev] = mute; smute = sleft = sright = zleft = zright = 0; for (i = 0; i < SOUND_MIXER_NRDEVICES; i++) { sleft += ctl->devleft[i]; sright += ctl->devright[i]; smute |= ctl->devmute[i]; if (i == ossdev) continue; zleft += ctl->devleft[i]; zright += ctl->devright[i]; } lval = QDB2VAL(ctl, sleft); rval = QDB2VAL(ctl, sright); hdaa_audio_ctl_amp_set(ctl, smute, lval, rval); *left -= VAL2QDB(ctl, lval) - VAL2QDB(ctl, QDB2VAL(ctl, zleft)); *right -= VAL2QDB(ctl, rval) - VAL2QDB(ctl, QDB2VAL(ctl, zright)); } /* * Trace signal from source, setting volumes on the way. */ static void hdaa_audio_ctl_source_volume(struct hdaa_pcm_devinfo *pdevinfo, int ossdev, nid_t nid, int index, int mute, int left, int right, int depth) { struct hdaa_devinfo *devinfo = pdevinfo->devinfo; struct hdaa_widget *w, *wc; struct hdaa_audio_ctl *ctl; int i, j, conns = 0; if (depth > HDA_PARSE_MAXDEPTH) return; w = hdaa_widget_get(devinfo, nid); if (w == NULL || w->enable == 0) return; /* Count number of active inputs. */ if (depth > 0) { for (j = 0; j < w->nconns; j++) { if (!w->connsenable[j]) continue; conns++; } } /* If this is not a first step - use input mixer. Pins have common input ctl so care must be taken. */ if (depth > 0 && (conns == 1 || w->type != HDA_PARAM_AUDIO_WIDGET_CAP_TYPE_PIN_COMPLEX)) { ctl = hdaa_audio_ctl_amp_get(devinfo, w->nid, HDAA_CTL_IN, index, 1); if (ctl) hdaa_audio_ctl_dev_set(ctl, ossdev, mute, &left, &right); } /* If widget has own ossdev - not traverse it. It will be traversed on its own. 
*/ if (w->ossdev >= 0 && depth > 0) return; /* We must not traverse pin */ if ((w->type == HDA_PARAM_AUDIO_WIDGET_CAP_TYPE_AUDIO_INPUT || w->type == HDA_PARAM_AUDIO_WIDGET_CAP_TYPE_PIN_COMPLEX) && depth > 0) return; /* * If signals mixed, we can't assign controls farther. * Ignore this on depth zero. Caller must knows why. */ if (conns > 1 && (w->type == HDA_PARAM_AUDIO_WIDGET_CAP_TYPE_AUDIO_MIXER || w->selconn != index)) return; ctl = hdaa_audio_ctl_amp_get(devinfo, w->nid, HDAA_CTL_OUT, -1, 1); if (ctl) hdaa_audio_ctl_dev_set(ctl, ossdev, mute, &left, &right); for (i = devinfo->startnode; i < devinfo->endnode; i++) { wc = hdaa_widget_get(devinfo, i); if (wc == NULL || wc->enable == 0) continue; for (j = 0; j < wc->nconns; j++) { if (wc->connsenable[j] && wc->conns[j] == nid) { hdaa_audio_ctl_source_volume(pdevinfo, ossdev, wc->nid, j, mute, left, right, depth + 1); } } } return; } /* * Trace signal from destination, setting volumes on the way. */ static void hdaa_audio_ctl_dest_volume(struct hdaa_pcm_devinfo *pdevinfo, int ossdev, nid_t nid, int index, int mute, int left, int right, int depth) { struct hdaa_devinfo *devinfo = pdevinfo->devinfo; struct hdaa_audio_as *as = devinfo->as; struct hdaa_widget *w, *wc; struct hdaa_audio_ctl *ctl; int i, j, consumers, cleft, cright; if (depth > HDA_PARSE_MAXDEPTH) return; w = hdaa_widget_get(devinfo, nid); if (w == NULL || w->enable == 0) return; if (depth > 0) { /* If this node produce output for several consumers, we can't touch it. */ consumers = 0; for (i = devinfo->startnode; i < devinfo->endnode; i++) { wc = hdaa_widget_get(devinfo, i); if (wc == NULL || wc->enable == 0) continue; for (j = 0; j < wc->nconns; j++) { if (wc->connsenable[j] && wc->conns[j] == nid) consumers++; } } /* The only exception is if real HP redirection is configured and this is a duplication point. XXX: Actually exception is not completely correct. XXX: Duplication point check is not perfect. */ if ((consumers == 2 && (w->bindas < 0 || as[w->bindas].hpredir < 0 || as[w->bindas].fakeredir || (w->bindseqmask & (1 << 15)) == 0)) || consumers > 2) return; /* Else use it's output mixer. */ ctl = hdaa_audio_ctl_amp_get(devinfo, w->nid, HDAA_CTL_OUT, -1, 1); if (ctl) hdaa_audio_ctl_dev_set(ctl, ossdev, mute, &left, &right); } /* We must not traverse pin */ if (w->type == HDA_PARAM_AUDIO_WIDGET_CAP_TYPE_PIN_COMPLEX && depth > 0) return; for (i = 0; i < w->nconns; i++) { if (w->connsenable[i] == 0) continue; if (index >= 0 && i != index) continue; cleft = left; cright = right; ctl = hdaa_audio_ctl_amp_get(devinfo, w->nid, HDAA_CTL_IN, i, 1); if (ctl) hdaa_audio_ctl_dev_set(ctl, ossdev, mute, &cleft, &cright); hdaa_audio_ctl_dest_volume(pdevinfo, ossdev, w->conns[i], -1, mute, cleft, cright, depth + 1); } } /* * Set volumes for the specified pdevinfo and ossdev. 
*/ static void hdaa_audio_ctl_dev_volume(struct hdaa_pcm_devinfo *pdevinfo, unsigned dev) { struct hdaa_devinfo *devinfo = pdevinfo->devinfo; struct hdaa_widget *w, *cw; uint32_t mute; int lvol, rvol; int i, j; mute = 0; if (pdevinfo->left[dev] == 0) { mute |= HDAA_AMP_MUTE_LEFT; lvol = -4000; } else lvol = ((pdevinfo->maxamp[dev] - pdevinfo->minamp[dev]) * pdevinfo->left[dev] + 50) / 100 + pdevinfo->minamp[dev]; if (pdevinfo->right[dev] == 0) { mute |= HDAA_AMP_MUTE_RIGHT; rvol = -4000; } else rvol = ((pdevinfo->maxamp[dev] - pdevinfo->minamp[dev]) * pdevinfo->right[dev] + 50) / 100 + pdevinfo->minamp[dev]; for (i = devinfo->startnode; i < devinfo->endnode; i++) { w = hdaa_widget_get(devinfo, i); if (w == NULL || w->enable == 0) continue; if (w->bindas < 0) { if (pdevinfo->index != 0) continue; } else { if (w->bindas != pdevinfo->playas && w->bindas != pdevinfo->recas) continue; } if (dev == SOUND_MIXER_RECLEV && w->type == HDA_PARAM_AUDIO_WIDGET_CAP_TYPE_AUDIO_INPUT) { hdaa_audio_ctl_dest_volume(pdevinfo, dev, w->nid, -1, mute, lvol, rvol, 0); continue; } if (dev == SOUND_MIXER_VOLUME && w->type == HDA_PARAM_AUDIO_WIDGET_CAP_TYPE_PIN_COMPLEX && devinfo->as[w->bindas].dir == HDAA_CTL_OUT) { hdaa_audio_ctl_dest_volume(pdevinfo, dev, w->nid, -1, mute, lvol, rvol, 0); continue; } if (dev == SOUND_MIXER_IGAIN && w->pflags & HDAA_ADC_MONITOR) { for (j = 0; j < w->nconns; j++) { if (!w->connsenable[j]) continue; cw = hdaa_widget_get(devinfo, w->conns[j]); if (cw == NULL || cw->enable == 0) continue; if (cw->bindas == -1) continue; if (cw->bindas >= 0 && devinfo->as[cw->bindas].dir != HDAA_CTL_IN) continue; hdaa_audio_ctl_dest_volume(pdevinfo, dev, w->nid, j, mute, lvol, rvol, 0); } continue; } if (w->ossdev != dev) continue; hdaa_audio_ctl_source_volume(pdevinfo, dev, w->nid, -1, mute, lvol, rvol, 0); if (dev == SOUND_MIXER_IMIX && (w->pflags & HDAA_IMIX_AS_DST)) hdaa_audio_ctl_dest_volume(pdevinfo, dev, w->nid, -1, mute, lvol, rvol, 0); } } /* * OSS Mixer set method. */ static int hdaa_audio_ctl_ossmixer_set(struct snd_mixer *m, unsigned dev, unsigned left, unsigned right) { struct hdaa_pcm_devinfo *pdevinfo = mix_getdevinfo(m); struct hdaa_devinfo *devinfo = pdevinfo->devinfo; struct hdaa_widget *w; int i; hdaa_lock(devinfo); /* Save new values. */ pdevinfo->left[dev] = left; pdevinfo->right[dev] = right; /* 'ogain' is the special case implemented with EAPD. */ if (dev == SOUND_MIXER_OGAIN) { uint32_t orig; w = NULL; for (i = devinfo->startnode; i < devinfo->endnode; i++) { w = hdaa_widget_get(devinfo, i); if (w == NULL || w->enable == 0) continue; if (w->type != HDA_PARAM_AUDIO_WIDGET_CAP_TYPE_PIN_COMPLEX || w->param.eapdbtl == HDA_INVALID) continue; break; } if (i >= devinfo->endnode) { hdaa_unlock(devinfo); return (-1); } orig = w->param.eapdbtl; if (left == 0) w->param.eapdbtl &= ~HDA_CMD_SET_EAPD_BTL_ENABLE_EAPD; else w->param.eapdbtl |= HDA_CMD_SET_EAPD_BTL_ENABLE_EAPD; if (orig != w->param.eapdbtl) { uint32_t val; val = w->param.eapdbtl; if (devinfo->quirks & HDAA_QUIRK_EAPDINV) val ^= HDA_CMD_SET_EAPD_BTL_ENABLE_EAPD; hda_command(devinfo->dev, HDA_CMD_SET_EAPD_BTL_ENABLE(0, w->nid, val)); } hdaa_unlock(devinfo); return (left | (left << 8)); } /* Recalculate all controls related to this OSS device. */ hdaa_audio_ctl_dev_volume(pdevinfo, dev); hdaa_unlock(devinfo); return (left | (right << 8)); } /* * Set mixer settings to our own default values: * +20dB for mics, -10dB for analog vol, mute for igain, 0dB for others. 
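hdaa_audio_ctl_dev_volume() above maps a stored OSS 0..100 level onto the device's [minamp, maxamp] quarter-dB range, and hdaa_audio_ctl_set_defaults() below inverts that mapping to turn a default gain into a mixer percentage. A minimal sketch of both directions (hypothetical helpers, not part of the driver; qdb_to_pct() assumes a non-empty range, which set_defaults() checks before using the formula):

static int
pct_to_qdb(int pct, int minamp, int maxamp)
{
	/* "+ 50" rounds to the nearest quarter dB. */
	return (((maxamp - minamp) * pct + 50) / 100 + minamp);
}

static int
qdb_to_pct(int amp, int minamp, int maxamp)
{
	int range = maxamp - minamp;

	/* Mirror of the above, clamped to 1..100 as in set_defaults(). */
	return (imin(imax(((amp - minamp) * 100 + range / 2) / range, 1), 100));
}

For example, with minamp = -80 (-20 dB) and maxamp = 80 (+20 dB), a 75% slider becomes ((160 * 75) + 50) / 100 - 80 = +40 quarter dB, i.e. +10 dB.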
*/ static void hdaa_audio_ctl_set_defaults(struct hdaa_pcm_devinfo *pdevinfo) { int amp, vol, dev; for (dev = 0; dev < SOUND_MIXER_NRDEVICES; dev++) { if ((pdevinfo->ossmask & (1 << dev)) == 0) continue; /* If the value was overriden, leave it as is. */ if (resource_int_value(device_get_name(pdevinfo->dev), device_get_unit(pdevinfo->dev), ossnames[dev], &vol) == 0) continue; vol = -1; if (dev == SOUND_MIXER_OGAIN) vol = 100; else if (dev == SOUND_MIXER_IGAIN) vol = 0; else if (dev == SOUND_MIXER_MIC || dev == SOUND_MIXER_MONITOR) amp = 20 * 4; /* +20dB */ else if (dev == SOUND_MIXER_VOLUME && !pdevinfo->digital) amp = -10 * 4; /* -10dB */ else amp = 0; if (vol < 0 && (pdevinfo->maxamp[dev] - pdevinfo->minamp[dev]) <= 0) { vol = 100; } else if (vol < 0) { vol = ((amp - pdevinfo->minamp[dev]) * 100 + (pdevinfo->maxamp[dev] - pdevinfo->minamp[dev]) / 2) / (pdevinfo->maxamp[dev] - pdevinfo->minamp[dev]); vol = imin(imax(vol, 1), 100); } mix_set(pdevinfo->mixer, dev, vol, vol); } } /* * Recursively commutate specified record source. */ static uint32_t hdaa_audio_ctl_recsel_comm(struct hdaa_pcm_devinfo *pdevinfo, uint32_t src, nid_t nid, int depth) { struct hdaa_devinfo *devinfo = pdevinfo->devinfo; struct hdaa_widget *w, *cw; struct hdaa_audio_ctl *ctl; char buf[64]; int i, muted; uint32_t res = 0; if (depth > HDA_PARSE_MAXDEPTH) return (0); w = hdaa_widget_get(devinfo, nid); if (w == NULL || w->enable == 0) return (0); for (i = 0; i < w->nconns; i++) { if (w->connsenable[i] == 0) continue; cw = hdaa_widget_get(devinfo, w->conns[i]); if (cw == NULL || cw->enable == 0 || cw->bindas == -1) continue; /* Call recursively to trace signal to it's source if needed. */ if ((src & cw->ossmask) != 0) { if (cw->ossdev < 0) { res |= hdaa_audio_ctl_recsel_comm(pdevinfo, src, w->conns[i], depth + 1); } else { res |= cw->ossmask; } } /* We have two special cases: mixers and others (selectors). */ if (w->type == HDA_PARAM_AUDIO_WIDGET_CAP_TYPE_AUDIO_MIXER) { ctl = hdaa_audio_ctl_amp_get(devinfo, w->nid, HDAA_CTL_IN, i, 1); if (ctl == NULL) continue; /* If we have input control on this node mute them * according to requested sources. */ muted = (src & cw->ossmask) ? 0 : 1; if (muted != ctl->forcemute) { ctl->forcemute = muted; hdaa_audio_ctl_amp_set(ctl, HDAA_AMP_MUTE_DEFAULT, HDAA_AMP_VOL_DEFAULT, HDAA_AMP_VOL_DEFAULT); } HDA_BOOTHVERBOSE( device_printf(pdevinfo->dev, "Recsel (%s): nid %d source %d %s\n", hdaa_audio_ctl_ossmixer_mask2allname( src, buf, sizeof(buf)), nid, i, muted?"mute":"unmute"); ); } else { if (w->nconns == 1) break; if ((src & cw->ossmask) == 0) continue; /* If we found requested source - select it and exit. */ hdaa_widget_connection_select(w, i); HDA_BOOTHVERBOSE( device_printf(pdevinfo->dev, "Recsel (%s): nid %d source %d select\n", hdaa_audio_ctl_ossmixer_mask2allname( src, buf, sizeof(buf)), nid, i); ); break; } } return (res); } static uint32_t hdaa_audio_ctl_ossmixer_setrecsrc(struct snd_mixer *m, uint32_t src) { struct hdaa_pcm_devinfo *pdevinfo = mix_getdevinfo(m); struct hdaa_devinfo *devinfo = pdevinfo->devinfo; struct hdaa_widget *w; struct hdaa_audio_as *as; struct hdaa_audio_ctl *ctl; struct hdaa_chan *ch; int i, j; uint32_t ret = 0xffffffff; hdaa_lock(devinfo); if (pdevinfo->recas < 0) { hdaa_unlock(devinfo); return (0); } as = &devinfo->as[pdevinfo->recas]; /* For non-mixed associations we always recording everything. */ if (!as->mixed) { hdaa_unlock(devinfo); return (mix_getrecdevs(m)); } /* Commutate requested recsrc for each ADC. 
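In the loop that follows, ret starts as all-ones and is AND-ed with what hdaa_audio_ctl_recsel_comm() manages to select for every ADC of the association, so the mask handed back to the OSS layer contains only record sources every ADC agreed on (the 0xffffffff sentinel collapses to 0 if no ADC was visited). The two passes after it first clear the per-device volume state of shared controls for sources in the old pdevinfo->recsrc, then rerun hdaa_audio_ctl_dev_volume() for every device in the old or new selection, so deselected inputs stop contributing to the summary amplification.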
*/ for (j = 0; j < as->num_chans; j++) { ch = &devinfo->chans[as->chans[j]]; for (i = 0; ch->io[i] >= 0; i++) { w = hdaa_widget_get(devinfo, ch->io[i]); if (w == NULL || w->enable == 0) continue; ret &= hdaa_audio_ctl_recsel_comm(pdevinfo, src, ch->io[i], 0); } } if (ret == 0xffffffff) ret = 0; /* * Some controls could be shared. Reset volumes for controls * related to previously chosen devices, as they may no longer * affect the signal. */ i = 0; while ((ctl = hdaa_audio_ctl_each(devinfo, &i)) != NULL) { if (ctl->enable == 0 || !(ctl->ossmask & pdevinfo->recsrc)) continue; if (!((pdevinfo->playas >= 0 && ctl->widget->bindas == pdevinfo->playas) || (pdevinfo->recas >= 0 && ctl->widget->bindas == pdevinfo->recas) || (pdevinfo->index == 0 && ctl->widget->bindas == -2))) continue; for (j = 0; j < SOUND_MIXER_NRDEVICES; j++) { if (pdevinfo->recsrc & (1 << j)) { ctl->devleft[j] = 0; ctl->devright[j] = 0; ctl->devmute[j] = 0; } } } /* * Some controls could be shared. Set volumes for controls * related to devices selected both previously and now. */ for (j = 0; j < SOUND_MIXER_NRDEVICES; j++) { if ((ret | pdevinfo->recsrc) & (1 << j)) hdaa_audio_ctl_dev_volume(pdevinfo, j); } pdevinfo->recsrc = ret; hdaa_unlock(devinfo); return (ret); } static kobj_method_t hdaa_audio_ctl_ossmixer_methods[] = { KOBJMETHOD(mixer_init, hdaa_audio_ctl_ossmixer_init), KOBJMETHOD(mixer_set, hdaa_audio_ctl_ossmixer_set), KOBJMETHOD(mixer_setrecsrc, hdaa_audio_ctl_ossmixer_setrecsrc), KOBJMETHOD_END }; MIXER_DECLARE(hdaa_audio_ctl_ossmixer); static void hdaa_dump_gpi(struct hdaa_devinfo *devinfo) { device_t dev = devinfo->dev; int i; uint32_t data, wake, unsol, sticky; if (HDA_PARAM_GPIO_COUNT_NUM_GPI(devinfo->gpio_cap) > 0) { data = hda_command(dev, HDA_CMD_GET_GPI_DATA(0, devinfo->nid)); wake = hda_command(dev, HDA_CMD_GET_GPI_WAKE_ENABLE_MASK(0, devinfo->nid)); unsol = hda_command(dev, HDA_CMD_GET_GPI_UNSOLICITED_ENABLE_MASK(0, devinfo->nid)); sticky = hda_command(dev, HDA_CMD_GET_GPI_STICKY_MASK(0, devinfo->nid)); for (i = 0; i < HDA_PARAM_GPIO_COUNT_NUM_GPI(devinfo->gpio_cap); i++) { device_printf(dev, " GPI%d:%s%s%s state=%d", i, (sticky & (1 << i)) ? " sticky" : "", (unsol & (1 << i)) ? " unsol" : "", (wake & (1 << i)) ? " wake" : "", (data >> i) & 1); } } } static void hdaa_dump_gpio(struct hdaa_devinfo *devinfo) { device_t dev = devinfo->dev; int i; uint32_t data, dir, enable, wake, unsol, sticky; if (HDA_PARAM_GPIO_COUNT_NUM_GPIO(devinfo->gpio_cap) > 0) { data = hda_command(dev, HDA_CMD_GET_GPIO_DATA(0, devinfo->nid)); enable = hda_command(dev, HDA_CMD_GET_GPIO_ENABLE_MASK(0, devinfo->nid)); dir = hda_command(dev, HDA_CMD_GET_GPIO_DIRECTION(0, devinfo->nid)); wake = hda_command(dev, HDA_CMD_GET_GPIO_WAKE_ENABLE_MASK(0, devinfo->nid)); unsol = hda_command(dev, HDA_CMD_GET_GPIO_UNSOLICITED_ENABLE_MASK(0, devinfo->nid)); sticky = hda_command(dev, HDA_CMD_GET_GPIO_STICKY_MASK(0, devinfo->nid)); for (i = 0; i < HDA_PARAM_GPIO_COUNT_NUM_GPIO(devinfo->gpio_cap); i++) { device_printf(dev, " GPIO%d: ", i); if ((enable & (1 << i)) == 0) { printf("disabled\n"); continue; } if ((dir & (1 << i)) == 0) { printf("input%s%s%s", (sticky & (1 << i)) ? " sticky" : "", (unsol & (1 << i)) ? " unsol" : "", (wake & (1 << i)) ? 
" wake" : ""); } else printf("output"); printf(" state=%d\n", (data >> i) & 1); } } } static void hdaa_dump_gpo(struct hdaa_devinfo *devinfo) { device_t dev = devinfo->dev; int i; uint32_t data; if (HDA_PARAM_GPIO_COUNT_NUM_GPO(devinfo->gpio_cap) > 0) { data = hda_command(dev, HDA_CMD_GET_GPO_DATA(0, devinfo->nid)); for (i = 0; i < HDA_PARAM_GPIO_COUNT_NUM_GPO(devinfo->gpio_cap); i++) { device_printf(dev, " GPO%d: state=%d", i, (data >> i) & 1); } } } static void hdaa_audio_parse(struct hdaa_devinfo *devinfo) { struct hdaa_widget *w; uint32_t res; int i; nid_t nid; nid = devinfo->nid; res = hda_command(devinfo->dev, HDA_CMD_GET_PARAMETER(0, nid, HDA_PARAM_GPIO_COUNT)); devinfo->gpio_cap = res; HDA_BOOTVERBOSE( device_printf(devinfo->dev, "NumGPIO=%d NumGPO=%d " "NumGPI=%d GPIWake=%d GPIUnsol=%d\n", HDA_PARAM_GPIO_COUNT_NUM_GPIO(devinfo->gpio_cap), HDA_PARAM_GPIO_COUNT_NUM_GPO(devinfo->gpio_cap), HDA_PARAM_GPIO_COUNT_NUM_GPI(devinfo->gpio_cap), HDA_PARAM_GPIO_COUNT_GPI_WAKE(devinfo->gpio_cap), HDA_PARAM_GPIO_COUNT_GPI_UNSOL(devinfo->gpio_cap)); hdaa_dump_gpi(devinfo); hdaa_dump_gpio(devinfo); hdaa_dump_gpo(devinfo); ); res = hda_command(devinfo->dev, HDA_CMD_GET_PARAMETER(0, nid, HDA_PARAM_SUPP_STREAM_FORMATS)); devinfo->supp_stream_formats = res; res = hda_command(devinfo->dev, HDA_CMD_GET_PARAMETER(0, nid, HDA_PARAM_SUPP_PCM_SIZE_RATE)); devinfo->supp_pcm_size_rate = res; res = hda_command(devinfo->dev, HDA_CMD_GET_PARAMETER(0, nid, HDA_PARAM_OUTPUT_AMP_CAP)); devinfo->outamp_cap = res; res = hda_command(devinfo->dev, HDA_CMD_GET_PARAMETER(0, nid, HDA_PARAM_INPUT_AMP_CAP)); devinfo->inamp_cap = res; for (i = devinfo->startnode; i < devinfo->endnode; i++) { w = hdaa_widget_get(devinfo, i); if (w == NULL) device_printf(devinfo->dev, "Ghost widget! nid=%d!\n", i); else { w->devinfo = devinfo; w->nid = i; w->enable = 1; w->selconn = -1; w->pflags = 0; w->ossdev = -1; w->bindas = -1; w->param.eapdbtl = HDA_INVALID; hdaa_widget_parse(w); } } } static void hdaa_audio_postprocess(struct hdaa_devinfo *devinfo) { struct hdaa_widget *w; int i; for (i = devinfo->startnode; i < devinfo->endnode; i++) { w = hdaa_widget_get(devinfo, i); if (w == NULL) continue; hdaa_widget_postprocess(w); } } static void hdaa_audio_ctl_parse(struct hdaa_devinfo *devinfo) { struct hdaa_audio_ctl *ctls; struct hdaa_widget *w, *cw; int i, j, cnt, max, ocap, icap; int mute, offset, step, size; /* XXX This is redundant */ max = 0; for (i = devinfo->startnode; i < devinfo->endnode; i++) { w = hdaa_widget_get(devinfo, i); if (w == NULL || w->enable == 0) continue; if (w->param.outamp_cap != 0) max++; if (w->param.inamp_cap != 0) { switch (w->type) { case HDA_PARAM_AUDIO_WIDGET_CAP_TYPE_AUDIO_SELECTOR: case HDA_PARAM_AUDIO_WIDGET_CAP_TYPE_AUDIO_MIXER: for (j = 0; j < w->nconns; j++) { cw = hdaa_widget_get(devinfo, w->conns[j]); if (cw == NULL || cw->enable == 0) continue; max++; } break; default: max++; break; } } } devinfo->ctlcnt = max; if (max < 1) return; - ctls = (struct hdaa_audio_ctl *)malloc( - sizeof(*ctls) * max, M_HDAA, M_ZERO | M_NOWAIT); + ctls = (struct hdaa_audio_ctl *)mallocarray(max, + sizeof(*ctls), M_HDAA, M_ZERO | M_NOWAIT); if (ctls == NULL) { /* Blekh! 
*/ device_printf(devinfo->dev, "unable to allocate ctls!\n"); devinfo->ctlcnt = 0; return; } cnt = 0; for (i = devinfo->startnode; cnt < max && i < devinfo->endnode; i++) { if (cnt >= max) { device_printf(devinfo->dev, "%s: Ctl overflow!\n", __func__); break; } w = hdaa_widget_get(devinfo, i); if (w == NULL || w->enable == 0) continue; ocap = w->param.outamp_cap; icap = w->param.inamp_cap; if (ocap != 0) { mute = HDA_PARAM_OUTPUT_AMP_CAP_MUTE_CAP(ocap); step = HDA_PARAM_OUTPUT_AMP_CAP_NUMSTEPS(ocap); size = HDA_PARAM_OUTPUT_AMP_CAP_STEPSIZE(ocap); offset = HDA_PARAM_OUTPUT_AMP_CAP_OFFSET(ocap); /*if (offset > step) { HDA_BOOTVERBOSE( device_printf(devinfo->dev, "BUGGY outamp: nid=%d " "[offset=%d > step=%d]\n", w->nid, offset, step); ); offset = step; }*/ ctls[cnt].enable = 1; ctls[cnt].widget = w; ctls[cnt].mute = mute; ctls[cnt].step = step; ctls[cnt].size = size; ctls[cnt].offset = offset; ctls[cnt].left = offset; ctls[cnt].right = offset; if (w->type == HDA_PARAM_AUDIO_WIDGET_CAP_TYPE_PIN_COMPLEX || w->waspin) ctls[cnt].ndir = HDAA_CTL_IN; else ctls[cnt].ndir = HDAA_CTL_OUT; ctls[cnt++].dir = HDAA_CTL_OUT; } if (icap != 0) { mute = HDA_PARAM_OUTPUT_AMP_CAP_MUTE_CAP(icap); step = HDA_PARAM_OUTPUT_AMP_CAP_NUMSTEPS(icap); size = HDA_PARAM_OUTPUT_AMP_CAP_STEPSIZE(icap); offset = HDA_PARAM_OUTPUT_AMP_CAP_OFFSET(icap); /*if (offset > step) { HDA_BOOTVERBOSE( device_printf(devinfo->dev, "BUGGY inamp: nid=%d " "[offset=%d > step=%d]\n", w->nid, offset, step); ); offset = step; }*/ switch (w->type) { case HDA_PARAM_AUDIO_WIDGET_CAP_TYPE_AUDIO_SELECTOR: case HDA_PARAM_AUDIO_WIDGET_CAP_TYPE_AUDIO_MIXER: for (j = 0; j < w->nconns; j++) { if (cnt >= max) { device_printf(devinfo->dev, "%s: Ctl overflow!\n", __func__); break; } cw = hdaa_widget_get(devinfo, w->conns[j]); if (cw == NULL || cw->enable == 0) continue; ctls[cnt].enable = 1; ctls[cnt].widget = w; ctls[cnt].childwidget = cw; ctls[cnt].index = j; ctls[cnt].mute = mute; ctls[cnt].step = step; ctls[cnt].size = size; ctls[cnt].offset = offset; ctls[cnt].left = offset; ctls[cnt].right = offset; ctls[cnt].ndir = HDAA_CTL_IN; ctls[cnt++].dir = HDAA_CTL_IN; } break; default: if (cnt >= max) { device_printf(devinfo->dev, "%s: Ctl overflow!\n", __func__); break; } ctls[cnt].enable = 1; ctls[cnt].widget = w; ctls[cnt].mute = mute; ctls[cnt].step = step; ctls[cnt].size = size; ctls[cnt].offset = offset; ctls[cnt].left = offset; ctls[cnt].right = offset; if (w->type == HDA_PARAM_AUDIO_WIDGET_CAP_TYPE_PIN_COMPLEX) ctls[cnt].ndir = HDAA_CTL_OUT; else ctls[cnt].ndir = HDAA_CTL_IN; ctls[cnt++].dir = HDAA_CTL_IN; break; } } } devinfo->ctl = ctls; } static void hdaa_audio_as_parse(struct hdaa_devinfo *devinfo) { struct hdaa_audio_as *as; struct hdaa_widget *w; int i, j, cnt, max, type, dir, assoc, seq, first, hpredir; /* Count present associations */ max = 0; for (j = 1; j < 16; j++) { for (i = devinfo->startnode; i < devinfo->endnode; i++) { w = hdaa_widget_get(devinfo, i); if (w == NULL || w->enable == 0) continue; if (w->type != HDA_PARAM_AUDIO_WIDGET_CAP_TYPE_PIN_COMPLEX) continue; if (HDA_CONFIG_DEFAULTCONF_ASSOCIATION(w->wclass.pin.config) != j) continue; max++; if (j != 15) /* There could be many 1-pin assocs #15 */ break; } } devinfo->ascnt = max; if (max < 1) return; - as = (struct hdaa_audio_as *)malloc( - sizeof(*as) * max, M_HDAA, M_ZERO | M_NOWAIT); + as = (struct hdaa_audio_as *)mallocarray(max, + sizeof(*as), M_HDAA, M_ZERO | M_NOWAIT); if (as == NULL) { /* Blekh! 
*/ device_printf(devinfo->dev, "unable to allocate assocs!\n"); devinfo->ascnt = 0; return; } for (i = 0; i < max; i++) { as[i].hpredir = -1; as[i].digital = 0; as[i].num_chans = 1; as[i].location = -1; } /* Scan associations skipping as=0. */ cnt = 0; for (j = 1; j < 16 && cnt < max; j++) { first = 16; hpredir = 0; for (i = devinfo->startnode; i < devinfo->endnode; i++) { w = hdaa_widget_get(devinfo, i); if (w == NULL || w->enable == 0) continue; if (w->type != HDA_PARAM_AUDIO_WIDGET_CAP_TYPE_PIN_COMPLEX) continue; assoc = HDA_CONFIG_DEFAULTCONF_ASSOCIATION(w->wclass.pin.config); seq = HDA_CONFIG_DEFAULTCONF_SEQUENCE(w->wclass.pin.config); if (assoc != j) { continue; } KASSERT(cnt < max, ("%s: Associations owerflow (%d of %d)", __func__, cnt, max)); type = w->wclass.pin.config & HDA_CONFIG_DEFAULTCONF_DEVICE_MASK; /* Get pin direction. */ if (type == HDA_CONFIG_DEFAULTCONF_DEVICE_LINE_OUT || type == HDA_CONFIG_DEFAULTCONF_DEVICE_SPEAKER || type == HDA_CONFIG_DEFAULTCONF_DEVICE_HP_OUT || type == HDA_CONFIG_DEFAULTCONF_DEVICE_SPDIF_OUT || type == HDA_CONFIG_DEFAULTCONF_DEVICE_DIGITAL_OTHER_OUT) dir = HDAA_CTL_OUT; else dir = HDAA_CTL_IN; /* If this is a first pin - create new association. */ if (as[cnt].pincnt == 0) { as[cnt].enable = 1; as[cnt].index = j; as[cnt].dir = dir; } if (seq < first) first = seq; /* Check association correctness. */ if (as[cnt].pins[seq] != 0) { device_printf(devinfo->dev, "%s: Duplicate pin %d (%d) " "in association %d! Disabling association.\n", __func__, seq, w->nid, j); as[cnt].enable = 0; } if (dir != as[cnt].dir) { device_printf(devinfo->dev, "%s: Pin %d has wrong " "direction for association %d! Disabling " "association.\n", __func__, w->nid, j); as[cnt].enable = 0; } if (HDA_PARAM_AUDIO_WIDGET_CAP_DIGITAL(w->param.widget_cap)) { as[cnt].digital |= 0x1; if (HDA_PARAM_PIN_CAP_HDMI(w->wclass.pin.cap)) as[cnt].digital |= 0x2; if (HDA_PARAM_PIN_CAP_DP(w->wclass.pin.cap)) as[cnt].digital |= 0x4; } if (as[cnt].location == -1) { as[cnt].location = HDA_CONFIG_DEFAULTCONF_LOCATION(w->wclass.pin.config); } else if (as[cnt].location != HDA_CONFIG_DEFAULTCONF_LOCATION(w->wclass.pin.config)) { as[cnt].location = -2; } /* Headphones with seq=15 may mean redirection. */ if (type == HDA_CONFIG_DEFAULTCONF_DEVICE_HP_OUT && seq == 15) hpredir = 1; as[cnt].pins[seq] = w->nid; as[cnt].pincnt++; /* Association 15 is a multiple unassociated pins. */ if (j == 15) cnt++; } if (j != 15 && as[cnt].pincnt > 0) { if (hpredir && as[cnt].pincnt > 1) as[cnt].hpredir = first; cnt++; } } for (i = 0; i < max; i++) { if (as[i].dir == HDAA_CTL_IN && (as[i].pincnt == 1 || as[i].pins[14] > 0 || as[i].pins[15] > 0)) as[i].mixed = 1; } HDA_BOOTVERBOSE( device_printf(devinfo->dev, "%d associations found:\n", max); for (i = 0; i < max; i++) { device_printf(devinfo->dev, "Association %d (%d) %s%s:\n", i, as[i].index, (as[i].dir == HDAA_CTL_IN)?"in":"out", as[i].enable?"":" (disabled)"); for (j = 0; j < 16; j++) { if (as[i].pins[j] == 0) continue; device_printf(devinfo->dev, " Pin nid=%d seq=%d\n", as[i].pins[j], j); } } ); devinfo->as = as; } /* * Trace path from DAC to pin. 
*/ static nid_t hdaa_audio_trace_dac(struct hdaa_devinfo *devinfo, int as, int seq, nid_t nid, int dupseq, int min, int only, int depth) { struct hdaa_widget *w; int i, im = -1; nid_t m = 0, ret; if (depth > HDA_PARSE_MAXDEPTH) return (0); w = hdaa_widget_get(devinfo, nid); if (w == NULL || w->enable == 0) return (0); HDA_BOOTHVERBOSE( if (!only) { device_printf(devinfo->dev, " %*stracing via nid %d\n", depth + 1, "", w->nid); } ); /* Use only unused widgets */ if (w->bindas >= 0 && w->bindas != as) { HDA_BOOTHVERBOSE( if (!only) { device_printf(devinfo->dev, " %*snid %d busy by association %d\n", depth + 1, "", w->nid, w->bindas); } ); return (0); } if (dupseq < 0) { if (w->bindseqmask != 0) { HDA_BOOTHVERBOSE( if (!only) { device_printf(devinfo->dev, " %*snid %d busy by seqmask %x\n", depth + 1, "", w->nid, w->bindseqmask); } ); return (0); } } else { /* If this is headphones - allow duplicate first pin. */ if (w->bindseqmask != 0 && (w->bindseqmask & (1 << dupseq)) == 0) { HDA_BOOTHVERBOSE( device_printf(devinfo->dev, " %*snid %d busy by seqmask %x\n", depth + 1, "", w->nid, w->bindseqmask); ); return (0); } } switch (w->type) { case HDA_PARAM_AUDIO_WIDGET_CAP_TYPE_AUDIO_INPUT: /* Do not traverse input. AD1988 has digital monitor for which we are not ready. */ break; case HDA_PARAM_AUDIO_WIDGET_CAP_TYPE_AUDIO_OUTPUT: /* If we are tracing HP take only dac of first pin. */ if ((only == 0 || only == w->nid) && (w->nid >= min) && (dupseq < 0 || w->nid == devinfo->as[as].dacs[0][dupseq])) m = w->nid; break; case HDA_PARAM_AUDIO_WIDGET_CAP_TYPE_PIN_COMPLEX: if (depth > 0) break; /* Fall */ default: /* Find reachable DACs with smallest nid respecting constraints. */ for (i = 0; i < w->nconns; i++) { if (w->connsenable[i] == 0) continue; if (w->selconn != -1 && w->selconn != i) continue; if ((ret = hdaa_audio_trace_dac(devinfo, as, seq, w->conns[i], dupseq, min, only, depth + 1)) != 0) { if (m == 0 || ret < m) { m = ret; im = i; } if (only || dupseq >= 0) break; } } if (im >= 0 && only && ((w->nconns > 1 && w->type != HDA_PARAM_AUDIO_WIDGET_CAP_TYPE_AUDIO_MIXER) || w->type == HDA_PARAM_AUDIO_WIDGET_CAP_TYPE_AUDIO_SELECTOR)) w->selconn = im; break; } if (m && only) { w->bindas = as; w->bindseqmask |= (1 << seq); } HDA_BOOTHVERBOSE( if (!only) { device_printf(devinfo->dev, " %*snid %d returned %d\n", depth + 1, "", w->nid, m); } ); return (m); } /* * Trace path from widget to ADC. 
*/ static nid_t hdaa_audio_trace_adc(struct hdaa_devinfo *devinfo, int as, int seq, nid_t nid, int mixed, int min, int only, int depth, int *length, int onlylength) { struct hdaa_widget *w, *wc; int i, j, im, lm = HDA_PARSE_MAXDEPTH; nid_t m = 0, ret; if (depth > HDA_PARSE_MAXDEPTH) return (0); w = hdaa_widget_get(devinfo, nid); if (w == NULL || w->enable == 0) return (0); HDA_BOOTHVERBOSE( device_printf(devinfo->dev, " %*stracing via nid %d\n", depth + 1, "", w->nid); ); /* Use only unused widgets */ if (w->bindas >= 0 && w->bindas != as) { HDA_BOOTHVERBOSE( device_printf(devinfo->dev, " %*snid %d busy by association %d\n", depth + 1, "", w->nid, w->bindas); ); return (0); } if (!mixed && w->bindseqmask != 0) { HDA_BOOTHVERBOSE( device_printf(devinfo->dev, " %*snid %d busy by seqmask %x\n", depth + 1, "", w->nid, w->bindseqmask); ); return (0); } switch (w->type) { case HDA_PARAM_AUDIO_WIDGET_CAP_TYPE_AUDIO_INPUT: if ((only == 0 || only == w->nid) && (w->nid >= min) && (onlylength == 0 || onlylength == depth)) { m = w->nid; *length = depth; } break; case HDA_PARAM_AUDIO_WIDGET_CAP_TYPE_PIN_COMPLEX: if (depth > 0) break; /* Fall */ default: /* Try to find reachable ADCs with specified nid. */ for (j = devinfo->startnode; j < devinfo->endnode; j++) { wc = hdaa_widget_get(devinfo, j); if (wc == NULL || wc->enable == 0) continue; im = -1; for (i = 0; i < wc->nconns; i++) { if (wc->connsenable[i] == 0) continue; if (wc->conns[i] != nid) continue; if ((ret = hdaa_audio_trace_adc(devinfo, as, seq, j, mixed, min, only, depth + 1, length, onlylength)) != 0) { if (m == 0 || ret < m || (ret == m && *length < lm)) { m = ret; im = i; lm = *length; } else *length = lm; if (only) break; } } if (im >= 0 && only && ((wc->nconns > 1 && wc->type != HDA_PARAM_AUDIO_WIDGET_CAP_TYPE_AUDIO_MIXER) || wc->type == HDA_PARAM_AUDIO_WIDGET_CAP_TYPE_AUDIO_SELECTOR)) wc->selconn = im; } break; } if (m && only) { w->bindas = as; w->bindseqmask |= (1 << seq); } HDA_BOOTHVERBOSE( device_printf(devinfo->dev, " %*snid %d returned %d\n", depth + 1, "", w->nid, m); ); return (m); } /* * Erase trace path of the specified association. */ static void hdaa_audio_undo_trace(struct hdaa_devinfo *devinfo, int as, int seq) { struct hdaa_widget *w; int i; for (i = devinfo->startnode; i < devinfo->endnode; i++) { w = hdaa_widget_get(devinfo, i); if (w == NULL || w->enable == 0) continue; if (w->bindas == as) { if (seq >= 0) { w->bindseqmask &= ~(1 << seq); if (w->bindseqmask == 0) { w->bindas = -1; w->selconn = -1; } } else { w->bindas = -1; w->bindseqmask = 0; w->selconn = -1; } } } } /* * Trace association path from DAC to output */ static int hdaa_audio_trace_as_out(struct hdaa_devinfo *devinfo, int as, int seq) { struct hdaa_audio_as *ases = devinfo->as; int i, hpredir; nid_t min, res; /* Find next pin */ for (i = seq; i < 16 && ases[as].pins[i] == 0; i++) ; /* Check if there is no any left. If so - we succeeded. */ if (i == 16) return (1); hpredir = (i == 15 && ases[as].fakeredir == 0)?ases[as].hpredir:-1; min = 0; do { HDA_BOOTHVERBOSE( device_printf(devinfo->dev, " Tracing pin %d with min nid %d", ases[as].pins[i], min); if (hpredir >= 0) printf(" and hpredir %d", hpredir); printf("\n"); ); /* Trace this pin taking min nid into account. */ res = hdaa_audio_trace_dac(devinfo, as, i, ases[as].pins[i], hpredir, min, 0, 0); if (res == 0) { /* If we failed - return to previous and redo it. 
*/ HDA_BOOTVERBOSE( device_printf(devinfo->dev, " Unable to trace pin %d seq %d with min " "nid %d", ases[as].pins[i], i, min); if (hpredir >= 0) printf(" and hpredir %d", hpredir); printf("\n"); ); return (0); } HDA_BOOTVERBOSE( device_printf(devinfo->dev, " Pin %d traced to DAC %d", ases[as].pins[i], res); if (hpredir >= 0) printf(" and hpredir %d", hpredir); if (ases[as].fakeredir) printf(" with fake redirection"); printf("\n"); ); /* Trace again to mark the path */ hdaa_audio_trace_dac(devinfo, as, i, ases[as].pins[i], hpredir, min, res, 0); ases[as].dacs[0][i] = res; /* We succeeded, so call next. */ if (hdaa_audio_trace_as_out(devinfo, as, i + 1)) return (1); /* If next failed, we should retry with next min */ hdaa_audio_undo_trace(devinfo, as, i); ases[as].dacs[0][i] = 0; min = res + 1; } while (1); } /* * Check equivalency of two DACs. */ static int hdaa_audio_dacs_equal(struct hdaa_widget *w1, struct hdaa_widget *w2) { struct hdaa_devinfo *devinfo = w1->devinfo; struct hdaa_widget *w3; int i, j, c1, c2; if (memcmp(&w1->param, &w2->param, sizeof(w1->param))) return (0); for (i = devinfo->startnode; i < devinfo->endnode; i++) { w3 = hdaa_widget_get(devinfo, i); if (w3 == NULL || w3->enable == 0) continue; if (w3->bindas != w1->bindas) continue; if (w3->nconns == 0) continue; c1 = c2 = -1; for (j = 0; j < w3->nconns; j++) { if (w3->connsenable[j] == 0) continue; if (w3->conns[j] == w1->nid) c1 = j; if (w3->conns[j] == w2->nid) c2 = j; } if (c1 < 0) continue; if (c2 < 0) return (0); if (w3->type != HDA_PARAM_AUDIO_WIDGET_CAP_TYPE_AUDIO_MIXER) return (0); } return (1); } /* * Check equivalency of two ADCs. */ static int hdaa_audio_adcs_equal(struct hdaa_widget *w1, struct hdaa_widget *w2) { struct hdaa_devinfo *devinfo = w1->devinfo; struct hdaa_widget *w3, *w4; int i; if (memcmp(&w1->param, &w2->param, sizeof(w1->param))) return (0); if (w1->nconns != 1 || w2->nconns != 1) return (0); if (w1->conns[0] == w2->conns[0]) return (1); w3 = hdaa_widget_get(devinfo, w1->conns[0]); if (w3 == NULL || w3->enable == 0) return (0); w4 = hdaa_widget_get(devinfo, w2->conns[0]); if (w4 == NULL || w4->enable == 0) return (0); if (w3->bindas == w4->bindas && w3->bindseqmask == w4->bindseqmask) return (1); if (w4->bindas >= 0) return (0); if (w3->type != w4->type) return (0); if (memcmp(&w3->param, &w4->param, sizeof(w3->param))) return (0); if (w3->nconns != w4->nconns) return (0); for (i = 0; i < w3->nconns; i++) { if (w3->conns[i] != w4->conns[i]) return (0); } return (1); } /* * Look for equivalent DAC/ADC to implement second channel. */ static void hdaa_audio_adddac(struct hdaa_devinfo *devinfo, int asid) { struct hdaa_audio_as *as = &devinfo->as[asid]; struct hdaa_widget *w1, *w2; int i, pos; nid_t nid1, nid2; HDA_BOOTVERBOSE( device_printf(devinfo->dev, "Looking for additional %sC " "for association %d (%d)\n", (as->dir == HDAA_CTL_OUT) ? "DA" : "AD", asid, as->index); ); /* Find the exisitng DAC position and return if found more the one. 
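hdaa_audio_trace_as_out() above is a backtracking search: for the current pin it takes the lowest-numbered usable DAC that hdaa_audio_trace_dac() can reach (respecting min and, for the seq 15 headphone pin, the dupseq duplication rule), traces again with only set to mark the chosen path, and recurses to the next pin; if the remaining pins cannot be traced it rolls the marking back with hdaa_audio_undo_trace() and retries the current pin with min = res + 1, i.e. the next larger DAC nid. hdaa_audio_trace_as_in_mch() further down repeats the same scheme when mapping pins to separate ADCs.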
*/ pos = -1; for (i = 0; i < 16; i++) { if (as->dacs[0][i] <= 0) continue; if (pos >= 0 && as->dacs[0][i] != as->dacs[0][pos]) return; pos = i; } nid1 = as->dacs[0][pos]; w1 = hdaa_widget_get(devinfo, nid1); w2 = NULL; for (nid2 = devinfo->startnode; nid2 < devinfo->endnode; nid2++) { w2 = hdaa_widget_get(devinfo, nid2); if (w2 == NULL || w2->enable == 0) continue; if (w2->bindas >= 0) continue; if (w1->type == HDA_PARAM_AUDIO_WIDGET_CAP_TYPE_AUDIO_OUTPUT) { if (w2->type != HDA_PARAM_AUDIO_WIDGET_CAP_TYPE_AUDIO_OUTPUT) continue; if (hdaa_audio_dacs_equal(w1, w2)) break; } else { if (w2->type != HDA_PARAM_AUDIO_WIDGET_CAP_TYPE_AUDIO_INPUT) continue; if (hdaa_audio_adcs_equal(w1, w2)) break; } } if (nid2 >= devinfo->endnode) return; w2->bindas = w1->bindas; w2->bindseqmask = w1->bindseqmask; if (w1->type == HDA_PARAM_AUDIO_WIDGET_CAP_TYPE_AUDIO_INPUT) { HDA_BOOTVERBOSE( device_printf(devinfo->dev, " ADC %d considered equal to ADC %d\n", nid2, nid1); ); w1 = hdaa_widget_get(devinfo, w1->conns[0]); w2 = hdaa_widget_get(devinfo, w2->conns[0]); w2->bindas = w1->bindas; w2->bindseqmask = w1->bindseqmask; } else { HDA_BOOTVERBOSE( device_printf(devinfo->dev, " DAC %d considered equal to DAC %d\n", nid2, nid1); ); } for (i = 0; i < 16; i++) { if (as->dacs[0][i] <= 0) continue; as->dacs[as->num_chans][i] = nid2; } as->num_chans++; } /* * Trace association path from input to ADC */ static int hdaa_audio_trace_as_in(struct hdaa_devinfo *devinfo, int as) { struct hdaa_audio_as *ases = devinfo->as; struct hdaa_widget *w; int i, j, k, length; for (j = devinfo->startnode; j < devinfo->endnode; j++) { w = hdaa_widget_get(devinfo, j); if (w == NULL || w->enable == 0) continue; if (w->type != HDA_PARAM_AUDIO_WIDGET_CAP_TYPE_AUDIO_INPUT) continue; if (w->bindas >= 0 && w->bindas != as) continue; /* Find next pin */ for (i = 0; i < 16; i++) { if (ases[as].pins[i] == 0) continue; HDA_BOOTHVERBOSE( device_printf(devinfo->dev, " Tracing pin %d to ADC %d\n", ases[as].pins[i], j); ); /* Trace this pin taking goal into account. */ if (hdaa_audio_trace_adc(devinfo, as, i, ases[as].pins[i], 1, 0, j, 0, &length, 0) == 0) { /* If we failed - return to previous and redo it. */ HDA_BOOTVERBOSE( device_printf(devinfo->dev, " Unable to trace pin %d to ADC %d, undo traces\n", ases[as].pins[i], j); ); hdaa_audio_undo_trace(devinfo, as, -1); for (k = 0; k < 16; k++) ases[as].dacs[0][k] = 0; break; } HDA_BOOTVERBOSE( device_printf(devinfo->dev, " Pin %d traced to ADC %d\n", ases[as].pins[i], j); ); ases[as].dacs[0][i] = j; } if (i == 16) return (1); } return (0); } /* * Trace association path from input to multiple ADCs */ static int hdaa_audio_trace_as_in_mch(struct hdaa_devinfo *devinfo, int as, int seq) { struct hdaa_audio_as *ases = devinfo->as; int i, length; nid_t min, res; /* Find next pin */ for (i = seq; i < 16 && ases[as].pins[i] == 0; i++) ; /* Check if there is no any left. If so - we succeeded. */ if (i == 16) return (1); min = 0; do { HDA_BOOTHVERBOSE( device_printf(devinfo->dev, " Tracing pin %d with min nid %d", ases[as].pins[i], min); printf("\n"); ); /* Trace this pin taking min nid into account. */ res = hdaa_audio_trace_adc(devinfo, as, i, ases[as].pins[i], 0, min, 0, 0, &length, 0); if (res == 0) { /* If we failed - return to previous and redo it. 
*/ HDA_BOOTVERBOSE( device_printf(devinfo->dev, " Unable to trace pin %d seq %d with min " "nid %d", ases[as].pins[i], i, min); printf("\n"); ); return (0); } HDA_BOOTVERBOSE( device_printf(devinfo->dev, " Pin %d traced to ADC %d\n", ases[as].pins[i], res); ); /* Trace again to mark the path */ hdaa_audio_trace_adc(devinfo, as, i, ases[as].pins[i], 0, min, res, 0, &length, length); ases[as].dacs[0][i] = res; /* We succeeded, so call next. */ if (hdaa_audio_trace_as_in_mch(devinfo, as, i + 1)) return (1); /* If next failed, we should retry with next min */ hdaa_audio_undo_trace(devinfo, as, i); ases[as].dacs[0][i] = 0; min = res + 1; } while (1); } /* * Trace input monitor path from mixer to output association. */ static int hdaa_audio_trace_to_out(struct hdaa_devinfo *devinfo, nid_t nid, int depth) { struct hdaa_audio_as *ases = devinfo->as; struct hdaa_widget *w, *wc; int i, j; nid_t res = 0; if (depth > HDA_PARSE_MAXDEPTH) return (0); w = hdaa_widget_get(devinfo, nid); if (w == NULL || w->enable == 0) return (0); HDA_BOOTHVERBOSE( device_printf(devinfo->dev, " %*stracing via nid %d\n", depth + 1, "", w->nid); ); /* Use only unused widgets */ if (depth > 0 && w->bindas != -1) { if (w->bindas < 0 || ases[w->bindas].dir == HDAA_CTL_OUT) { HDA_BOOTHVERBOSE( device_printf(devinfo->dev, " %*snid %d found output association %d\n", depth + 1, "", w->nid, w->bindas); ); if (w->bindas >= 0) w->pflags |= HDAA_ADC_MONITOR; return (1); } else { HDA_BOOTHVERBOSE( device_printf(devinfo->dev, " %*snid %d busy by input association %d\n", depth + 1, "", w->nid, w->bindas); ); return (0); } } switch (w->type) { case HDA_PARAM_AUDIO_WIDGET_CAP_TYPE_AUDIO_INPUT: /* Do not traverse input. AD1988 has digital monitor for which we are not ready. */ break; case HDA_PARAM_AUDIO_WIDGET_CAP_TYPE_PIN_COMPLEX: if (depth > 0) break; /* Fall */ default: /* Try to find reachable ADCs with specified nid. */ for (j = devinfo->startnode; j < devinfo->endnode; j++) { wc = hdaa_widget_get(devinfo, j); if (wc == NULL || wc->enable == 0) continue; for (i = 0; i < wc->nconns; i++) { if (wc->connsenable[i] == 0) continue; if (wc->conns[i] != nid) continue; if (hdaa_audio_trace_to_out(devinfo, j, depth + 1) != 0) { res = 1; if (wc->type == HDA_PARAM_AUDIO_WIDGET_CAP_TYPE_AUDIO_SELECTOR && wc->selconn == -1) wc->selconn = i; } } } break; } if (res && w->bindas == -1) w->bindas = -2; HDA_BOOTHVERBOSE( device_printf(devinfo->dev, " %*snid %d returned %d\n", depth + 1, "", w->nid, res); ); return (res); } /* * Trace extra associations (beeper, monitor) */ static void hdaa_audio_trace_as_extra(struct hdaa_devinfo *devinfo) { struct hdaa_audio_as *as = devinfo->as; struct hdaa_widget *w; int j; /* Input monitor */ /* Find mixer associated with input, but supplying signal for output associations. Hope it will be input monitor. */ HDA_BOOTVERBOSE( device_printf(devinfo->dev, "Tracing input monitor\n"); ); for (j = devinfo->startnode; j < devinfo->endnode; j++) { w = hdaa_widget_get(devinfo, j); if (w == NULL || w->enable == 0) continue; if (w->type != HDA_PARAM_AUDIO_WIDGET_CAP_TYPE_AUDIO_MIXER) continue; if (w->bindas < 0 || as[w->bindas].dir != HDAA_CTL_IN) continue; HDA_BOOTVERBOSE( device_printf(devinfo->dev, " Tracing nid %d to out\n", j); ); if (hdaa_audio_trace_to_out(devinfo, w->nid, 0)) { HDA_BOOTVERBOSE( device_printf(devinfo->dev, " nid %d is input monitor\n", w->nid); ); w->ossdev = SOUND_MIXER_IMIX; } } /* Other inputs monitor */ /* Find input pins supplying signal for output associations. Hope it will be input monitoring. 
*/ HDA_BOOTVERBOSE( device_printf(devinfo->dev, "Tracing other input monitors\n"); ); for (j = devinfo->startnode; j < devinfo->endnode; j++) { w = hdaa_widget_get(devinfo, j); if (w == NULL || w->enable == 0) continue; if (w->type != HDA_PARAM_AUDIO_WIDGET_CAP_TYPE_PIN_COMPLEX) continue; if (w->bindas < 0 || as[w->bindas].dir != HDAA_CTL_IN) continue; HDA_BOOTVERBOSE( device_printf(devinfo->dev, " Tracing nid %d to out\n", j); ); if (hdaa_audio_trace_to_out(devinfo, w->nid, 0)) { HDA_BOOTVERBOSE( device_printf(devinfo->dev, " nid %d is input monitor\n", w->nid); ); } } /* Beeper */ HDA_BOOTVERBOSE( device_printf(devinfo->dev, "Tracing beeper\n"); ); for (j = devinfo->startnode; j < devinfo->endnode; j++) { w = hdaa_widget_get(devinfo, j); if (w == NULL || w->enable == 0) continue; if (w->type != HDA_PARAM_AUDIO_WIDGET_CAP_TYPE_BEEP_WIDGET) continue; HDA_BOOTHVERBOSE( device_printf(devinfo->dev, " Tracing nid %d to out\n", j); ); if (hdaa_audio_trace_to_out(devinfo, w->nid, 0)) { HDA_BOOTVERBOSE( device_printf(devinfo->dev, " nid %d traced to out\n", j); ); } w->bindas = -2; } } /* * Bind assotiations to PCM channels */ static void hdaa_audio_bind_as(struct hdaa_devinfo *devinfo) { struct hdaa_audio_as *as = devinfo->as; int i, j, cnt = 0, free; for (j = 0; j < devinfo->ascnt; j++) { if (as[j].enable) cnt += as[j].num_chans; } if (devinfo->num_chans == 0) { - devinfo->chans = (struct hdaa_chan *)malloc( - sizeof(struct hdaa_chan) * cnt, + devinfo->chans = (struct hdaa_chan *)mallocarray(cnt, + sizeof(struct hdaa_chan), M_HDAA, M_ZERO | M_NOWAIT); if (devinfo->chans == NULL) { device_printf(devinfo->dev, "Channels memory allocation failed!\n"); return; } } else { devinfo->chans = (struct hdaa_chan *)realloc(devinfo->chans, sizeof(struct hdaa_chan) * (devinfo->num_chans + cnt), M_HDAA, M_ZERO | M_NOWAIT); if (devinfo->chans == NULL) { devinfo->num_chans = 0; device_printf(devinfo->dev, "Channels memory allocation failed!\n"); return; } /* Fixup relative pointers after realloc */ for (j = 0; j < devinfo->num_chans; j++) devinfo->chans[j].caps.fmtlist = devinfo->chans[j].fmtlist; } free = devinfo->num_chans; devinfo->num_chans += cnt; for (j = free; j < free + cnt; j++) { devinfo->chans[j].devinfo = devinfo; devinfo->chans[j].as = -1; } /* Assign associations in order of their numbers, */ for (j = 0; j < devinfo->ascnt; j++) { if (as[j].enable == 0) continue; for (i = 0; i < as[j].num_chans; i++) { devinfo->chans[free].as = j; devinfo->chans[free].asindex = i; devinfo->chans[free].dir = (as[j].dir == HDAA_CTL_IN) ? PCMDIR_REC : PCMDIR_PLAY; hdaa_pcmchannel_setup(&devinfo->chans[free]); as[j].chans[i] = free; free++; } } } static void hdaa_audio_disable_nonaudio(struct hdaa_devinfo *devinfo) { struct hdaa_widget *w; int i; /* Disable power and volume widgets. */ for (i = devinfo->startnode; i < devinfo->endnode; i++) { w = hdaa_widget_get(devinfo, i); if (w == NULL || w->enable == 0) continue; if (w->type == HDA_PARAM_AUDIO_WIDGET_CAP_TYPE_POWER_WIDGET || w->type == HDA_PARAM_AUDIO_WIDGET_CAP_TYPE_VOLUME_WIDGET) { w->enable = 0; HDA_BOOTHVERBOSE( device_printf(devinfo->dev, " Disabling nid %d due to it's" " non-audio type.\n", w->nid); ); } } } static void hdaa_audio_disable_useless(struct hdaa_devinfo *devinfo) { struct hdaa_widget *w, *cw; struct hdaa_audio_ctl *ctl; int done, found, i, j, k; /* Disable useless pins. 
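One related detail in hdaa_audio_bind_as() above: the initial allocation of devinfo->chans now goes through mallocarray(9), while the realloc() grow path next to it still multiplies the element size by hand. A caller-side check is one way to apply the same overflow protection there, sketched below against the standard SIZE_MAX bound (illustrative only, not part of this change):

	size_t nchans;

	nchans = (size_t)devinfo->num_chans + cnt;
	if (nchans > SIZE_MAX / sizeof(struct hdaa_chan))
		return;		/* treat overflow like a failed allocation */
	devinfo->chans = (struct hdaa_chan *)realloc(devinfo->chans,
	    nchans * sizeof(struct hdaa_chan), M_HDAA, M_ZERO | M_NOWAIT);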
*/ for (i = devinfo->startnode; i < devinfo->endnode; i++) { w = hdaa_widget_get(devinfo, i); if (w == NULL || w->enable == 0) continue; if (w->type == HDA_PARAM_AUDIO_WIDGET_CAP_TYPE_PIN_COMPLEX) { if ((w->wclass.pin.config & HDA_CONFIG_DEFAULTCONF_CONNECTIVITY_MASK) == HDA_CONFIG_DEFAULTCONF_CONNECTIVITY_NONE) { w->enable = 0; HDA_BOOTHVERBOSE( device_printf(devinfo->dev, " Disabling pin nid %d due" " to None connectivity.\n", w->nid); ); } else if ((w->wclass.pin.config & HDA_CONFIG_DEFAULTCONF_ASSOCIATION_MASK) == 0) { w->enable = 0; HDA_BOOTHVERBOSE( device_printf(devinfo->dev, " Disabling unassociated" " pin nid %d.\n", w->nid); ); } } } do { done = 1; /* Disable and mute controls for disabled widgets. */ i = 0; while ((ctl = hdaa_audio_ctl_each(devinfo, &i)) != NULL) { if (ctl->enable == 0) continue; if (ctl->widget->enable == 0 || (ctl->childwidget != NULL && ctl->childwidget->enable == 0)) { ctl->forcemute = 1; ctl->muted = HDAA_AMP_MUTE_ALL; ctl->left = 0; ctl->right = 0; ctl->enable = 0; if (ctl->ndir == HDAA_CTL_IN) ctl->widget->connsenable[ctl->index] = 0; done = 0; HDA_BOOTHVERBOSE( device_printf(devinfo->dev, " Disabling ctl %d nid %d cnid %d due" " to disabled widget.\n", i, ctl->widget->nid, (ctl->childwidget != NULL)? ctl->childwidget->nid:-1); ); } } /* Disable useless widgets. */ for (i = devinfo->startnode; i < devinfo->endnode; i++) { w = hdaa_widget_get(devinfo, i); if (w == NULL || w->enable == 0) continue; /* Disable inputs with disabled child widgets. */ for (j = 0; j < w->nconns; j++) { if (w->connsenable[j]) { cw = hdaa_widget_get(devinfo, w->conns[j]); if (cw == NULL || cw->enable == 0) { w->connsenable[j] = 0; HDA_BOOTHVERBOSE( device_printf(devinfo->dev, " Disabling nid %d connection %d due" " to disabled child widget.\n", i, j); ); } } } if (w->type != HDA_PARAM_AUDIO_WIDGET_CAP_TYPE_AUDIO_SELECTOR && w->type != HDA_PARAM_AUDIO_WIDGET_CAP_TYPE_AUDIO_MIXER) continue; /* Disable mixers and selectors without inputs. */ found = 0; for (j = 0; j < w->nconns; j++) { if (w->connsenable[j]) { found = 1; break; } } if (found == 0) { w->enable = 0; done = 0; HDA_BOOTHVERBOSE( device_printf(devinfo->dev, " Disabling nid %d due to all it's" " inputs disabled.\n", w->nid); ); } /* Disable nodes without consumers. */ if (w->type != HDA_PARAM_AUDIO_WIDGET_CAP_TYPE_AUDIO_SELECTOR && w->type != HDA_PARAM_AUDIO_WIDGET_CAP_TYPE_AUDIO_MIXER) continue; found = 0; for (k = devinfo->startnode; k < devinfo->endnode; k++) { cw = hdaa_widget_get(devinfo, k); if (cw == NULL || cw->enable == 0) continue; for (j = 0; j < cw->nconns; j++) { if (cw->connsenable[j] && cw->conns[j] == i) { found = 1; break; } } } if (found == 0) { w->enable = 0; done = 0; HDA_BOOTHVERBOSE( device_printf(devinfo->dev, " Disabling nid %d due to all it's" " consumers disabled.\n", w->nid); ); } } } while (done == 0); } static void hdaa_audio_disable_unas(struct hdaa_devinfo *devinfo) { struct hdaa_audio_as *as = devinfo->as; struct hdaa_widget *w, *cw; struct hdaa_audio_ctl *ctl; int i, j, k; /* Disable unassosiated widgets. */ for (i = devinfo->startnode; i < devinfo->endnode; i++) { w = hdaa_widget_get(devinfo, i); if (w == NULL || w->enable == 0) continue; if (w->bindas == -1) { w->enable = 0; HDA_BOOTHVERBOSE( device_printf(devinfo->dev, " Disabling unassociated nid %d.\n", w->nid); ); } } /* Disable input connections on input pin and * output on output. 
*/ for (i = devinfo->startnode; i < devinfo->endnode; i++) { w = hdaa_widget_get(devinfo, i); if (w == NULL || w->enable == 0) continue; if (w->type != HDA_PARAM_AUDIO_WIDGET_CAP_TYPE_PIN_COMPLEX) continue; if (w->bindas < 0) continue; if (as[w->bindas].dir == HDAA_CTL_IN) { for (j = 0; j < w->nconns; j++) { if (w->connsenable[j] == 0) continue; w->connsenable[j] = 0; HDA_BOOTHVERBOSE( device_printf(devinfo->dev, " Disabling connection to input pin " "nid %d conn %d.\n", i, j); ); } ctl = hdaa_audio_ctl_amp_get(devinfo, w->nid, HDAA_CTL_IN, -1, 1); if (ctl && ctl->enable) { ctl->forcemute = 1; ctl->muted = HDAA_AMP_MUTE_ALL; ctl->left = 0; ctl->right = 0; ctl->enable = 0; } } else { ctl = hdaa_audio_ctl_amp_get(devinfo, w->nid, HDAA_CTL_OUT, -1, 1); if (ctl && ctl->enable) { ctl->forcemute = 1; ctl->muted = HDAA_AMP_MUTE_ALL; ctl->left = 0; ctl->right = 0; ctl->enable = 0; } for (k = devinfo->startnode; k < devinfo->endnode; k++) { cw = hdaa_widget_get(devinfo, k); if (cw == NULL || cw->enable == 0) continue; for (j = 0; j < cw->nconns; j++) { if (cw->connsenable[j] && cw->conns[j] == i) { cw->connsenable[j] = 0; HDA_BOOTHVERBOSE( device_printf(devinfo->dev, " Disabling connection from output pin " "nid %d conn %d cnid %d.\n", k, j, i); ); if (cw->type == HDA_PARAM_AUDIO_WIDGET_CAP_TYPE_PIN_COMPLEX && cw->nconns > 1) continue; ctl = hdaa_audio_ctl_amp_get(devinfo, k, HDAA_CTL_IN, j, 1); if (ctl && ctl->enable) { ctl->forcemute = 1; ctl->muted = HDAA_AMP_MUTE_ALL; ctl->left = 0; ctl->right = 0; ctl->enable = 0; } } } } } } } static void hdaa_audio_disable_notselected(struct hdaa_devinfo *devinfo) { struct hdaa_audio_as *as = devinfo->as; struct hdaa_widget *w; int i, j; /* On playback path we can safely disable all unseleted inputs. */ for (i = devinfo->startnode; i < devinfo->endnode; i++) { w = hdaa_widget_get(devinfo, i); if (w == NULL || w->enable == 0) continue; if (w->nconns <= 1) continue; if (w->type == HDA_PARAM_AUDIO_WIDGET_CAP_TYPE_AUDIO_MIXER) continue; if (w->bindas < 0 || as[w->bindas].dir == HDAA_CTL_IN) continue; for (j = 0; j < w->nconns; j++) { if (w->connsenable[j] == 0) continue; if (w->selconn < 0 || w->selconn == j) continue; w->connsenable[j] = 0; HDA_BOOTHVERBOSE( device_printf(devinfo->dev, " Disabling unselected connection " "nid %d conn %d.\n", i, j); ); } } } static void hdaa_audio_disable_crossas(struct hdaa_devinfo *devinfo) { struct hdaa_audio_as *ases = devinfo->as; struct hdaa_widget *w, *cw; struct hdaa_audio_ctl *ctl; int i, j; /* Disable crossassociatement and unwanted crosschannel connections. */ /* ... using selectors */ for (i = devinfo->startnode; i < devinfo->endnode; i++) { w = hdaa_widget_get(devinfo, i); if (w == NULL || w->enable == 0) continue; if (w->nconns <= 1) continue; if (w->type == HDA_PARAM_AUDIO_WIDGET_CAP_TYPE_AUDIO_MIXER) continue; /* Allow any -> mix */ if (w->bindas == -2) continue; for (j = 0; j < w->nconns; j++) { if (w->connsenable[j] == 0) continue; cw = hdaa_widget_get(devinfo, w->conns[j]); if (cw == NULL || w->enable == 0) continue; /* Allow mix -> out. */ if (cw->bindas == -2 && w->bindas >= 0 && ases[w->bindas].dir == HDAA_CTL_OUT) continue; /* Allow mix -> mixed-in. */ if (cw->bindas == -2 && w->bindas >= 0 && ases[w->bindas].mixed) continue; /* Allow in -> mix. */ if ((w->pflags & HDAA_ADC_MONITOR) && cw->bindas >= 0 && ases[cw->bindas].dir == HDAA_CTL_IN) continue; /* Allow if have common as/seqs. 
*/ if (w->bindas == cw->bindas && (w->bindseqmask & cw->bindseqmask) != 0) continue; w->connsenable[j] = 0; HDA_BOOTHVERBOSE( device_printf(devinfo->dev, " Disabling crossassociatement connection " "nid %d conn %d cnid %d.\n", i, j, cw->nid); ); } } /* ... using controls */ i = 0; while ((ctl = hdaa_audio_ctl_each(devinfo, &i)) != NULL) { if (ctl->enable == 0 || ctl->childwidget == NULL) continue; /* Allow any -> mix */ if (ctl->widget->bindas == -2) continue; /* Allow mix -> out. */ if (ctl->childwidget->bindas == -2 && ctl->widget->bindas >= 0 && ases[ctl->widget->bindas].dir == HDAA_CTL_OUT) continue; /* Allow mix -> mixed-in. */ if (ctl->childwidget->bindas == -2 && ctl->widget->bindas >= 0 && ases[ctl->widget->bindas].mixed) continue; /* Allow in -> mix. */ if ((ctl->widget->pflags & HDAA_ADC_MONITOR) && ctl->childwidget->bindas >= 0 && ases[ctl->childwidget->bindas].dir == HDAA_CTL_IN) continue; /* Allow if have common as/seqs. */ if (ctl->widget->bindas == ctl->childwidget->bindas && (ctl->widget->bindseqmask & ctl->childwidget->bindseqmask) != 0) continue; ctl->forcemute = 1; ctl->muted = HDAA_AMP_MUTE_ALL; ctl->left = 0; ctl->right = 0; ctl->enable = 0; if (ctl->ndir == HDAA_CTL_IN) ctl->widget->connsenable[ctl->index] = 0; HDA_BOOTHVERBOSE( device_printf(devinfo->dev, " Disabling crossassociatement connection " "ctl %d nid %d cnid %d.\n", i, ctl->widget->nid, ctl->childwidget->nid); ); } } /* * Find controls to control amplification for source and calculate possible * amplification range. */ static int hdaa_audio_ctl_source_amp(struct hdaa_devinfo *devinfo, nid_t nid, int index, int ossdev, int ctlable, int depth, int *minamp, int *maxamp) { struct hdaa_widget *w, *wc; struct hdaa_audio_ctl *ctl; int i, j, conns = 0, tminamp, tmaxamp, cminamp, cmaxamp, found = 0; if (depth > HDA_PARSE_MAXDEPTH) return (found); w = hdaa_widget_get(devinfo, nid); if (w == NULL || w->enable == 0) return (found); /* Count number of active inputs. */ if (depth > 0) { for (j = 0; j < w->nconns; j++) { if (!w->connsenable[j]) continue; conns++; } } /* If this is not a first step - use input mixer. Pins have common input ctl so care must be taken. */ if (depth > 0 && ctlable && (conns == 1 || w->type != HDA_PARAM_AUDIO_WIDGET_CAP_TYPE_PIN_COMPLEX)) { ctl = hdaa_audio_ctl_amp_get(devinfo, w->nid, HDAA_CTL_IN, index, 1); if (ctl) { ctl->ossmask |= (1 << ossdev); found++; if (*minamp == *maxamp) { *minamp += MINQDB(ctl); *maxamp += MAXQDB(ctl); } } } /* If widget has own ossdev - not traverse it. It will be traversed on its own. */ if (w->ossdev >= 0 && depth > 0) return (found); /* We must not traverse pin */ if ((w->type == HDA_PARAM_AUDIO_WIDGET_CAP_TYPE_AUDIO_INPUT || w->type == HDA_PARAM_AUDIO_WIDGET_CAP_TYPE_PIN_COMPLEX) && depth > 0) return (found); /* record that this widget exports such signal, */ w->ossmask |= (1 << ossdev); /* * If signals mixed, we can't assign controls farther. * Ignore this on depth zero. Caller must knows why. 
*/ if (conns > 1 && w->type == HDA_PARAM_AUDIO_WIDGET_CAP_TYPE_AUDIO_MIXER) ctlable = 0; if (ctlable) { ctl = hdaa_audio_ctl_amp_get(devinfo, w->nid, HDAA_CTL_OUT, -1, 1); if (ctl) { ctl->ossmask |= (1 << ossdev); found++; if (*minamp == *maxamp) { *minamp += MINQDB(ctl); *maxamp += MAXQDB(ctl); } } } cminamp = cmaxamp = 0; for (i = devinfo->startnode; i < devinfo->endnode; i++) { wc = hdaa_widget_get(devinfo, i); if (wc == NULL || wc->enable == 0) continue; for (j = 0; j < wc->nconns; j++) { if (wc->connsenable[j] && wc->conns[j] == nid) { tminamp = tmaxamp = 0; found += hdaa_audio_ctl_source_amp(devinfo, wc->nid, j, ossdev, ctlable, depth + 1, &tminamp, &tmaxamp); if (cminamp == 0 && cmaxamp == 0) { cminamp = tminamp; cmaxamp = tmaxamp; } else if (tminamp != tmaxamp) { cminamp = imax(cminamp, tminamp); cmaxamp = imin(cmaxamp, tmaxamp); } } } } if (*minamp == *maxamp && cminamp < cmaxamp) { *minamp += cminamp; *maxamp += cmaxamp; } return (found); } /* * Find controls to control amplification for destination and calculate * possible amplification range. */ static int hdaa_audio_ctl_dest_amp(struct hdaa_devinfo *devinfo, nid_t nid, int index, int ossdev, int depth, int *minamp, int *maxamp) { struct hdaa_audio_as *as = devinfo->as; struct hdaa_widget *w, *wc; struct hdaa_audio_ctl *ctl; int i, j, consumers, tminamp, tmaxamp, cminamp, cmaxamp, found = 0; if (depth > HDA_PARSE_MAXDEPTH) return (found); w = hdaa_widget_get(devinfo, nid); if (w == NULL || w->enable == 0) return (found); if (depth > 0) { /* If this node produce output for several consumers, we can't touch it. */ consumers = 0; for (i = devinfo->startnode; i < devinfo->endnode; i++) { wc = hdaa_widget_get(devinfo, i); if (wc == NULL || wc->enable == 0) continue; for (j = 0; j < wc->nconns; j++) { if (wc->connsenable[j] && wc->conns[j] == nid) consumers++; } } /* The only exception is if real HP redirection is configured and this is a duplication point. XXX: Actually exception is not completely correct. XXX: Duplication point check is not perfect. */ if ((consumers == 2 && (w->bindas < 0 || as[w->bindas].hpredir < 0 || as[w->bindas].fakeredir || (w->bindseqmask & (1 << 15)) == 0)) || consumers > 2) return (found); /* Else use it's output mixer. 
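The cminamp/cmaxamp bookkeeping in hdaa_audio_ctl_source_amp() above (mirrored in hdaa_audio_ctl_dest_amp()) intersects the ranges reported by parallel paths: the first child initializes the pair and each further child with a non-empty range tightens it through imax()/imin(). If one downstream path can be varied over -30..+30 quarter dB and another over -20..+20, only -20..+20 is propagated, since that is the span every path can honor; hdaa_adjust_amp() later performs the same intersection when several widgets feed one OSS control.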
*/ ctl = hdaa_audio_ctl_amp_get(devinfo, w->nid, HDAA_CTL_OUT, -1, 1); if (ctl) { ctl->ossmask |= (1 << ossdev); found++; if (*minamp == *maxamp) { *minamp += MINQDB(ctl); *maxamp += MAXQDB(ctl); } } } /* We must not traverse pin */ if (w->type == HDA_PARAM_AUDIO_WIDGET_CAP_TYPE_PIN_COMPLEX && depth > 0) return (found); cminamp = cmaxamp = 0; for (i = 0; i < w->nconns; i++) { if (w->connsenable[i] == 0) continue; if (index >= 0 && i != index) continue; tminamp = tmaxamp = 0; ctl = hdaa_audio_ctl_amp_get(devinfo, w->nid, HDAA_CTL_IN, i, 1); if (ctl) { ctl->ossmask |= (1 << ossdev); found++; if (*minamp == *maxamp) { tminamp += MINQDB(ctl); tmaxamp += MAXQDB(ctl); } } found += hdaa_audio_ctl_dest_amp(devinfo, w->conns[i], -1, ossdev, depth + 1, &tminamp, &tmaxamp); if (cminamp == 0 && cmaxamp == 0) { cminamp = tminamp; cmaxamp = tmaxamp; } else if (tminamp != tmaxamp) { cminamp = imax(cminamp, tminamp); cmaxamp = imin(cmaxamp, tmaxamp); } } if (*minamp == *maxamp && cminamp < cmaxamp) { *minamp += cminamp; *maxamp += cmaxamp; } return (found); } /* * Assign OSS names to sound sources */ static void hdaa_audio_assign_names(struct hdaa_devinfo *devinfo) { struct hdaa_audio_as *as = devinfo->as; struct hdaa_widget *w; int i, j; int type = -1, use, used = 0; static const int types[7][13] = { { SOUND_MIXER_LINE, SOUND_MIXER_LINE1, SOUND_MIXER_LINE2, SOUND_MIXER_LINE3, -1 }, /* line */ { SOUND_MIXER_MONITOR, SOUND_MIXER_MIC, -1 }, /* int mic */ { SOUND_MIXER_MIC, SOUND_MIXER_MONITOR, -1 }, /* ext mic */ { SOUND_MIXER_CD, -1 }, /* cd */ { SOUND_MIXER_SPEAKER, -1 }, /* speaker */ { SOUND_MIXER_DIGITAL1, SOUND_MIXER_DIGITAL2, SOUND_MIXER_DIGITAL3, -1 }, /* digital */ { SOUND_MIXER_LINE, SOUND_MIXER_LINE1, SOUND_MIXER_LINE2, SOUND_MIXER_LINE3, SOUND_MIXER_PHONEIN, SOUND_MIXER_PHONEOUT, SOUND_MIXER_VIDEO, SOUND_MIXER_RADIO, SOUND_MIXER_DIGITAL1, SOUND_MIXER_DIGITAL2, SOUND_MIXER_DIGITAL3, SOUND_MIXER_MONITOR, -1 } /* others */ }; /* Surely known names */ for (i = devinfo->startnode; i < devinfo->endnode; i++) { w = hdaa_widget_get(devinfo, i); if (w == NULL || w->enable == 0) continue; if (w->bindas == -1) continue; use = -1; switch (w->type) { case HDA_PARAM_AUDIO_WIDGET_CAP_TYPE_PIN_COMPLEX: if (as[w->bindas].dir == HDAA_CTL_OUT) break; type = -1; switch (w->wclass.pin.config & HDA_CONFIG_DEFAULTCONF_DEVICE_MASK) { case HDA_CONFIG_DEFAULTCONF_DEVICE_LINE_IN: type = 0; break; case HDA_CONFIG_DEFAULTCONF_DEVICE_MIC_IN: if ((w->wclass.pin.config & HDA_CONFIG_DEFAULTCONF_CONNECTIVITY_MASK) == HDA_CONFIG_DEFAULTCONF_CONNECTIVITY_JACK) break; type = 1; break; case HDA_CONFIG_DEFAULTCONF_DEVICE_CD: type = 3; break; case HDA_CONFIG_DEFAULTCONF_DEVICE_SPEAKER: type = 4; break; case HDA_CONFIG_DEFAULTCONF_DEVICE_SPDIF_IN: case HDA_CONFIG_DEFAULTCONF_DEVICE_DIGITAL_OTHER_IN: type = 5; break; } if (type == -1) break; j = 0; while (types[type][j] >= 0 && (used & (1 << types[type][j])) != 0) { j++; } if (types[type][j] >= 0) use = types[type][j]; break; case HDA_PARAM_AUDIO_WIDGET_CAP_TYPE_AUDIO_OUTPUT: use = SOUND_MIXER_PCM; break; case HDA_PARAM_AUDIO_WIDGET_CAP_TYPE_BEEP_WIDGET: use = SOUND_MIXER_SPEAKER; break; default: break; } if (use >= 0) { w->ossdev = use; used |= (1 << use); } } /* Semi-known names */ for (i = devinfo->startnode; i < devinfo->endnode; i++) { w = hdaa_widget_get(devinfo, i); if (w == NULL || w->enable == 0) continue; if (w->ossdev >= 0) continue; if (w->bindas == -1) continue; if (w->type != HDA_PARAM_AUDIO_WIDGET_CAP_TYPE_PIN_COMPLEX) continue; if (as[w->bindas].dir == HDAA_CTL_OUT) 
continue; type = -1; switch (w->wclass.pin.config & HDA_CONFIG_DEFAULTCONF_DEVICE_MASK) { case HDA_CONFIG_DEFAULTCONF_DEVICE_LINE_OUT: case HDA_CONFIG_DEFAULTCONF_DEVICE_SPEAKER: case HDA_CONFIG_DEFAULTCONF_DEVICE_HP_OUT: case HDA_CONFIG_DEFAULTCONF_DEVICE_AUX: type = 0; break; case HDA_CONFIG_DEFAULTCONF_DEVICE_MIC_IN: type = 2; break; case HDA_CONFIG_DEFAULTCONF_DEVICE_SPDIF_OUT: case HDA_CONFIG_DEFAULTCONF_DEVICE_DIGITAL_OTHER_OUT: type = 5; break; } if (type == -1) break; j = 0; while (types[type][j] >= 0 && (used & (1 << types[type][j])) != 0) { j++; } if (types[type][j] >= 0) { w->ossdev = types[type][j]; used |= (1 << types[type][j]); } } /* Others */ for (i = devinfo->startnode; i < devinfo->endnode; i++) { w = hdaa_widget_get(devinfo, i); if (w == NULL || w->enable == 0) continue; if (w->ossdev >= 0) continue; if (w->bindas == -1) continue; if (w->type != HDA_PARAM_AUDIO_WIDGET_CAP_TYPE_PIN_COMPLEX) continue; if (as[w->bindas].dir == HDAA_CTL_OUT) continue; j = 0; while (types[6][j] >= 0 && (used & (1 << types[6][j])) != 0) { j++; } if (types[6][j] >= 0) { w->ossdev = types[6][j]; used |= (1 << types[6][j]); } } } static void hdaa_audio_build_tree(struct hdaa_devinfo *devinfo) { struct hdaa_audio_as *as = devinfo->as; int j, res; /* Trace all associations in order of their numbers. */ for (j = 0; j < devinfo->ascnt; j++) { if (as[j].enable == 0) continue; HDA_BOOTVERBOSE( device_printf(devinfo->dev, "Tracing association %d (%d)\n", j, as[j].index); ); if (as[j].dir == HDAA_CTL_OUT) { retry: res = hdaa_audio_trace_as_out(devinfo, j, 0); if (res == 0 && as[j].hpredir >= 0 && as[j].fakeredir == 0) { /* If CODEC can't do analog HP redirection try to make it using one more DAC. */ as[j].fakeredir = 1; goto retry; } } else if (as[j].mixed) res = hdaa_audio_trace_as_in(devinfo, j); else res = hdaa_audio_trace_as_in_mch(devinfo, j, 0); if (res) { HDA_BOOTVERBOSE( device_printf(devinfo->dev, "Association %d (%d) trace succeeded\n", j, as[j].index); ); } else { HDA_BOOTVERBOSE( device_printf(devinfo->dev, "Association %d (%d) trace failed\n", j, as[j].index); ); as[j].enable = 0; } } /* Look for additional DACs/ADCs. */ for (j = 0; j < devinfo->ascnt; j++) { if (as[j].enable == 0) continue; hdaa_audio_adddac(devinfo, j); } /* Trace mixer and beeper pseudo associations. */ hdaa_audio_trace_as_extra(devinfo); } /* * Store in pdevinfo new data about whether and how we can control signal * for OSS device to/from specified widget. */ static void hdaa_adjust_amp(struct hdaa_widget *w, int ossdev, int found, int minamp, int maxamp) { struct hdaa_devinfo *devinfo = w->devinfo; struct hdaa_pcm_devinfo *pdevinfo; if (w->bindas >= 0) pdevinfo = devinfo->as[w->bindas].pdevinfo; else pdevinfo = &devinfo->devs[0]; if (found) pdevinfo->ossmask |= (1 << ossdev); if (minamp == 0 && maxamp == 0) return; if (pdevinfo->minamp[ossdev] == 0 && pdevinfo->maxamp[ossdev] == 0) { pdevinfo->minamp[ossdev] = minamp; pdevinfo->maxamp[ossdev] = maxamp; } else { pdevinfo->minamp[ossdev] = imax(pdevinfo->minamp[ossdev], minamp); pdevinfo->maxamp[ossdev] = imin(pdevinfo->maxamp[ossdev], maxamp); } } /* * Trace signals from/to all possible sources/destionstions to find possible * recording sources, OSS device control ranges and to assign controls. */ static void hdaa_audio_assign_mixers(struct hdaa_devinfo *devinfo) { struct hdaa_audio_as *as = devinfo->as; struct hdaa_widget *w, *cw; int i, j, minamp, maxamp, found; /* Assign mixers to the tree. 
*/ for (i = devinfo->startnode; i < devinfo->endnode; i++) { w = hdaa_widget_get(devinfo, i); if (w == NULL || w->enable == 0) continue; minamp = maxamp = 0; if (w->type == HDA_PARAM_AUDIO_WIDGET_CAP_TYPE_AUDIO_OUTPUT || w->type == HDA_PARAM_AUDIO_WIDGET_CAP_TYPE_BEEP_WIDGET || (w->type == HDA_PARAM_AUDIO_WIDGET_CAP_TYPE_PIN_COMPLEX && as[w->bindas].dir == HDAA_CTL_IN)) { if (w->ossdev < 0) continue; found = hdaa_audio_ctl_source_amp(devinfo, w->nid, -1, w->ossdev, 1, 0, &minamp, &maxamp); hdaa_adjust_amp(w, w->ossdev, found, minamp, maxamp); } else if (w->type == HDA_PARAM_AUDIO_WIDGET_CAP_TYPE_AUDIO_INPUT) { found = hdaa_audio_ctl_dest_amp(devinfo, w->nid, -1, SOUND_MIXER_RECLEV, 0, &minamp, &maxamp); hdaa_adjust_amp(w, SOUND_MIXER_RECLEV, found, minamp, maxamp); } else if (w->type == HDA_PARAM_AUDIO_WIDGET_CAP_TYPE_PIN_COMPLEX && as[w->bindas].dir == HDAA_CTL_OUT) { found = hdaa_audio_ctl_dest_amp(devinfo, w->nid, -1, SOUND_MIXER_VOLUME, 0, &minamp, &maxamp); hdaa_adjust_amp(w, SOUND_MIXER_VOLUME, found, minamp, maxamp); } if (w->ossdev == SOUND_MIXER_IMIX) { minamp = maxamp = 0; found = hdaa_audio_ctl_source_amp(devinfo, w->nid, -1, w->ossdev, 1, 0, &minamp, &maxamp); if (minamp == maxamp) { /* If we are unable to control input monitor as source - try to control it as destination. */ found += hdaa_audio_ctl_dest_amp(devinfo, w->nid, -1, w->ossdev, 0, &minamp, &maxamp); w->pflags |= HDAA_IMIX_AS_DST; } hdaa_adjust_amp(w, w->ossdev, found, minamp, maxamp); } if (w->pflags & HDAA_ADC_MONITOR) { for (j = 0; j < w->nconns; j++) { if (!w->connsenable[j]) continue; cw = hdaa_widget_get(devinfo, w->conns[j]); if (cw == NULL || cw->enable == 0) continue; if (cw->bindas == -1) continue; if (cw->bindas >= 0 && as[cw->bindas].dir != HDAA_CTL_IN) continue; minamp = maxamp = 0; found = hdaa_audio_ctl_dest_amp(devinfo, w->nid, j, SOUND_MIXER_IGAIN, 0, &minamp, &maxamp); hdaa_adjust_amp(w, SOUND_MIXER_IGAIN, found, minamp, maxamp); } } } } static void hdaa_audio_prepare_pin_ctrl(struct hdaa_devinfo *devinfo) { struct hdaa_audio_as *as = devinfo->as; struct hdaa_widget *w; uint32_t pincap; int i; for (i = 0; i < devinfo->nodecnt; i++) { w = &devinfo->widget[i]; if (w == NULL) continue; if (w->type != HDA_PARAM_AUDIO_WIDGET_CAP_TYPE_PIN_COMPLEX && w->waspin == 0) continue; pincap = w->wclass.pin.cap; /* Disable everything. */ w->wclass.pin.ctrl &= ~( HDA_CMD_SET_PIN_WIDGET_CTRL_HPHN_ENABLE | HDA_CMD_SET_PIN_WIDGET_CTRL_OUT_ENABLE | HDA_CMD_SET_PIN_WIDGET_CTRL_IN_ENABLE | HDA_CMD_SET_PIN_WIDGET_CTRL_VREF_ENABLE_MASK); if (w->enable == 0) { /* Pin is unused so left it disabled. */ continue; } else if (w->waspin) { /* Enable input for beeper input. */ w->wclass.pin.ctrl |= HDA_CMD_SET_PIN_WIDGET_CTRL_IN_ENABLE; } else if (w->bindas < 0 || as[w->bindas].enable == 0) { /* Pin is unused so left it disabled. */ continue; } else if (as[w->bindas].dir == HDAA_CTL_IN) { /* Input pin, configure for input. 
*/ if (HDA_PARAM_PIN_CAP_INPUT_CAP(pincap)) w->wclass.pin.ctrl |= HDA_CMD_SET_PIN_WIDGET_CTRL_IN_ENABLE; if ((devinfo->quirks & HDAA_QUIRK_IVREF100) && HDA_PARAM_PIN_CAP_VREF_CTRL_100(pincap)) w->wclass.pin.ctrl |= HDA_CMD_SET_PIN_WIDGET_CTRL_VREF_ENABLE( HDA_CMD_PIN_WIDGET_CTRL_VREF_ENABLE_100); else if ((devinfo->quirks & HDAA_QUIRK_IVREF80) && HDA_PARAM_PIN_CAP_VREF_CTRL_80(pincap)) w->wclass.pin.ctrl |= HDA_CMD_SET_PIN_WIDGET_CTRL_VREF_ENABLE( HDA_CMD_PIN_WIDGET_CTRL_VREF_ENABLE_80); else if ((devinfo->quirks & HDAA_QUIRK_IVREF50) && HDA_PARAM_PIN_CAP_VREF_CTRL_50(pincap)) w->wclass.pin.ctrl |= HDA_CMD_SET_PIN_WIDGET_CTRL_VREF_ENABLE( HDA_CMD_PIN_WIDGET_CTRL_VREF_ENABLE_50); } else { /* Output pin, configure for output. */ if (HDA_PARAM_PIN_CAP_OUTPUT_CAP(pincap)) w->wclass.pin.ctrl |= HDA_CMD_SET_PIN_WIDGET_CTRL_OUT_ENABLE; if (HDA_PARAM_PIN_CAP_HEADPHONE_CAP(pincap) && (w->wclass.pin.config & HDA_CONFIG_DEFAULTCONF_DEVICE_MASK) == HDA_CONFIG_DEFAULTCONF_DEVICE_HP_OUT) w->wclass.pin.ctrl |= HDA_CMD_SET_PIN_WIDGET_CTRL_HPHN_ENABLE; if ((devinfo->quirks & HDAA_QUIRK_OVREF100) && HDA_PARAM_PIN_CAP_VREF_CTRL_100(pincap)) w->wclass.pin.ctrl |= HDA_CMD_SET_PIN_WIDGET_CTRL_VREF_ENABLE( HDA_CMD_PIN_WIDGET_CTRL_VREF_ENABLE_100); else if ((devinfo->quirks & HDAA_QUIRK_OVREF80) && HDA_PARAM_PIN_CAP_VREF_CTRL_80(pincap)) w->wclass.pin.ctrl |= HDA_CMD_SET_PIN_WIDGET_CTRL_VREF_ENABLE( HDA_CMD_PIN_WIDGET_CTRL_VREF_ENABLE_80); else if ((devinfo->quirks & HDAA_QUIRK_OVREF50) && HDA_PARAM_PIN_CAP_VREF_CTRL_50(pincap)) w->wclass.pin.ctrl |= HDA_CMD_SET_PIN_WIDGET_CTRL_VREF_ENABLE( HDA_CMD_PIN_WIDGET_CTRL_VREF_ENABLE_50); } } } static void hdaa_audio_ctl_commit(struct hdaa_devinfo *devinfo) { struct hdaa_audio_ctl *ctl; int i, z; i = 0; while ((ctl = hdaa_audio_ctl_each(devinfo, &i)) != NULL) { if (ctl->enable == 0 || ctl->ossmask != 0) { /* Mute disabled and mixer-controllable controls. * The latter will be initialized by mixer_init(). * This is expected to reduce clicks on startup. */ hdaa_audio_ctl_amp_set(ctl, HDAA_AMP_MUTE_ALL, 0, 0); continue; } /* Init fixed controls to 0dB amplification.
*/ z = ctl->offset; if (z > ctl->step) z = ctl->step; hdaa_audio_ctl_amp_set(ctl, HDAA_AMP_MUTE_NONE, z, z); } } static void hdaa_gpio_commit(struct hdaa_devinfo *devinfo) { uint32_t gdata, gmask, gdir; int i, numgpio; numgpio = HDA_PARAM_GPIO_COUNT_NUM_GPIO(devinfo->gpio_cap); if (devinfo->gpio != 0 && numgpio != 0) { gdata = hda_command(devinfo->dev, HDA_CMD_GET_GPIO_DATA(0, devinfo->nid)); gmask = hda_command(devinfo->dev, HDA_CMD_GET_GPIO_ENABLE_MASK(0, devinfo->nid)); gdir = hda_command(devinfo->dev, HDA_CMD_GET_GPIO_DIRECTION(0, devinfo->nid)); for (i = 0; i < numgpio; i++) { if ((devinfo->gpio & HDAA_GPIO_MASK(i)) == HDAA_GPIO_SET(i)) { gdata |= (1 << i); gmask |= (1 << i); gdir |= (1 << i); } else if ((devinfo->gpio & HDAA_GPIO_MASK(i)) == HDAA_GPIO_CLEAR(i)) { gdata &= ~(1 << i); gmask |= (1 << i); gdir |= (1 << i); } else if ((devinfo->gpio & HDAA_GPIO_MASK(i)) == HDAA_GPIO_DISABLE(i)) { gmask &= ~(1 << i); } else if ((devinfo->gpio & HDAA_GPIO_MASK(i)) == HDAA_GPIO_INPUT(i)) { gmask |= (1 << i); gdir &= ~(1 << i); } } HDA_BOOTVERBOSE( device_printf(devinfo->dev, "GPIO commit\n"); ); hda_command(devinfo->dev, HDA_CMD_SET_GPIO_ENABLE_MASK(0, devinfo->nid, gmask)); hda_command(devinfo->dev, HDA_CMD_SET_GPIO_DIRECTION(0, devinfo->nid, gdir)); hda_command(devinfo->dev, HDA_CMD_SET_GPIO_DATA(0, devinfo->nid, gdata)); HDA_BOOTVERBOSE( hdaa_dump_gpio(devinfo); ); } } static void hdaa_gpo_commit(struct hdaa_devinfo *devinfo) { uint32_t gdata; int i, numgpo; numgpo = HDA_PARAM_GPIO_COUNT_NUM_GPO(devinfo->gpio_cap); if (devinfo->gpo != 0 && numgpo != 0) { gdata = hda_command(devinfo->dev, HDA_CMD_GET_GPO_DATA(0, devinfo->nid)); for (i = 0; i < numgpo; i++) { if ((devinfo->gpio & HDAA_GPIO_MASK(i)) == HDAA_GPIO_SET(i)) { gdata |= (1 << i); } else if ((devinfo->gpio & HDAA_GPIO_MASK(i)) == HDAA_GPIO_CLEAR(i)) { gdata &= ~(1 << i); } } HDA_BOOTVERBOSE( device_printf(devinfo->dev, "GPO commit\n"); ); hda_command(devinfo->dev, HDA_CMD_SET_GPO_DATA(0, devinfo->nid, gdata)); HDA_BOOTVERBOSE( hdaa_dump_gpo(devinfo); ); } } static void hdaa_audio_commit(struct hdaa_devinfo *devinfo) { struct hdaa_widget *w; int i; /* Commit controls. */ hdaa_audio_ctl_commit(devinfo); /* Commit selectors, pins and EAPD. 
*/ for (i = 0; i < devinfo->nodecnt; i++) { w = &devinfo->widget[i]; if (w == NULL) continue; if (w->selconn == -1) w->selconn = 0; if (w->nconns > 0) hdaa_widget_connection_select(w, w->selconn); if (w->type == HDA_PARAM_AUDIO_WIDGET_CAP_TYPE_PIN_COMPLEX || w->waspin) { hda_command(devinfo->dev, HDA_CMD_SET_PIN_WIDGET_CTRL(0, w->nid, w->wclass.pin.ctrl)); } if (w->param.eapdbtl != HDA_INVALID) { uint32_t val; val = w->param.eapdbtl; if (devinfo->quirks & HDAA_QUIRK_EAPDINV) val ^= HDA_CMD_SET_EAPD_BTL_ENABLE_EAPD; hda_command(devinfo->dev, HDA_CMD_SET_EAPD_BTL_ENABLE(0, w->nid, val)); } } hdaa_gpio_commit(devinfo); hdaa_gpo_commit(devinfo); } static void hdaa_powerup(struct hdaa_devinfo *devinfo) { int i; hda_command(devinfo->dev, HDA_CMD_SET_POWER_STATE(0, devinfo->nid, HDA_CMD_POWER_STATE_D0)); DELAY(100); for (i = devinfo->startnode; i < devinfo->endnode; i++) { hda_command(devinfo->dev, HDA_CMD_SET_POWER_STATE(0, i, HDA_CMD_POWER_STATE_D0)); } DELAY(1000); } static int hdaa_pcmchannel_setup(struct hdaa_chan *ch) { struct hdaa_devinfo *devinfo = ch->devinfo; struct hdaa_audio_as *as = devinfo->as; struct hdaa_widget *w; uint32_t cap, fmtcap, pcmcap; int i, j, ret, channels, onlystereo; uint16_t pinset; ch->caps = hdaa_caps; ch->caps.fmtlist = ch->fmtlist; ch->bit16 = 1; ch->bit32 = 0; ch->pcmrates[0] = 48000; ch->pcmrates[1] = 0; ch->stripecap = 0xff; ret = 0; channels = 0; onlystereo = 1; pinset = 0; fmtcap = devinfo->supp_stream_formats; pcmcap = devinfo->supp_pcm_size_rate; for (i = 0; i < 16; i++) { /* Check that the association is correct */ if (ch->as < 0) break; /* Count only present DACs */ if (as[ch->as].dacs[ch->asindex][i] <= 0) continue; /* Ignore duplicates */ for (j = 0; j < ret; j++) { if (ch->io[j] == as[ch->as].dacs[ch->asindex][i]) break; } if (j < ret) continue; w = hdaa_widget_get(devinfo, as[ch->as].dacs[ch->asindex][i]); if (w == NULL || w->enable == 0) continue; cap = w->param.supp_stream_formats; if (!HDA_PARAM_SUPP_STREAM_FORMATS_PCM(cap) && !HDA_PARAM_SUPP_STREAM_FORMATS_AC3(cap)) continue; /* Many CODECs do not declare AC3 support on SPDIF. I don't believe that they don't support it! */ if (HDA_PARAM_AUDIO_WIDGET_CAP_DIGITAL(w->param.widget_cap)) cap |= HDA_PARAM_SUPP_STREAM_FORMATS_AC3_MASK; if (ret == 0) { fmtcap = cap; pcmcap = w->param.supp_pcm_size_rate; } else { fmtcap &= cap; pcmcap &= w->param.supp_pcm_size_rate; } ch->io[ret++] = as[ch->as].dacs[ch->asindex][i]; ch->stripecap &= w->wclass.conv.stripecap; /* Do not count redirection pin/dac channels. */ if (i == 15 && as[ch->as].hpredir >= 0) continue; channels += HDA_PARAM_AUDIO_WIDGET_CAP_CC(w->param.widget_cap) + 1; if (HDA_PARAM_AUDIO_WIDGET_CAP_CC(w->param.widget_cap) != 1) onlystereo = 0; pinset |= (1 << i); } ch->io[ret] = -1; ch->channels = channels; if (as[ch->as].fakeredir) ret--; /* The standard speaks only about stereo pins and playback, ... */ if ((!onlystereo) || as[ch->as].mixed) pinset = 0; /* ..., but there it gives us info about speaker layout.
*/ as[ch->as].pinset = pinset; ch->supp_stream_formats = fmtcap; ch->supp_pcm_size_rate = pcmcap; /* * 8bit = 0 * 16bit = 1 * 20bit = 2 * 24bit = 3 * 32bit = 4 */ if (ret > 0) { i = 0; if (HDA_PARAM_SUPP_STREAM_FORMATS_PCM(fmtcap)) { if (HDA_PARAM_SUPP_PCM_SIZE_RATE_16BIT(pcmcap)) ch->bit16 = 1; else if (HDA_PARAM_SUPP_PCM_SIZE_RATE_8BIT(pcmcap)) ch->bit16 = 0; if (HDA_PARAM_SUPP_PCM_SIZE_RATE_24BIT(pcmcap)) ch->bit32 = 3; else if (HDA_PARAM_SUPP_PCM_SIZE_RATE_20BIT(pcmcap)) ch->bit32 = 2; else if (HDA_PARAM_SUPP_PCM_SIZE_RATE_32BIT(pcmcap)) ch->bit32 = 4; if (!(devinfo->quirks & HDAA_QUIRK_FORCESTEREO)) { ch->fmtlist[i++] = SND_FORMAT(AFMT_S16_LE, 1, 0); if (ch->bit32) ch->fmtlist[i++] = SND_FORMAT(AFMT_S32_LE, 1, 0); } if (channels >= 2) { ch->fmtlist[i++] = SND_FORMAT(AFMT_S16_LE, 2, 0); if (ch->bit32) ch->fmtlist[i++] = SND_FORMAT(AFMT_S32_LE, 2, 0); } if (channels >= 3 && !onlystereo) { ch->fmtlist[i++] = SND_FORMAT(AFMT_S16_LE, 3, 0); if (ch->bit32) ch->fmtlist[i++] = SND_FORMAT(AFMT_S32_LE, 3, 0); ch->fmtlist[i++] = SND_FORMAT(AFMT_S16_LE, 3, 1); if (ch->bit32) ch->fmtlist[i++] = SND_FORMAT(AFMT_S32_LE, 3, 1); } if (channels >= 4) { ch->fmtlist[i++] = SND_FORMAT(AFMT_S16_LE, 4, 0); if (ch->bit32) ch->fmtlist[i++] = SND_FORMAT(AFMT_S32_LE, 4, 0); if (!onlystereo) { ch->fmtlist[i++] = SND_FORMAT(AFMT_S16_LE, 4, 1); if (ch->bit32) ch->fmtlist[i++] = SND_FORMAT(AFMT_S32_LE, 4, 1); } } if (channels >= 5 && !onlystereo) { ch->fmtlist[i++] = SND_FORMAT(AFMT_S16_LE, 5, 0); if (ch->bit32) ch->fmtlist[i++] = SND_FORMAT(AFMT_S32_LE, 5, 0); ch->fmtlist[i++] = SND_FORMAT(AFMT_S16_LE, 5, 1); if (ch->bit32) ch->fmtlist[i++] = SND_FORMAT(AFMT_S32_LE, 5, 1); } if (channels >= 6) { ch->fmtlist[i++] = SND_FORMAT(AFMT_S16_LE, 6, 1); if (ch->bit32) ch->fmtlist[i++] = SND_FORMAT(AFMT_S32_LE, 6, 1); if (!onlystereo) { ch->fmtlist[i++] = SND_FORMAT(AFMT_S16_LE, 6, 0); if (ch->bit32) ch->fmtlist[i++] = SND_FORMAT(AFMT_S32_LE, 6, 0); } } if (channels >= 7 && !onlystereo) { ch->fmtlist[i++] = SND_FORMAT(AFMT_S16_LE, 7, 0); if (ch->bit32) ch->fmtlist[i++] = SND_FORMAT(AFMT_S32_LE, 7, 0); ch->fmtlist[i++] = SND_FORMAT(AFMT_S16_LE, 7, 1); if (ch->bit32) ch->fmtlist[i++] = SND_FORMAT(AFMT_S32_LE, 7, 1); } if (channels >= 8) { ch->fmtlist[i++] = SND_FORMAT(AFMT_S16_LE, 8, 1); if (ch->bit32) ch->fmtlist[i++] = SND_FORMAT(AFMT_S32_LE, 8, 1); } } if (HDA_PARAM_SUPP_STREAM_FORMATS_AC3(fmtcap)) { ch->fmtlist[i++] = SND_FORMAT(AFMT_AC3, 2, 0); if (channels >= 8) { ch->fmtlist[i++] = SND_FORMAT(AFMT_AC3, 8, 0); ch->fmtlist[i++] = SND_FORMAT(AFMT_AC3, 8, 1); } } ch->fmtlist[i] = 0; i = 0; if (HDA_PARAM_SUPP_PCM_SIZE_RATE_8KHZ(pcmcap)) ch->pcmrates[i++] = 8000; if (HDA_PARAM_SUPP_PCM_SIZE_RATE_11KHZ(pcmcap)) ch->pcmrates[i++] = 11025; if (HDA_PARAM_SUPP_PCM_SIZE_RATE_16KHZ(pcmcap)) ch->pcmrates[i++] = 16000; if (HDA_PARAM_SUPP_PCM_SIZE_RATE_22KHZ(pcmcap)) ch->pcmrates[i++] = 22050; if (HDA_PARAM_SUPP_PCM_SIZE_RATE_32KHZ(pcmcap)) ch->pcmrates[i++] = 32000; if (HDA_PARAM_SUPP_PCM_SIZE_RATE_44KHZ(pcmcap)) ch->pcmrates[i++] = 44100; /* if (HDA_PARAM_SUPP_PCM_SIZE_RATE_48KHZ(pcmcap)) */ ch->pcmrates[i++] = 48000; if (HDA_PARAM_SUPP_PCM_SIZE_RATE_88KHZ(pcmcap)) ch->pcmrates[i++] = 88200; if (HDA_PARAM_SUPP_PCM_SIZE_RATE_96KHZ(pcmcap)) ch->pcmrates[i++] = 96000; if (HDA_PARAM_SUPP_PCM_SIZE_RATE_176KHZ(pcmcap)) ch->pcmrates[i++] = 176400; if (HDA_PARAM_SUPP_PCM_SIZE_RATE_192KHZ(pcmcap)) ch->pcmrates[i++] = 192000; /* if (HDA_PARAM_SUPP_PCM_SIZE_RATE_384KHZ(pcmcap)) */ ch->pcmrates[i] = 0; if (i > 0) { ch->caps.minspeed = 
ch->pcmrates[0]; ch->caps.maxspeed = ch->pcmrates[i - 1]; } } return (ret); } static void hdaa_prepare_pcms(struct hdaa_devinfo *devinfo) { struct hdaa_audio_as *as = devinfo->as; int i, j, k, apdev = 0, ardev = 0, dpdev = 0, drdev = 0; for (i = 0; i < devinfo->ascnt; i++) { if (as[i].enable == 0) continue; if (as[i].dir == HDAA_CTL_IN) { if (as[i].digital) drdev++; else ardev++; } else { if (as[i].digital) dpdev++; else apdev++; } } devinfo->num_devs = max(ardev, apdev) + max(drdev, dpdev); devinfo->devs = - (struct hdaa_pcm_devinfo *)malloc( - devinfo->num_devs * sizeof(struct hdaa_pcm_devinfo), + (struct hdaa_pcm_devinfo *)mallocarray( + devinfo->num_devs, sizeof(struct hdaa_pcm_devinfo), M_HDAA, M_ZERO | M_NOWAIT); if (devinfo->devs == NULL) { device_printf(devinfo->dev, "Unable to allocate memory for devices\n"); return; } for (i = 0; i < devinfo->num_devs; i++) { devinfo->devs[i].index = i; devinfo->devs[i].devinfo = devinfo; devinfo->devs[i].playas = -1; devinfo->devs[i].recas = -1; devinfo->devs[i].digital = 255; } for (i = 0; i < devinfo->ascnt; i++) { if (as[i].enable == 0) continue; for (j = 0; j < devinfo->num_devs; j++) { if (devinfo->devs[j].digital != 255 && (!devinfo->devs[j].digital) != (!as[i].digital)) continue; if (as[i].dir == HDAA_CTL_IN) { if (devinfo->devs[j].recas >= 0) continue; devinfo->devs[j].recas = i; } else { if (devinfo->devs[j].playas >= 0) continue; devinfo->devs[j].playas = i; } as[i].pdevinfo = &devinfo->devs[j]; for (k = 0; k < as[i].num_chans; k++) { devinfo->chans[as[i].chans[k]].pdevinfo = &devinfo->devs[j]; } devinfo->devs[j].digital = as[i].digital; break; } } } static void hdaa_create_pcms(struct hdaa_devinfo *devinfo) { int i; for (i = 0; i < devinfo->num_devs; i++) { struct hdaa_pcm_devinfo *pdevinfo = &devinfo->devs[i]; pdevinfo->dev = device_add_child(devinfo->dev, "pcm", -1); device_set_ivars(pdevinfo->dev, (void *)pdevinfo); } } static void hdaa_dump_ctls(struct hdaa_pcm_devinfo *pdevinfo, const char *banner, uint32_t flag) { struct hdaa_devinfo *devinfo = pdevinfo->devinfo; struct hdaa_audio_ctl *ctl; char buf[64]; int i, j, printed = 0; if (flag == 0) { flag = ~(SOUND_MASK_VOLUME | SOUND_MASK_PCM | SOUND_MASK_CD | SOUND_MASK_LINE | SOUND_MASK_RECLEV | SOUND_MASK_MIC | SOUND_MASK_SPEAKER | SOUND_MASK_IGAIN | SOUND_MASK_OGAIN | SOUND_MASK_IMIX | SOUND_MASK_MONITOR); } for (j = 0; j < SOUND_MIXER_NRDEVICES; j++) { if ((flag & (1 << j)) == 0) continue; i = 0; printed = 0; while ((ctl = hdaa_audio_ctl_each(devinfo, &i)) != NULL) { if (ctl->enable == 0 || ctl->widget->enable == 0) continue; if (!((pdevinfo->playas >= 0 && ctl->widget->bindas == pdevinfo->playas) || (pdevinfo->recas >= 0 && ctl->widget->bindas == pdevinfo->recas) || (ctl->widget->bindas == -2 && pdevinfo->index == 0))) continue; if ((ctl->ossmask & (1 << j)) == 0) continue; if (printed == 0) { if (banner != NULL) { device_printf(pdevinfo->dev, "%s", banner); } else { device_printf(pdevinfo->dev, "Unknown Ctl"); } printf(" (OSS: %s)", hdaa_audio_ctl_ossmixer_mask2allname(1 << j, buf, sizeof(buf))); if (pdevinfo->ossmask & (1 << j)) { printf(": %+d/%+ddB\n", pdevinfo->minamp[j] / 4, pdevinfo->maxamp[j] / 4); } else printf("\n"); printed = 1; } device_printf(pdevinfo->dev, " +- ctl %2d (nid %3d %s", i, ctl->widget->nid, (ctl->ndir == HDAA_CTL_IN)?"in ":"out"); if (ctl->ndir == HDAA_CTL_IN && ctl->ndir == ctl->dir) printf(" %2d): ", ctl->index); else printf("): "); if (ctl->step > 0) { printf("%+d/%+ddB (%d steps)%s\n", MINQDB(ctl) / 4, MAXQDB(ctl) / 4, ctl->step + 1, ctl->mute?" 
+ mute":""); } else printf("%s\n", ctl->mute?"mute":""); } } if (printed) device_printf(pdevinfo->dev, "\n"); } static void hdaa_dump_audio_formats(device_t dev, uint32_t fcap, uint32_t pcmcap) { uint32_t cap; cap = fcap; if (cap != 0) { device_printf(dev, " Stream cap: 0x%08x", cap); if (HDA_PARAM_SUPP_STREAM_FORMATS_AC3(cap)) printf(" AC3"); if (HDA_PARAM_SUPP_STREAM_FORMATS_FLOAT32(cap)) printf(" FLOAT32"); if (HDA_PARAM_SUPP_STREAM_FORMATS_PCM(cap)) printf(" PCM"); printf("\n"); } cap = pcmcap; if (cap != 0) { device_printf(dev, " PCM cap: 0x%08x", cap); if (HDA_PARAM_SUPP_PCM_SIZE_RATE_8BIT(cap)) printf(" 8"); if (HDA_PARAM_SUPP_PCM_SIZE_RATE_16BIT(cap)) printf(" 16"); if (HDA_PARAM_SUPP_PCM_SIZE_RATE_20BIT(cap)) printf(" 20"); if (HDA_PARAM_SUPP_PCM_SIZE_RATE_24BIT(cap)) printf(" 24"); if (HDA_PARAM_SUPP_PCM_SIZE_RATE_32BIT(cap)) printf(" 32"); printf(" bits,"); if (HDA_PARAM_SUPP_PCM_SIZE_RATE_8KHZ(cap)) printf(" 8"); if (HDA_PARAM_SUPP_PCM_SIZE_RATE_11KHZ(cap)) printf(" 11"); if (HDA_PARAM_SUPP_PCM_SIZE_RATE_16KHZ(cap)) printf(" 16"); if (HDA_PARAM_SUPP_PCM_SIZE_RATE_22KHZ(cap)) printf(" 22"); if (HDA_PARAM_SUPP_PCM_SIZE_RATE_32KHZ(cap)) printf(" 32"); if (HDA_PARAM_SUPP_PCM_SIZE_RATE_44KHZ(cap)) printf(" 44"); printf(" 48"); if (HDA_PARAM_SUPP_PCM_SIZE_RATE_88KHZ(cap)) printf(" 88"); if (HDA_PARAM_SUPP_PCM_SIZE_RATE_96KHZ(cap)) printf(" 96"); if (HDA_PARAM_SUPP_PCM_SIZE_RATE_176KHZ(cap)) printf(" 176"); if (HDA_PARAM_SUPP_PCM_SIZE_RATE_192KHZ(cap)) printf(" 192"); printf(" KHz\n"); } } static void hdaa_dump_pin(struct hdaa_widget *w) { uint32_t pincap; pincap = w->wclass.pin.cap; device_printf(w->devinfo->dev, " Pin cap: 0x%08x", pincap); if (HDA_PARAM_PIN_CAP_IMP_SENSE_CAP(pincap)) printf(" ISC"); if (HDA_PARAM_PIN_CAP_TRIGGER_REQD(pincap)) printf(" TRQD"); if (HDA_PARAM_PIN_CAP_PRESENCE_DETECT_CAP(pincap)) printf(" PDC"); if (HDA_PARAM_PIN_CAP_HEADPHONE_CAP(pincap)) printf(" HP"); if (HDA_PARAM_PIN_CAP_OUTPUT_CAP(pincap)) printf(" OUT"); if (HDA_PARAM_PIN_CAP_INPUT_CAP(pincap)) printf(" IN"); if (HDA_PARAM_PIN_CAP_BALANCED_IO_PINS(pincap)) printf(" BAL"); if (HDA_PARAM_PIN_CAP_HDMI(pincap)) printf(" HDMI"); if (HDA_PARAM_PIN_CAP_VREF_CTRL(pincap)) { printf(" VREF["); if (HDA_PARAM_PIN_CAP_VREF_CTRL_50(pincap)) printf(" 50"); if (HDA_PARAM_PIN_CAP_VREF_CTRL_80(pincap)) printf(" 80"); if (HDA_PARAM_PIN_CAP_VREF_CTRL_100(pincap)) printf(" 100"); if (HDA_PARAM_PIN_CAP_VREF_CTRL_GROUND(pincap)) printf(" GROUND"); if (HDA_PARAM_PIN_CAP_VREF_CTRL_HIZ(pincap)) printf(" HIZ"); printf(" ]"); } if (HDA_PARAM_PIN_CAP_EAPD_CAP(pincap)) printf(" EAPD"); if (HDA_PARAM_PIN_CAP_DP(pincap)) printf(" DP"); if (HDA_PARAM_PIN_CAP_HBR(pincap)) printf(" HBR"); printf("\n"); device_printf(w->devinfo->dev, " Pin config: 0x%08x\n", w->wclass.pin.config); device_printf(w->devinfo->dev, " Pin control: 0x%08x", w->wclass.pin.ctrl); if (w->wclass.pin.ctrl & HDA_CMD_SET_PIN_WIDGET_CTRL_HPHN_ENABLE) printf(" HP"); if (w->wclass.pin.ctrl & HDA_CMD_SET_PIN_WIDGET_CTRL_IN_ENABLE) printf(" IN"); if (w->wclass.pin.ctrl & HDA_CMD_SET_PIN_WIDGET_CTRL_OUT_ENABLE) printf(" OUT"); if (HDA_PARAM_AUDIO_WIDGET_CAP_DIGITAL(w->param.widget_cap)) { if ((w->wclass.pin.ctrl & HDA_CMD_SET_PIN_WIDGET_CTRL_VREF_ENABLE_MASK) == 0x03) printf(" HBR"); else if ((w->wclass.pin.ctrl & HDA_CMD_SET_PIN_WIDGET_CTRL_VREF_ENABLE_MASK) != 0) printf(" EPTs"); } else { if ((w->wclass.pin.ctrl & HDA_CMD_SET_PIN_WIDGET_CTRL_VREF_ENABLE_MASK) != 0) printf(" VREFs"); } printf("\n"); } static void hdaa_dump_pin_config(struct hdaa_widget *w, uint32_t 
conf) { device_printf(w->devinfo->dev, "%2d %08x %-2d %-2d " "%-13s %-5s %-7s %-10s %-7s %d%s\n", w->nid, conf, HDA_CONFIG_DEFAULTCONF_ASSOCIATION(conf), HDA_CONFIG_DEFAULTCONF_SEQUENCE(conf), HDA_DEVS[HDA_CONFIG_DEFAULTCONF_DEVICE(conf)], HDA_CONNS[HDA_CONFIG_DEFAULTCONF_CONNECTIVITY(conf)], HDA_CONNECTORS[HDA_CONFIG_DEFAULTCONF_CONNECTION_TYPE(conf)], HDA_LOCS[HDA_CONFIG_DEFAULTCONF_LOCATION(conf)], HDA_COLORS[HDA_CONFIG_DEFAULTCONF_COLOR(conf)], HDA_CONFIG_DEFAULTCONF_MISC(conf), (w->enable == 0)?" DISA":""); } static void hdaa_dump_pin_configs(struct hdaa_devinfo *devinfo) { struct hdaa_widget *w; int i; device_printf(devinfo->dev, "nid 0x as seq " "device conn jack loc color misc\n"); for (i = devinfo->startnode; i < devinfo->endnode; i++) { w = hdaa_widget_get(devinfo, i); if (w == NULL) continue; if (w->type != HDA_PARAM_AUDIO_WIDGET_CAP_TYPE_PIN_COMPLEX) continue; hdaa_dump_pin_config(w, w->wclass.pin.config); } } static void hdaa_dump_amp(device_t dev, uint32_t cap, const char *banner) { int offset, size, step; offset = HDA_PARAM_OUTPUT_AMP_CAP_OFFSET(cap); size = HDA_PARAM_OUTPUT_AMP_CAP_STEPSIZE(cap); step = HDA_PARAM_OUTPUT_AMP_CAP_NUMSTEPS(cap); device_printf(dev, " %s amp: 0x%08x " "mute=%d step=%d size=%d offset=%d (%+d/%+ddB)\n", banner, cap, HDA_PARAM_OUTPUT_AMP_CAP_MUTE_CAP(cap), step, size, offset, ((0 - offset) * (size + 1)) / 4, ((step - offset) * (size + 1)) / 4); } static void hdaa_dump_nodes(struct hdaa_devinfo *devinfo) { struct hdaa_widget *w, *cw; char buf[64]; int i, j; device_printf(devinfo->dev, "\n"); device_printf(devinfo->dev, "Default parameters:\n"); hdaa_dump_audio_formats(devinfo->dev, devinfo->supp_stream_formats, devinfo->supp_pcm_size_rate); hdaa_dump_amp(devinfo->dev, devinfo->inamp_cap, " Input"); hdaa_dump_amp(devinfo->dev, devinfo->outamp_cap, "Output"); for (i = devinfo->startnode; i < devinfo->endnode; i++) { w = hdaa_widget_get(devinfo, i); if (w == NULL) { device_printf(devinfo->dev, "Ghost widget nid=%d\n", i); continue; } device_printf(devinfo->dev, "\n"); device_printf(devinfo->dev, " nid: %d%s\n", w->nid, (w->enable == 0) ? 
" [DISABLED]" : ""); device_printf(devinfo->dev, " Name: %s\n", w->name); device_printf(devinfo->dev, " Widget cap: 0x%08x", w->param.widget_cap); if (w->param.widget_cap & 0x0ee1) { if (HDA_PARAM_AUDIO_WIDGET_CAP_LR_SWAP(w->param.widget_cap)) printf(" LRSWAP"); if (HDA_PARAM_AUDIO_WIDGET_CAP_POWER_CTRL(w->param.widget_cap)) printf(" PWR"); if (HDA_PARAM_AUDIO_WIDGET_CAP_DIGITAL(w->param.widget_cap)) printf(" DIGITAL"); if (HDA_PARAM_AUDIO_WIDGET_CAP_UNSOL_CAP(w->param.widget_cap)) printf(" UNSOL"); if (HDA_PARAM_AUDIO_WIDGET_CAP_PROC_WIDGET(w->param.widget_cap)) printf(" PROC"); if (HDA_PARAM_AUDIO_WIDGET_CAP_STRIPE(w->param.widget_cap)) printf(" STRIPE(x%d)", 1 << (fls(w->wclass.conv.stripecap) - 1)); j = HDA_PARAM_AUDIO_WIDGET_CAP_CC(w->param.widget_cap); if (j == 1) printf(" STEREO"); else if (j > 1) printf(" %dCH", j + 1); } printf("\n"); if (w->bindas != -1) { device_printf(devinfo->dev, " Association: %d (0x%04x)\n", w->bindas, w->bindseqmask); } if (w->ossmask != 0 || w->ossdev >= 0) { device_printf(devinfo->dev, " OSS: %s", hdaa_audio_ctl_ossmixer_mask2allname(w->ossmask, buf, sizeof(buf))); if (w->ossdev >= 0) printf(" (%s)", ossnames[w->ossdev]); printf("\n"); } if (w->type == HDA_PARAM_AUDIO_WIDGET_CAP_TYPE_AUDIO_OUTPUT || w->type == HDA_PARAM_AUDIO_WIDGET_CAP_TYPE_AUDIO_INPUT) { hdaa_dump_audio_formats(devinfo->dev, w->param.supp_stream_formats, w->param.supp_pcm_size_rate); } else if (w->type == HDA_PARAM_AUDIO_WIDGET_CAP_TYPE_PIN_COMPLEX || w->waspin) hdaa_dump_pin(w); if (w->param.eapdbtl != HDA_INVALID) device_printf(devinfo->dev, " EAPD: 0x%08x\n", w->param.eapdbtl); if (HDA_PARAM_AUDIO_WIDGET_CAP_OUT_AMP(w->param.widget_cap) && w->param.outamp_cap != 0) hdaa_dump_amp(devinfo->dev, w->param.outamp_cap, "Output"); if (HDA_PARAM_AUDIO_WIDGET_CAP_IN_AMP(w->param.widget_cap) && w->param.inamp_cap != 0) hdaa_dump_amp(devinfo->dev, w->param.inamp_cap, " Input"); if (w->nconns > 0) device_printf(devinfo->dev, " Connections: %d\n", w->nconns); for (j = 0; j < w->nconns; j++) { cw = hdaa_widget_get(devinfo, w->conns[j]); device_printf(devinfo->dev, " + %s<- nid=%d [%s]", (w->connsenable[j] == 0)?"[DISABLED] ":"", w->conns[j], (cw == NULL) ? "GHOST!" 
: cw->name); if (cw == NULL) printf(" [UNKNOWN]"); else if (cw->enable == 0) printf(" [DISABLED]"); if (w->nconns > 1 && w->selconn == j && w->type != HDA_PARAM_AUDIO_WIDGET_CAP_TYPE_AUDIO_MIXER) printf(" (selected)"); printf("\n"); } } } static void hdaa_dump_dst_nid(struct hdaa_pcm_devinfo *pdevinfo, nid_t nid, int depth) { struct hdaa_devinfo *devinfo = pdevinfo->devinfo; struct hdaa_widget *w, *cw; char buf[64]; int i; if (depth > HDA_PARSE_MAXDEPTH) return; w = hdaa_widget_get(devinfo, nid); if (w == NULL || w->enable == 0) return; if (depth == 0) device_printf(pdevinfo->dev, "%*s", 4, ""); else device_printf(pdevinfo->dev, "%*s + <- ", 4 + (depth - 1) * 7, ""); printf("nid=%d [%s]", w->nid, w->name); if (depth > 0) { if (w->ossmask == 0) { printf("\n"); return; } printf(" [src: %s]", hdaa_audio_ctl_ossmixer_mask2allname( w->ossmask, buf, sizeof(buf))); if (w->ossdev >= 0) { printf("\n"); return; } } printf("\n"); for (i = 0; i < w->nconns; i++) { if (w->connsenable[i] == 0) continue; cw = hdaa_widget_get(devinfo, w->conns[i]); if (cw == NULL || cw->enable == 0 || cw->bindas == -1) continue; hdaa_dump_dst_nid(pdevinfo, w->conns[i], depth + 1); } } static void hdaa_dump_dac(struct hdaa_pcm_devinfo *pdevinfo) { struct hdaa_devinfo *devinfo = pdevinfo->devinfo; struct hdaa_audio_as *as; struct hdaa_widget *w; nid_t *nids; int chid, i; if (pdevinfo->playas < 0) return; device_printf(pdevinfo->dev, "Playback:\n"); chid = devinfo->as[pdevinfo->playas].chans[0]; hdaa_dump_audio_formats(pdevinfo->dev, devinfo->chans[chid].supp_stream_formats, devinfo->chans[chid].supp_pcm_size_rate); for (i = 0; i < devinfo->as[pdevinfo->playas].num_chans; i++) { chid = devinfo->as[pdevinfo->playas].chans[i]; device_printf(pdevinfo->dev, " DAC:"); for (nids = devinfo->chans[chid].io; *nids != -1; nids++) printf(" %d", *nids); printf("\n"); } as = &devinfo->as[pdevinfo->playas]; for (i = 0; i < 16; i++) { if (as->pins[i] <= 0) continue; w = hdaa_widget_get(devinfo, as->pins[i]); if (w == NULL || w->enable == 0) continue; device_printf(pdevinfo->dev, "\n"); hdaa_dump_dst_nid(pdevinfo, as->pins[i], 0); } device_printf(pdevinfo->dev, "\n"); } static void hdaa_dump_adc(struct hdaa_pcm_devinfo *pdevinfo) { struct hdaa_devinfo *devinfo = pdevinfo->devinfo; struct hdaa_widget *w; nid_t *nids; int chid, i; if (pdevinfo->recas < 0) return; device_printf(pdevinfo->dev, "Record:\n"); chid = devinfo->as[pdevinfo->recas].chans[0]; hdaa_dump_audio_formats(pdevinfo->dev, devinfo->chans[chid].supp_stream_formats, devinfo->chans[chid].supp_pcm_size_rate); for (i = 0; i < devinfo->as[pdevinfo->recas].num_chans; i++) { chid = devinfo->as[pdevinfo->recas].chans[i]; device_printf(pdevinfo->dev, " ADC:"); for (nids = devinfo->chans[chid].io; *nids != -1; nids++) printf(" %d", *nids); printf("\n"); } for (i = devinfo->startnode; i < devinfo->endnode; i++) { w = hdaa_widget_get(devinfo, i); if (w == NULL || w->enable == 0) continue; if (w->type != HDA_PARAM_AUDIO_WIDGET_CAP_TYPE_AUDIO_INPUT) continue; if (w->bindas != pdevinfo->recas) continue; device_printf(pdevinfo->dev, "\n"); hdaa_dump_dst_nid(pdevinfo, i, 0); } device_printf(pdevinfo->dev, "\n"); } static void hdaa_dump_mix(struct hdaa_pcm_devinfo *pdevinfo) { struct hdaa_devinfo *devinfo = pdevinfo->devinfo; struct hdaa_widget *w; int i; int printed = 0; for (i = devinfo->startnode; i < devinfo->endnode; i++) { w = hdaa_widget_get(devinfo, i); if (w == NULL || w->enable == 0) continue; if (w->ossdev != SOUND_MIXER_IMIX) continue; if (w->bindas != pdevinfo->recas) continue; if 
(printed == 0) { printed = 1; device_printf(pdevinfo->dev, "Input Mix:\n"); } device_printf(pdevinfo->dev, "\n"); hdaa_dump_dst_nid(pdevinfo, i, 0); } if (printed) device_printf(pdevinfo->dev, "\n"); } static void hdaa_pindump(device_t dev) { struct hdaa_devinfo *devinfo = device_get_softc(dev); struct hdaa_widget *w; uint32_t res, pincap, delay; int i; device_printf(dev, "Dumping AFG pins:\n"); device_printf(dev, "nid 0x as seq " "device conn jack loc color misc\n"); for (i = devinfo->startnode; i < devinfo->endnode; i++) { w = hdaa_widget_get(devinfo, i); if (w == NULL || w->type != HDA_PARAM_AUDIO_WIDGET_CAP_TYPE_PIN_COMPLEX) continue; hdaa_dump_pin_config(w, w->wclass.pin.config); pincap = w->wclass.pin.cap; device_printf(dev, " Caps: %2s %3s %2s %4s %4s", HDA_PARAM_PIN_CAP_INPUT_CAP(pincap)?"IN":"", HDA_PARAM_PIN_CAP_OUTPUT_CAP(pincap)?"OUT":"", HDA_PARAM_PIN_CAP_HEADPHONE_CAP(pincap)?"HP":"", HDA_PARAM_PIN_CAP_EAPD_CAP(pincap)?"EAPD":"", HDA_PARAM_PIN_CAP_VREF_CTRL(pincap)?"VREF":""); if (HDA_PARAM_PIN_CAP_IMP_SENSE_CAP(pincap) || HDA_PARAM_PIN_CAP_PRESENCE_DETECT_CAP(pincap)) { if (HDA_PARAM_PIN_CAP_TRIGGER_REQD(pincap)) { delay = 0; hda_command(dev, HDA_CMD_SET_PIN_SENSE(0, w->nid, 0)); do { res = hda_command(dev, HDA_CMD_GET_PIN_SENSE(0, w->nid)); if (res != 0x7fffffff && res != 0xffffffff) break; DELAY(10); } while (++delay < 10000); } else { delay = 0; res = hda_command(dev, HDA_CMD_GET_PIN_SENSE(0, w->nid)); } printf(" Sense: 0x%08x (%sconnected%s)", res, (res & HDA_CMD_GET_PIN_SENSE_PRESENCE_DETECT) ? "" : "dis", (HDA_PARAM_AUDIO_WIDGET_CAP_DIGITAL(w->param.widget_cap) && (res & HDA_CMD_GET_PIN_SENSE_ELD_VALID)) ? ", ELD valid" : ""); if (delay > 0) printf(" delay %dus", delay * 10); } printf("\n"); } device_printf(dev, "NumGPIO=%d NumGPO=%d NumGPI=%d GPIWake=%d GPIUnsol=%d\n", HDA_PARAM_GPIO_COUNT_NUM_GPIO(devinfo->gpio_cap), HDA_PARAM_GPIO_COUNT_NUM_GPO(devinfo->gpio_cap), HDA_PARAM_GPIO_COUNT_NUM_GPI(devinfo->gpio_cap), HDA_PARAM_GPIO_COUNT_GPI_WAKE(devinfo->gpio_cap), HDA_PARAM_GPIO_COUNT_GPI_UNSOL(devinfo->gpio_cap)); hdaa_dump_gpi(devinfo); hdaa_dump_gpio(devinfo); hdaa_dump_gpo(devinfo); } static void hdaa_configure(device_t dev) { struct hdaa_devinfo *devinfo = device_get_softc(dev); struct hdaa_audio_ctl *ctl; int i; HDA_BOOTHVERBOSE( device_printf(dev, "Applying built-in patches...\n"); ); hdaa_patch(devinfo); HDA_BOOTHVERBOSE( device_printf(dev, "Applying local patches...\n"); ); hdaa_local_patch(devinfo); hdaa_audio_postprocess(devinfo); HDA_BOOTHVERBOSE( device_printf(dev, "Parsing Ctls...\n"); ); hdaa_audio_ctl_parse(devinfo); HDA_BOOTHVERBOSE( device_printf(dev, "Disabling nonaudio...\n"); ); hdaa_audio_disable_nonaudio(devinfo); HDA_BOOTHVERBOSE( device_printf(dev, "Disabling useless...\n"); ); hdaa_audio_disable_useless(devinfo); HDA_BOOTVERBOSE( device_printf(dev, "Patched pins configuration:\n"); hdaa_dump_pin_configs(devinfo); ); HDA_BOOTHVERBOSE( device_printf(dev, "Parsing pin associations...\n"); ); hdaa_audio_as_parse(devinfo); HDA_BOOTHVERBOSE( device_printf(dev, "Building AFG tree...\n"); ); hdaa_audio_build_tree(devinfo); HDA_BOOTHVERBOSE( device_printf(dev, "Disabling unassociated " "widgets...\n"); ); hdaa_audio_disable_unas(devinfo); HDA_BOOTHVERBOSE( device_printf(dev, "Disabling nonselected " "inputs...\n"); ); hdaa_audio_disable_notselected(devinfo); HDA_BOOTHVERBOSE( device_printf(dev, "Disabling useless...\n"); ); hdaa_audio_disable_useless(devinfo); HDA_BOOTHVERBOSE( device_printf(dev, "Disabling " "crossassociatement connections...\n"); ); 
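/*
 * The progress and dump messages in hdaa_configure() are wrapped in
 * HDA_BOOTVERBOSE()/HDA_BOOTHVERBOSE() so they only reach the console on
 * verbose boots.  A minimal sketch of such a gate, assuming only the
 * kernel's global bootverbose flag (the real macros come from the snd_hda
 * headers and may test additional verbosity levels):
 *
 *	extern int bootverbose;
 *	#define EXAMPLE_BOOTVERBOSE(stmt) do {		\
 *		if (bootverbose)			\
 *			stmt				\
 *	} while (0)
 *
 * Passing the whole statement block into the macro keeps the disabled case
 * down to a single flag test, with no format-string evaluation.
 */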
hdaa_audio_disable_crossas(devinfo); HDA_BOOTHVERBOSE( device_printf(dev, "Disabling useless...\n"); ); hdaa_audio_disable_useless(devinfo); HDA_BOOTHVERBOSE( device_printf(dev, "Binding associations to channels...\n"); ); hdaa_audio_bind_as(devinfo); HDA_BOOTHVERBOSE( device_printf(dev, "Assigning names to signal sources...\n"); ); hdaa_audio_assign_names(devinfo); HDA_BOOTHVERBOSE( device_printf(dev, "Preparing PCM devices...\n"); ); hdaa_prepare_pcms(devinfo); HDA_BOOTHVERBOSE( device_printf(dev, "Assigning mixers to the tree...\n"); ); hdaa_audio_assign_mixers(devinfo); HDA_BOOTHVERBOSE( device_printf(dev, "Preparing pin controls...\n"); ); hdaa_audio_prepare_pin_ctrl(devinfo); HDA_BOOTHVERBOSE( device_printf(dev, "AFG commit...\n"); ); hdaa_audio_commit(devinfo); HDA_BOOTHVERBOSE( device_printf(dev, "Applying direct built-in patches...\n"); ); hdaa_patch_direct(devinfo); HDA_BOOTHVERBOSE( device_printf(dev, "Pin sense init...\n"); ); hdaa_sense_init(devinfo); HDA_BOOTHVERBOSE( device_printf(dev, "Creating PCM devices...\n"); ); hdaa_create_pcms(devinfo); HDA_BOOTVERBOSE( if (devinfo->quirks != 0) { device_printf(dev, "FG config/quirks:"); for (i = 0; i < nitems(hdaa_quirks_tab); i++) { if ((devinfo->quirks & hdaa_quirks_tab[i].value) == hdaa_quirks_tab[i].value) printf(" %s", hdaa_quirks_tab[i].key); } printf("\n"); } ); HDA_BOOTHVERBOSE( device_printf(dev, "\n"); device_printf(dev, "+-----------+\n"); device_printf(dev, "| HDA NODES |\n"); device_printf(dev, "+-----------+\n"); hdaa_dump_nodes(devinfo); device_printf(dev, "\n"); device_printf(dev, "+----------------+\n"); device_printf(dev, "| HDA AMPLIFIERS |\n"); device_printf(dev, "+----------------+\n"); device_printf(dev, "\n"); i = 0; while ((ctl = hdaa_audio_ctl_each(devinfo, &i)) != NULL) { device_printf(dev, "%3d: nid %3d %s (%s) index %d", i, (ctl->widget != NULL) ? ctl->widget->nid : -1, (ctl->ndir == HDAA_CTL_IN)?"in ":"out", (ctl->dir == HDAA_CTL_IN)?"in ":"out", ctl->index); if (ctl->childwidget != NULL) printf(" cnid %3d", ctl->childwidget->nid); else printf(" "); printf(" ossmask=0x%08x\n", ctl->ossmask); device_printf(dev, " mute: %d step: %3d size: %3d off: %3d%s\n", ctl->mute, ctl->step, ctl->size, ctl->offset, (ctl->enable == 0) ? " [DISABLED]" : ((ctl->ossmask == 0) ? 
" [UNUSED]" : "")); } device_printf(dev, "\n"); ); } static void hdaa_unconfigure(device_t dev) { struct hdaa_devinfo *devinfo = device_get_softc(dev); struct hdaa_widget *w; int i, j; HDA_BOOTHVERBOSE( device_printf(dev, "Pin sense deinit...\n"); ); hdaa_sense_deinit(devinfo); free(devinfo->ctl, M_HDAA); devinfo->ctl = NULL; devinfo->ctlcnt = 0; free(devinfo->as, M_HDAA); devinfo->as = NULL; devinfo->ascnt = 0; free(devinfo->devs, M_HDAA); devinfo->devs = NULL; devinfo->num_devs = 0; free(devinfo->chans, M_HDAA); devinfo->chans = NULL; devinfo->num_chans = 0; for (i = devinfo->startnode; i < devinfo->endnode; i++) { w = hdaa_widget_get(devinfo, i); if (w == NULL) continue; w->enable = 1; w->selconn = -1; w->pflags = 0; w->bindas = -1; w->bindseqmask = 0; w->ossdev = -1; w->ossmask = 0; for (j = 0; j < w->nconns; j++) w->connsenable[j] = 1; if (w->type == HDA_PARAM_AUDIO_WIDGET_CAP_TYPE_PIN_COMPLEX) w->wclass.pin.config = w->wclass.pin.newconf; if (w->eld != NULL) { w->eld_len = 0; free(w->eld, M_HDAA); w->eld = NULL; } } } static int hdaa_sysctl_gpi_state(SYSCTL_HANDLER_ARGS) { struct hdaa_devinfo *devinfo = oidp->oid_arg1; device_t dev = devinfo->dev; char buf[256]; int n = 0, i, numgpi; uint32_t data = 0; buf[0] = 0; hdaa_lock(devinfo); numgpi = HDA_PARAM_GPIO_COUNT_NUM_GPI(devinfo->gpio_cap); if (numgpi > 0) { data = hda_command(dev, HDA_CMD_GET_GPI_DATA(0, devinfo->nid)); } hdaa_unlock(devinfo); for (i = 0; i < numgpi; i++) { n += snprintf(buf + n, sizeof(buf) - n, "%s%d=%d", n != 0 ? " " : "", i, ((data >> i) & 1)); } return (sysctl_handle_string(oidp, buf, sizeof(buf), req)); } static int hdaa_sysctl_gpio_state(SYSCTL_HANDLER_ARGS) { struct hdaa_devinfo *devinfo = oidp->oid_arg1; device_t dev = devinfo->dev; char buf[256]; int n = 0, i, numgpio; uint32_t data = 0, enable = 0, dir = 0; buf[0] = 0; hdaa_lock(devinfo); numgpio = HDA_PARAM_GPIO_COUNT_NUM_GPIO(devinfo->gpio_cap); if (numgpio > 0) { data = hda_command(dev, HDA_CMD_GET_GPIO_DATA(0, devinfo->nid)); enable = hda_command(dev, HDA_CMD_GET_GPIO_ENABLE_MASK(0, devinfo->nid)); dir = hda_command(dev, HDA_CMD_GET_GPIO_DIRECTION(0, devinfo->nid)); } hdaa_unlock(devinfo); for (i = 0; i < numgpio; i++) { n += snprintf(buf + n, sizeof(buf) - n, "%s%d=", n != 0 ? " " : "", i); if ((enable & (1 << i)) == 0) { n += snprintf(buf + n, sizeof(buf) - n, "disabled"); continue; } n += snprintf(buf + n, sizeof(buf) - n, "%sput(%d)", ((dir >> i) & 1) ? "out" : "in", ((data >> i) & 1)); } return (sysctl_handle_string(oidp, buf, sizeof(buf), req)); } static int hdaa_sysctl_gpio_config(SYSCTL_HANDLER_ARGS) { struct hdaa_devinfo *devinfo = oidp->oid_arg1; char buf[256]; int error, n = 0, i, numgpio; uint32_t gpio, x; gpio = devinfo->newgpio; numgpio = HDA_PARAM_GPIO_COUNT_NUM_GPIO(devinfo->gpio_cap); buf[0] = 0; for (i = 0; i < numgpio; i++) { x = (gpio & HDAA_GPIO_MASK(i)) >> HDAA_GPIO_SHIFT(i); n += snprintf(buf + n, sizeof(buf) - n, "%s%d=%s", n != 0 ? 
" " : "", i, HDA_GPIO_ACTIONS[x]); } error = sysctl_handle_string(oidp, buf, sizeof(buf), req); if (error != 0 || req->newptr == NULL) return (error); if (strncmp(buf, "0x", 2) == 0) gpio = strtol(buf + 2, NULL, 16); else gpio = hdaa_gpio_patch(gpio, buf); hdaa_lock(devinfo); devinfo->newgpio = devinfo->gpio = gpio; hdaa_gpio_commit(devinfo); hdaa_unlock(devinfo); return (0); } static int hdaa_sysctl_gpo_state(SYSCTL_HANDLER_ARGS) { struct hdaa_devinfo *devinfo = oidp->oid_arg1; device_t dev = devinfo->dev; char buf[256]; int n = 0, i, numgpo; uint32_t data = 0; buf[0] = 0; hdaa_lock(devinfo); numgpo = HDA_PARAM_GPIO_COUNT_NUM_GPO(devinfo->gpio_cap); if (numgpo > 0) { data = hda_command(dev, HDA_CMD_GET_GPO_DATA(0, devinfo->nid)); } hdaa_unlock(devinfo); for (i = 0; i < numgpo; i++) { n += snprintf(buf + n, sizeof(buf) - n, "%s%d=%d", n != 0 ? " " : "", i, ((data >> i) & 1)); } return (sysctl_handle_string(oidp, buf, sizeof(buf), req)); } static int hdaa_sysctl_gpo_config(SYSCTL_HANDLER_ARGS) { struct hdaa_devinfo *devinfo = oidp->oid_arg1; char buf[256]; int error, n = 0, i, numgpo; uint32_t gpo, x; gpo = devinfo->newgpo; numgpo = HDA_PARAM_GPIO_COUNT_NUM_GPO(devinfo->gpio_cap); buf[0] = 0; for (i = 0; i < numgpo; i++) { x = (gpo & HDAA_GPIO_MASK(i)) >> HDAA_GPIO_SHIFT(i); n += snprintf(buf + n, sizeof(buf) - n, "%s%d=%s", n != 0 ? " " : "", i, HDA_GPIO_ACTIONS[x]); } error = sysctl_handle_string(oidp, buf, sizeof(buf), req); if (error != 0 || req->newptr == NULL) return (error); if (strncmp(buf, "0x", 2) == 0) gpo = strtol(buf + 2, NULL, 16); else gpo = hdaa_gpio_patch(gpo, buf); hdaa_lock(devinfo); devinfo->newgpo = devinfo->gpo = gpo; hdaa_gpo_commit(devinfo); hdaa_unlock(devinfo); return (0); } static int hdaa_sysctl_reconfig(SYSCTL_HANDLER_ARGS) { device_t dev; struct hdaa_devinfo *devinfo; int error, val; dev = oidp->oid_arg1; devinfo = device_get_softc(dev); if (devinfo == NULL) return (EINVAL); val = 0; error = sysctl_handle_int(oidp, &val, 0, req); if (error != 0 || req->newptr == NULL || val == 0) return (error); HDA_BOOTHVERBOSE( device_printf(dev, "Reconfiguration...\n"); ); if ((error = device_delete_children(dev)) != 0) return (error); hdaa_lock(devinfo); hdaa_unconfigure(dev); hdaa_configure(dev); hdaa_unlock(devinfo); bus_generic_attach(dev); HDA_BOOTHVERBOSE( device_printf(dev, "Reconfiguration done\n"); ); return (0); } static int hdaa_suspend(device_t dev) { struct hdaa_devinfo *devinfo = device_get_softc(dev); int i; HDA_BOOTHVERBOSE( device_printf(dev, "Suspend...\n"); ); hdaa_lock(devinfo); HDA_BOOTHVERBOSE( device_printf(dev, "Stop streams...\n"); ); for (i = 0; i < devinfo->num_chans; i++) { if (devinfo->chans[i].flags & HDAA_CHN_RUNNING) { devinfo->chans[i].flags |= HDAA_CHN_SUSPEND; hdaa_channel_stop(&devinfo->chans[i]); } } HDA_BOOTHVERBOSE( device_printf(dev, "Power down FG" " nid=%d to the D3 state...\n", devinfo->nid); ); hda_command(devinfo->dev, HDA_CMD_SET_POWER_STATE(0, devinfo->nid, HDA_CMD_POWER_STATE_D3)); callout_stop(&devinfo->poll_jack); hdaa_unlock(devinfo); callout_drain(&devinfo->poll_jack); HDA_BOOTHVERBOSE( device_printf(dev, "Suspend done\n"); ); return (0); } static int hdaa_resume(device_t dev) { struct hdaa_devinfo *devinfo = device_get_softc(dev); int i; HDA_BOOTHVERBOSE( device_printf(dev, "Resume...\n"); ); hdaa_lock(devinfo); HDA_BOOTHVERBOSE( device_printf(dev, "Power up audio FG nid=%d...\n", devinfo->nid); ); hdaa_powerup(devinfo); HDA_BOOTHVERBOSE( device_printf(dev, "AFG commit...\n"); ); hdaa_audio_commit(devinfo); 
HDA_BOOTHVERBOSE( device_printf(dev, "Applying direct built-in patches...\n"); ); hdaa_patch_direct(devinfo); HDA_BOOTHVERBOSE( device_printf(dev, "Pin sense init...\n"); ); hdaa_sense_init(devinfo); hdaa_unlock(devinfo); for (i = 0; i < devinfo->num_devs; i++) { struct hdaa_pcm_devinfo *pdevinfo = &devinfo->devs[i]; HDA_BOOTHVERBOSE( device_printf(pdevinfo->dev, "OSS mixer reinitialization...\n"); ); if (mixer_reinit(pdevinfo->dev) == -1) device_printf(pdevinfo->dev, "unable to reinitialize the mixer\n"); } hdaa_lock(devinfo); HDA_BOOTHVERBOSE( device_printf(dev, "Start streams...\n"); ); for (i = 0; i < devinfo->num_chans; i++) { if (devinfo->chans[i].flags & HDAA_CHN_SUSPEND) { devinfo->chans[i].flags &= ~HDAA_CHN_SUSPEND; hdaa_channel_start(&devinfo->chans[i]); } } hdaa_unlock(devinfo); HDA_BOOTHVERBOSE( device_printf(dev, "Resume done\n"); ); return (0); } static int hdaa_probe(device_t dev) { const char *pdesc; char buf[128]; if (hda_get_node_type(dev) != HDA_PARAM_FCT_GRP_TYPE_NODE_TYPE_AUDIO) return (ENXIO); pdesc = device_get_desc(device_get_parent(dev)); snprintf(buf, sizeof(buf), "%.*s Audio Function Group", (int)(strlen(pdesc) - 10), pdesc); device_set_desc_copy(dev, buf); return (BUS_PROBE_DEFAULT); } static int hdaa_attach(device_t dev) { struct hdaa_devinfo *devinfo = device_get_softc(dev); uint32_t res; nid_t nid = hda_get_node_id(dev); devinfo->dev = dev; devinfo->lock = HDAC_GET_MTX(device_get_parent(dev), dev); devinfo->nid = nid; devinfo->newquirks = -1; devinfo->newgpio = -1; devinfo->newgpo = -1; callout_init(&devinfo->poll_jack, 1); devinfo->poll_ival = hz; hdaa_lock(devinfo); res = hda_command(dev, HDA_CMD_GET_PARAMETER(0 , nid, HDA_PARAM_SUB_NODE_COUNT)); hdaa_unlock(devinfo); devinfo->nodecnt = HDA_PARAM_SUB_NODE_COUNT_TOTAL(res); devinfo->startnode = HDA_PARAM_SUB_NODE_COUNT_START(res); devinfo->endnode = devinfo->startnode + devinfo->nodecnt; HDA_BOOTVERBOSE( device_printf(dev, "Subsystem ID: 0x%08x\n", hda_get_subsystem_id(dev)); ); HDA_BOOTHVERBOSE( device_printf(dev, "Audio Function Group at nid=%d: %d subnodes %d-%d\n", nid, devinfo->nodecnt, devinfo->startnode, devinfo->endnode - 1); ); if (devinfo->nodecnt > 0) devinfo->widget = (struct hdaa_widget *)malloc( sizeof(*(devinfo->widget)) * devinfo->nodecnt, M_HDAA, M_WAITOK | M_ZERO); else devinfo->widget = NULL; hdaa_lock(devinfo); HDA_BOOTHVERBOSE( device_printf(dev, "Powering up...\n"); ); hdaa_powerup(devinfo); HDA_BOOTHVERBOSE( device_printf(dev, "Parsing audio FG...\n"); ); hdaa_audio_parse(devinfo); HDA_BOOTVERBOSE( device_printf(dev, "Original pins configuration:\n"); hdaa_dump_pin_configs(devinfo); ); hdaa_configure(dev); hdaa_unlock(devinfo); SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev), SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "config", CTLTYPE_STRING | CTLFLAG_RW | CTLFLAG_MPSAFE, &devinfo->newquirks, 0, hdaa_sysctl_quirks, "A", "Configuration options"); SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev), SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "gpi_state", CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, devinfo, 0, hdaa_sysctl_gpi_state, "A", "GPI state"); SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev), SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "gpio_state", CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, devinfo, 0, hdaa_sysctl_gpio_state, "A", "GPIO state"); SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev), SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "gpio_config", CTLTYPE_STRING | CTLFLAG_RW | CTLFLAG_MPSAFE, devinfo, 0, hdaa_sysctl_gpio_config, "A", "GPIO 
configuration"); SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev), SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "gpo_state", CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, devinfo, 0, hdaa_sysctl_gpo_state, "A", "GPO state"); SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev), SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "gpo_config", CTLTYPE_STRING | CTLFLAG_RW | CTLFLAG_MPSAFE, devinfo, 0, hdaa_sysctl_gpo_config, "A", "GPO configuration"); SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev), SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "reconfig", CTLTYPE_INT | CTLFLAG_RW, dev, 0, hdaa_sysctl_reconfig, "I", "Reprocess configuration"); bus_generic_attach(dev); return (0); } static int hdaa_detach(device_t dev) { struct hdaa_devinfo *devinfo = device_get_softc(dev); int error; if ((error = device_delete_children(dev)) != 0) return (error); hdaa_lock(devinfo); hdaa_unconfigure(dev); devinfo->poll_ival = 0; callout_stop(&devinfo->poll_jack); hdaa_unlock(devinfo); callout_drain(&devinfo->poll_jack); free(devinfo->widget, M_HDAA); return (0); } static int hdaa_print_child(device_t dev, device_t child) { struct hdaa_devinfo *devinfo = device_get_softc(dev); struct hdaa_pcm_devinfo *pdevinfo = (struct hdaa_pcm_devinfo *)device_get_ivars(child); struct hdaa_audio_as *as; int retval, first = 1, i; retval = bus_print_child_header(dev, child); retval += printf(" at nid "); if (pdevinfo->playas >= 0) { as = &devinfo->as[pdevinfo->playas]; for (i = 0; i < 16; i++) { if (as->pins[i] <= 0) continue; retval += printf("%s%d", first ? "" : ",", as->pins[i]); first = 0; } } if (pdevinfo->recas >= 0) { if (pdevinfo->playas >= 0) { retval += printf(" and "); first = 1; } as = &devinfo->as[pdevinfo->recas]; for (i = 0; i < 16; i++) { if (as->pins[i] <= 0) continue; retval += printf("%s%d", first ? "" : ",", as->pins[i]); first = 0; } } retval += bus_print_child_footer(dev, child); return (retval); } static int hdaa_child_location_str(device_t dev, device_t child, char *buf, size_t buflen) { struct hdaa_devinfo *devinfo = device_get_softc(dev); struct hdaa_pcm_devinfo *pdevinfo = (struct hdaa_pcm_devinfo *)device_get_ivars(child); struct hdaa_audio_as *as; int first = 1, i, len = 0; len += snprintf(buf + len, buflen - len, "nid="); if (pdevinfo->playas >= 0) { as = &devinfo->as[pdevinfo->playas]; for (i = 0; i < 16; i++) { if (as->pins[i] <= 0) continue; len += snprintf(buf + len, buflen - len, "%s%d", first ? "" : ",", as->pins[i]); first = 0; } } if (pdevinfo->recas >= 0) { as = &devinfo->as[pdevinfo->recas]; for (i = 0; i < 16; i++) { if (as->pins[i] <= 0) continue; len += snprintf(buf + len, buflen - len, "%s%d", first ? "" : ",", as->pins[i]); first = 0; } } return (0); } static void hdaa_stream_intr(device_t dev, int dir, int stream) { struct hdaa_devinfo *devinfo = device_get_softc(dev); struct hdaa_chan *ch; int i; for (i = 0; i < devinfo->num_chans; i++) { ch = &devinfo->chans[i]; if (!(ch->flags & HDAA_CHN_RUNNING)) continue; if (ch->dir == ((dir == 1) ? 
PCMDIR_PLAY : PCMDIR_REC) && ch->sid == stream) { hdaa_unlock(devinfo); chn_intr(ch->c); hdaa_lock(devinfo); } } } static void hdaa_unsol_intr(device_t dev, uint32_t resp) { struct hdaa_devinfo *devinfo = device_get_softc(dev); struct hdaa_widget *w; int i, tag, flags; HDA_BOOTHVERBOSE( device_printf(dev, "Unsolicited response %08x\n", resp); ); tag = resp >> 26; for (i = devinfo->startnode; i < devinfo->endnode; i++) { w = hdaa_widget_get(devinfo, i); if (w == NULL || w->enable == 0 || w->type != HDA_PARAM_AUDIO_WIDGET_CAP_TYPE_PIN_COMPLEX) continue; if (w->unsol != tag) continue; if (HDA_PARAM_PIN_CAP_DP(w->wclass.pin.cap) || HDA_PARAM_PIN_CAP_HDMI(w->wclass.pin.cap)) flags = resp & 0x03; else flags = 0x01; if (flags & 0x01) hdaa_presence_handler(w); if (flags & 0x02) hdaa_eld_handler(w); } } static device_method_t hdaa_methods[] = { /* device interface */ DEVMETHOD(device_probe, hdaa_probe), DEVMETHOD(device_attach, hdaa_attach), DEVMETHOD(device_detach, hdaa_detach), DEVMETHOD(device_suspend, hdaa_suspend), DEVMETHOD(device_resume, hdaa_resume), /* Bus interface */ DEVMETHOD(bus_print_child, hdaa_print_child), DEVMETHOD(bus_child_location_str, hdaa_child_location_str), DEVMETHOD(hdac_stream_intr, hdaa_stream_intr), DEVMETHOD(hdac_unsol_intr, hdaa_unsol_intr), DEVMETHOD(hdac_pindump, hdaa_pindump), DEVMETHOD_END }; static driver_t hdaa_driver = { "hdaa", hdaa_methods, sizeof(struct hdaa_devinfo), }; static devclass_t hdaa_devclass; DRIVER_MODULE(snd_hda, hdacc, hdaa_driver, hdaa_devclass, NULL, NULL); static void hdaa_chan_formula(struct hdaa_devinfo *devinfo, int asid, char *buf, int buflen) { struct hdaa_audio_as *as; int c; as = &devinfo->as[asid]; c = devinfo->chans[as->chans[0]].channels; if (c == 1) snprintf(buf, buflen, "mono"); else if (c == 2) { if (as->hpredir < 0) buf[0] = 0; else snprintf(buf, buflen, "2.0"); } else if (as->pinset == 0x0003) snprintf(buf, buflen, "3.1"); else if (as->pinset == 0x0005 || as->pinset == 0x0011) snprintf(buf, buflen, "4.0"); else if (as->pinset == 0x0007 || as->pinset == 0x0013) snprintf(buf, buflen, "5.1"); else if (as->pinset == 0x0017) snprintf(buf, buflen, "7.1"); else snprintf(buf, buflen, "%dch", c); if (as->hpredir >= 0) strlcat(buf, "+HP", buflen); } static int hdaa_chan_type(struct hdaa_devinfo *devinfo, int asid) { struct hdaa_audio_as *as; struct hdaa_widget *w; int i, t = -1, t1; as = &devinfo->as[asid]; for (i = 0; i < 16; i++) { w = hdaa_widget_get(devinfo, as->pins[i]); if (w == NULL || w->enable == 0 || w->type != HDA_PARAM_AUDIO_WIDGET_CAP_TYPE_PIN_COMPLEX) continue; t1 = HDA_CONFIG_DEFAULTCONF_DEVICE(w->wclass.pin.config); if (t == -1) t = t1; else if (t != t1) { t = -2; break; } } return (t); } static int hdaa_sysctl_32bit(SYSCTL_HANDLER_ARGS) { struct hdaa_audio_as *as = (struct hdaa_audio_as *)oidp->oid_arg1; struct hdaa_pcm_devinfo *pdevinfo = as->pdevinfo; struct hdaa_devinfo *devinfo = pdevinfo->devinfo; struct hdaa_chan *ch; int error, val, i; uint32_t pcmcap; ch = &devinfo->chans[as->chans[0]]; val = (ch->bit32 == 4) ? 32 : ((ch->bit32 == 3) ? 24 : ((ch->bit32 == 2) ? 
20 : 0)); error = sysctl_handle_int(oidp, &val, 0, req); if (error != 0 || req->newptr == NULL) return (error); pcmcap = ch->supp_pcm_size_rate; if (val == 32 && HDA_PARAM_SUPP_PCM_SIZE_RATE_32BIT(pcmcap)) ch->bit32 = 4; else if (val == 24 && HDA_PARAM_SUPP_PCM_SIZE_RATE_24BIT(pcmcap)) ch->bit32 = 3; else if (val == 20 && HDA_PARAM_SUPP_PCM_SIZE_RATE_20BIT(pcmcap)) ch->bit32 = 2; else return (EINVAL); for (i = 1; i < as->num_chans; i++) devinfo->chans[as->chans[i]].bit32 = ch->bit32; return (0); } static int hdaa_pcm_probe(device_t dev) { struct hdaa_pcm_devinfo *pdevinfo = (struct hdaa_pcm_devinfo *)device_get_ivars(dev); struct hdaa_devinfo *devinfo = pdevinfo->devinfo; const char *pdesc; char chans1[8], chans2[8]; char buf[128]; int loc1, loc2, t1, t2; if (pdevinfo->playas >= 0) loc1 = devinfo->as[pdevinfo->playas].location; else loc1 = devinfo->as[pdevinfo->recas].location; if (pdevinfo->recas >= 0) loc2 = devinfo->as[pdevinfo->recas].location; else loc2 = loc1; if (loc1 != loc2) loc1 = -2; if (loc1 >= 0 && HDA_LOCS[loc1][0] == '0') loc1 = -2; chans1[0] = 0; chans2[0] = 0; t1 = t2 = -1; if (pdevinfo->playas >= 0) { hdaa_chan_formula(devinfo, pdevinfo->playas, chans1, sizeof(chans1)); t1 = hdaa_chan_type(devinfo, pdevinfo->playas); } if (pdevinfo->recas >= 0) { hdaa_chan_formula(devinfo, pdevinfo->recas, chans2, sizeof(chans2)); t2 = hdaa_chan_type(devinfo, pdevinfo->recas); } if (chans1[0] != 0 || chans2[0] != 0) { if (chans1[0] == 0 && pdevinfo->playas >= 0) snprintf(chans1, sizeof(chans1), "2.0"); else if (chans2[0] == 0 && pdevinfo->recas >= 0) snprintf(chans2, sizeof(chans2), "2.0"); if (strcmp(chans1, chans2) == 0) chans2[0] = 0; } if (t1 == -1) t1 = t2; else if (t2 == -1) t2 = t1; if (t1 != t2) t1 = -2; if (pdevinfo->digital) t1 = -2; pdesc = device_get_desc(device_get_parent(dev)); snprintf(buf, sizeof(buf), "%.*s (%s%s%s%s%s%s%s%s%s)", (int)(strlen(pdesc) - 21), pdesc, loc1 >= 0 ? HDA_LOCS[loc1] : "", loc1 >= 0 ? " " : "", (pdevinfo->digital == 0x7)?"HDMI/DP": ((pdevinfo->digital == 0x5)?"DisplayPort": ((pdevinfo->digital == 0x3)?"HDMI": ((pdevinfo->digital)?"Digital":"Analog"))), chans1[0] ? " " : "", chans1, chans2[0] ? "/" : "", chans2, t1 >= 0 ? " " : "", t1 >= 0 ? 
HDA_DEVS[t1] : ""); device_set_desc_copy(dev, buf); return (BUS_PROBE_SPECIFIC); } static int hdaa_pcm_attach(device_t dev) { struct hdaa_pcm_devinfo *pdevinfo = (struct hdaa_pcm_devinfo *)device_get_ivars(dev); struct hdaa_devinfo *devinfo = pdevinfo->devinfo; struct hdaa_audio_as *as; struct snddev_info *d; char status[SND_STATUSLEN]; int i; pdevinfo->chan_size = pcm_getbuffersize(dev, HDA_BUFSZ_MIN, HDA_BUFSZ_DEFAULT, HDA_BUFSZ_MAX); HDA_BOOTVERBOSE( hdaa_dump_dac(pdevinfo); hdaa_dump_adc(pdevinfo); hdaa_dump_mix(pdevinfo); hdaa_dump_ctls(pdevinfo, "Master Volume", SOUND_MASK_VOLUME); hdaa_dump_ctls(pdevinfo, "PCM Volume", SOUND_MASK_PCM); hdaa_dump_ctls(pdevinfo, "CD Volume", SOUND_MASK_CD); hdaa_dump_ctls(pdevinfo, "Microphone Volume", SOUND_MASK_MIC); hdaa_dump_ctls(pdevinfo, "Microphone2 Volume", SOUND_MASK_MONITOR); hdaa_dump_ctls(pdevinfo, "Line-in Volume", SOUND_MASK_LINE); hdaa_dump_ctls(pdevinfo, "Speaker/Beep Volume", SOUND_MASK_SPEAKER); hdaa_dump_ctls(pdevinfo, "Recording Level", SOUND_MASK_RECLEV); hdaa_dump_ctls(pdevinfo, "Input Mix Level", SOUND_MASK_IMIX); hdaa_dump_ctls(pdevinfo, "Input Monitoring Level", SOUND_MASK_IGAIN); hdaa_dump_ctls(pdevinfo, NULL, 0); ); if (resource_int_value(device_get_name(dev), device_get_unit(dev), "blocksize", &i) == 0 && i > 0) { i &= HDA_BLK_ALIGN; if (i < HDA_BLK_MIN) i = HDA_BLK_MIN; pdevinfo->chan_blkcnt = pdevinfo->chan_size / i; i = 0; while (pdevinfo->chan_blkcnt >> i) i++; pdevinfo->chan_blkcnt = 1 << (i - 1); if (pdevinfo->chan_blkcnt < HDA_BDL_MIN) pdevinfo->chan_blkcnt = HDA_BDL_MIN; else if (pdevinfo->chan_blkcnt > HDA_BDL_MAX) pdevinfo->chan_blkcnt = HDA_BDL_MAX; } else pdevinfo->chan_blkcnt = HDA_BDL_DEFAULT; /* * We don't register interrupt handler with snd_setup_intr * in pcm device. Mark pcm device as MPSAFE manually. 
*/ pcm_setflags(dev, pcm_getflags(dev) | SD_F_MPSAFE); HDA_BOOTHVERBOSE( device_printf(dev, "OSS mixer initialization...\n"); ); if (mixer_init(dev, &hdaa_audio_ctl_ossmixer_class, pdevinfo) != 0) device_printf(dev, "Can't register mixer\n"); HDA_BOOTHVERBOSE( device_printf(dev, "Registering PCM channels...\n"); ); if (pcm_register(dev, pdevinfo, (pdevinfo->playas >= 0)?1:0, (pdevinfo->recas >= 0)?1:0) != 0) device_printf(dev, "Can't register PCM\n"); pdevinfo->registered++; d = device_get_softc(dev); if (pdevinfo->playas >= 0) { as = &devinfo->as[pdevinfo->playas]; for (i = 0; i < as->num_chans; i++) pcm_addchan(dev, PCMDIR_PLAY, &hdaa_channel_class, &devinfo->chans[as->chans[i]]); SYSCTL_ADD_PROC(&d->play_sysctl_ctx, SYSCTL_CHILDREN(d->play_sysctl_tree), OID_AUTO, "32bit", CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE, as, sizeof(as), hdaa_sysctl_32bit, "I", "Resolution of 32bit samples (20/24/32bit)"); } if (pdevinfo->recas >= 0) { as = &devinfo->as[pdevinfo->recas]; for (i = 0; i < as->num_chans; i++) pcm_addchan(dev, PCMDIR_REC, &hdaa_channel_class, &devinfo->chans[as->chans[i]]); SYSCTL_ADD_PROC(&d->rec_sysctl_ctx, SYSCTL_CHILDREN(d->rec_sysctl_tree), OID_AUTO, "32bit", CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE, as, sizeof(as), hdaa_sysctl_32bit, "I", "Resolution of 32bit samples (20/24/32bit)"); pdevinfo->autorecsrc = 2; resource_int_value(device_get_name(dev), device_get_unit(dev), "rec.autosrc", &pdevinfo->autorecsrc); SYSCTL_ADD_INT(&d->rec_sysctl_ctx, SYSCTL_CHILDREN(d->rec_sysctl_tree), OID_AUTO, "autosrc", CTLFLAG_RW, &pdevinfo->autorecsrc, 0, "Automatic recording source selection"); } if (pdevinfo->mixer != NULL) { hdaa_audio_ctl_set_defaults(pdevinfo); hdaa_lock(devinfo); if (pdevinfo->playas >= 0) { as = &devinfo->as[pdevinfo->playas]; hdaa_channels_handler(as); } if (pdevinfo->recas >= 0) { as = &devinfo->as[pdevinfo->recas]; hdaa_autorecsrc_handler(as, NULL); hdaa_channels_handler(as); } hdaa_unlock(devinfo); } snprintf(status, SND_STATUSLEN, "on %s %s", device_get_nameunit(device_get_parent(dev)), PCM_KLDSTRING(snd_hda)); pcm_setstatus(dev, status); return (0); } static int hdaa_pcm_detach(device_t dev) { struct hdaa_pcm_devinfo *pdevinfo = (struct hdaa_pcm_devinfo *)device_get_ivars(dev); int err; if (pdevinfo->registered > 0) { err = pcm_unregister(dev); if (err != 0) return (err); } return (0); } static device_method_t hdaa_pcm_methods[] = { /* device interface */ DEVMETHOD(device_probe, hdaa_pcm_probe), DEVMETHOD(device_attach, hdaa_pcm_attach), DEVMETHOD(device_detach, hdaa_pcm_detach), DEVMETHOD_END }; static driver_t hdaa_pcm_driver = { "pcm", hdaa_pcm_methods, PCM_SOFTC_SIZE, }; DRIVER_MODULE(snd_hda_pcm, hdaa, hdaa_pcm_driver, pcm_devclass, NULL, NULL); MODULE_DEPEND(snd_hda, sound, SOUND_MINVER, SOUND_PREFVER, SOUND_MAXVER); MODULE_VERSION(snd_hda, 1); Index: head/sys/dev/syscons/fire/fire_saver.c =================================================================== --- head/sys/dev/syscons/fire/fire_saver.c (revision 327948) +++ head/sys/dev/syscons/fire/fire_saver.c (revision 327949) @@ -1,197 +1,197 @@ /*- * SPDX-License-Identifier: BSD-2-Clause-FreeBSD * * Copyright (c) 1999 Brad Forschinger * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer, * without modification, immediately at the beginning of the file. * 2. 
Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * $FreeBSD$ */ /* * brad forschinger, 19990504 * * written with much help from warp_saver.c * */ #include #include #include #include #include #include #include #include #include #include #include #define SAVER_NAME "fire_saver" #define RED(n) ((n) * 3 + 0) #define GREEN(n) ((n) * 3 + 1) #define BLUE(n) ((n) * 3 + 2) #define SET_ORIGIN(adp, o) do { \ int oo = o; \ if (oo != last_origin) \ vidd_set_win_org(adp, last_origin = oo); \ } while (0) static u_char *buf; static u_char *vid; static int banksize, scrmode, bpsl, scrw, scrh; static u_char fire_pal[768]; static int blanked; static void fire_update(video_adapter_t *adp) { int x, y; int o, p; int last_origin = -1; /* make a new bottom line */ for (x = 0, y = scrh; x < scrw; x++) buf[x + (y * bpsl)] = random() % 160 + 96; /* fade the flames out */ for (y = 0; y < scrh; y++) { for (x = 0; x < scrw; x++) { buf[x + (y * scrw)] = (buf[(x + 0) + ((y + 0) * scrw)] + buf[(x - 1) + ((y + 1) * scrw)] + buf[(x + 0) + ((y + 1) * scrw)] + buf[(x + 1) + ((y + 1) * scrw)]) / 4; if (buf[x + (y * scrw)] > 0) buf[x + (y * scrw)]--; } } /* blit our buffer into video ram */ for (y = 0, p = 0, o = 0; y < scrh; y++, p += bpsl) { while (p > banksize) { p -= banksize; o += banksize; } SET_ORIGIN(adp, o); if (p + scrw < banksize) { bcopy(buf + y * scrw, vid + p, scrw); } else { bcopy(buf + y * scrw, vid + p, banksize - p); SET_ORIGIN(adp, o + banksize); bcopy(buf + y * scrw + (banksize - p), vid, scrw - (banksize - p)); p -= banksize; o += banksize; } } } static int fire_saver(video_adapter_t *adp, int blank) { int pl; if (blank) { /* switch to graphics mode */ if (blanked <= 0) { pl = splhigh(); vidd_set_mode(adp, scrmode); vidd_load_palette(adp, fire_pal); blanked++; vid = (u_char *)adp->va_window; banksize = adp->va_window_size; bpsl = adp->va_line_width; splx(pl); vidd_clear(adp); } fire_update(adp); } else { blanked = 0; } return 0; } static int fire_init(video_adapter_t *adp) { video_info_t info; int i, red, green, blue; if (!vidd_get_info(adp, M_VGA_CG320, &info)) { scrmode = M_VGA_CG320; } else { log(LOG_NOTICE, "%s: the console does not support M_VGA_CG320\n", SAVER_NAME); return (ENODEV); } scrw = info.vi_width; scrh = info.vi_height; - buf = (u_char *)malloc(scrw * (scrh + 1), M_DEVBUF, M_NOWAIT); + buf = (u_char *)mallocarray(scrw, scrh + 1, M_DEVBUF, M_NOWAIT); if (buf) { bzero(buf, scrw * (scrh + 1)); } else { log(LOG_NOTICE, "%s: buffer allocation is failed\n", SAVER_NAME); return (ENODEV); } /* intialize the palette */ red = green = blue = 0; for (i = 0; i < 256; i++) { red++; if (red > 128) green += 2; fire_pal[RED(i)] = red; 
fire_pal[GREEN(i)] = green; fire_pal[BLUE(i)] = blue; } return (0); } static int fire_term(video_adapter_t *adp) { free(buf, M_DEVBUF); return (0); } static scrn_saver_t fire_module = { SAVER_NAME, fire_init, fire_term, fire_saver, NULL }; SAVER_MODULE(fire_saver, fire_module); Index: head/sys/dev/virtio/console/virtio_console.c =================================================================== --- head/sys/dev/virtio/console/virtio_console.c (revision 327948) +++ head/sys/dev/virtio/console/virtio_console.c (revision 327949) @@ -1,1502 +1,1504 @@ /*- * Copyright (c) 2014, Bryan Venteicher * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice unmodified, this list of conditions, and the following * disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ /* Driver for VirtIO console devices. */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "virtio_if.h" #define VTCON_MAX_PORTS 32 #define VTCON_TTY_PREFIX "V" #define VTCON_TTY_ALIAS_PREFIX "vtcon" #define VTCON_BULK_BUFSZ 128 #define VTCON_CTRL_BUFSZ 128 /* * The buffers cannot cross more than one page boundary due to the * size of the sglist segment array used. 
*/ CTASSERT(VTCON_BULK_BUFSZ <= PAGE_SIZE); CTASSERT(VTCON_CTRL_BUFSZ <= PAGE_SIZE); CTASSERT(sizeof(struct virtio_console_config) <= VTCON_CTRL_BUFSZ); struct vtcon_softc; struct vtcon_softc_port; struct vtcon_port { struct mtx vtcport_mtx; struct vtcon_softc *vtcport_sc; struct vtcon_softc_port *vtcport_scport; struct tty *vtcport_tty; struct virtqueue *vtcport_invq; struct virtqueue *vtcport_outvq; int vtcport_id; int vtcport_flags; #define VTCON_PORT_FLAG_GONE 0x01 #define VTCON_PORT_FLAG_CONSOLE 0x02 #define VTCON_PORT_FLAG_ALIAS 0x04 #if defined(KDB) int vtcport_alt_break_state; #endif }; #define VTCON_PORT_LOCK(_port) mtx_lock(&(_port)->vtcport_mtx) #define VTCON_PORT_UNLOCK(_port) mtx_unlock(&(_port)->vtcport_mtx) struct vtcon_softc_port { struct vtcon_softc *vcsp_sc; struct vtcon_port *vcsp_port; struct virtqueue *vcsp_invq; struct virtqueue *vcsp_outvq; }; struct vtcon_softc { device_t vtcon_dev; struct mtx vtcon_mtx; uint64_t vtcon_features; uint32_t vtcon_max_ports; uint32_t vtcon_flags; #define VTCON_FLAG_DETACHED 0x01 #define VTCON_FLAG_SIZE 0x02 #define VTCON_FLAG_MULTIPORT 0x04 /* * Ports can be added and removed during runtime, but we have * to allocate all the virtqueues during attach. This array is * indexed by the port ID. */ struct vtcon_softc_port *vtcon_ports; struct task vtcon_ctrl_task; struct virtqueue *vtcon_ctrl_rxvq; struct virtqueue *vtcon_ctrl_txvq; struct mtx vtcon_ctrl_tx_mtx; }; #define VTCON_LOCK(_sc) mtx_lock(&(_sc)->vtcon_mtx) #define VTCON_UNLOCK(_sc) mtx_unlock(&(_sc)->vtcon_mtx) #define VTCON_LOCK_ASSERT(_sc) \ mtx_assert(&(_sc)->vtcon_mtx, MA_OWNED) #define VTCON_LOCK_ASSERT_NOTOWNED(_sc) \ mtx_assert(&(_sc)->vtcon_mtx, MA_NOTOWNED) #define VTCON_CTRL_TX_LOCK(_sc) mtx_lock(&(_sc)->vtcon_ctrl_tx_mtx) #define VTCON_CTRL_TX_UNLOCK(_sc) mtx_unlock(&(_sc)->vtcon_ctrl_tx_mtx) #define VTCON_ASSERT_VALID_PORTID(_sc, _id) \ KASSERT((_id) >= 0 && (_id) < (_sc)->vtcon_max_ports, \ ("%s: port ID %d out of range", __func__, _id)) #define VTCON_FEATURES VIRTIO_CONSOLE_F_MULTIPORT static struct virtio_feature_desc vtcon_feature_desc[] = { { VIRTIO_CONSOLE_F_SIZE, "ConsoleSize" }, { VIRTIO_CONSOLE_F_MULTIPORT, "MultiplePorts" }, { VIRTIO_CONSOLE_F_EMERG_WRITE, "EmergencyWrite" }, { 0, NULL } }; static int vtcon_modevent(module_t, int, void *); static void vtcon_drain_all(void); static int vtcon_probe(device_t); static int vtcon_attach(device_t); static int vtcon_detach(device_t); static int vtcon_config_change(device_t); static void vtcon_setup_features(struct vtcon_softc *); static void vtcon_negotiate_features(struct vtcon_softc *); static int vtcon_alloc_scports(struct vtcon_softc *); static int vtcon_alloc_virtqueues(struct vtcon_softc *); static void vtcon_read_config(struct vtcon_softc *, struct virtio_console_config *); static void vtcon_determine_max_ports(struct vtcon_softc *, struct virtio_console_config *); static void vtcon_destroy_ports(struct vtcon_softc *); static void vtcon_stop(struct vtcon_softc *); static int vtcon_ctrl_event_enqueue(struct vtcon_softc *, struct virtio_console_control *); static int vtcon_ctrl_event_create(struct vtcon_softc *); static void vtcon_ctrl_event_requeue(struct vtcon_softc *, struct virtio_console_control *); static int vtcon_ctrl_event_populate(struct vtcon_softc *); static void vtcon_ctrl_event_drain(struct vtcon_softc *); static int vtcon_ctrl_init(struct vtcon_softc *); static void vtcon_ctrl_deinit(struct vtcon_softc *); static void vtcon_ctrl_port_add_event(struct vtcon_softc *, int); static void 
vtcon_ctrl_port_remove_event(struct vtcon_softc *, int); static void vtcon_ctrl_port_console_event(struct vtcon_softc *, int); static void vtcon_ctrl_port_open_event(struct vtcon_softc *, int); static void vtcon_ctrl_port_name_event(struct vtcon_softc *, int, const char *, size_t); static void vtcon_ctrl_process_event(struct vtcon_softc *, struct virtio_console_control *, void *, size_t); static void vtcon_ctrl_task_cb(void *, int); static void vtcon_ctrl_event_intr(void *); static void vtcon_ctrl_poll(struct vtcon_softc *, struct virtio_console_control *control); static void vtcon_ctrl_send_control(struct vtcon_softc *, uint32_t, uint16_t, uint16_t); static int vtcon_port_enqueue_buf(struct vtcon_port *, void *, size_t); static int vtcon_port_create_buf(struct vtcon_port *); static void vtcon_port_requeue_buf(struct vtcon_port *, void *); static int vtcon_port_populate(struct vtcon_port *); static void vtcon_port_destroy(struct vtcon_port *); static int vtcon_port_create(struct vtcon_softc *, int); static void vtcon_port_dev_alias(struct vtcon_port *, const char *, size_t); static void vtcon_port_drain_bufs(struct virtqueue *); static void vtcon_port_drain(struct vtcon_port *); static void vtcon_port_teardown(struct vtcon_port *); static void vtcon_port_change_size(struct vtcon_port *, uint16_t, uint16_t); static void vtcon_port_update_console_size(struct vtcon_softc *); static void vtcon_port_enable_intr(struct vtcon_port *); static void vtcon_port_disable_intr(struct vtcon_port *); static void vtcon_port_in(struct vtcon_port *); static void vtcon_port_intr(void *); static void vtcon_port_out(struct vtcon_port *, void *, int); static void vtcon_port_submit_event(struct vtcon_port *, uint16_t, uint16_t); static int vtcon_tty_open(struct tty *); static void vtcon_tty_close(struct tty *); static void vtcon_tty_outwakeup(struct tty *); static void vtcon_tty_free(void *); static void vtcon_get_console_size(struct vtcon_softc *, uint16_t *, uint16_t *); static void vtcon_enable_interrupts(struct vtcon_softc *); static void vtcon_disable_interrupts(struct vtcon_softc *); static int vtcon_pending_free; static struct ttydevsw vtcon_tty_class = { .tsw_flags = 0, .tsw_open = vtcon_tty_open, .tsw_close = vtcon_tty_close, .tsw_outwakeup = vtcon_tty_outwakeup, .tsw_free = vtcon_tty_free, }; static device_method_t vtcon_methods[] = { /* Device methods. */ DEVMETHOD(device_probe, vtcon_probe), DEVMETHOD(device_attach, vtcon_attach), DEVMETHOD(device_detach, vtcon_detach), /* VirtIO methods. 
*/ DEVMETHOD(virtio_config_change, vtcon_config_change), DEVMETHOD_END }; static driver_t vtcon_driver = { "vtcon", vtcon_methods, sizeof(struct vtcon_softc) }; static devclass_t vtcon_devclass; DRIVER_MODULE(virtio_console, virtio_pci, vtcon_driver, vtcon_devclass, vtcon_modevent, 0); MODULE_VERSION(virtio_console, 1); MODULE_DEPEND(virtio_console, virtio, 1, 1, 1); static int vtcon_modevent(module_t mod, int type, void *unused) { int error; switch (type) { case MOD_LOAD: error = 0; break; case MOD_QUIESCE: error = 0; break; case MOD_UNLOAD: vtcon_drain_all(); error = 0; break; case MOD_SHUTDOWN: error = 0; break; default: error = EOPNOTSUPP; break; } return (error); } static void vtcon_drain_all(void) { int first; for (first = 1; vtcon_pending_free != 0; first = 0) { if (first != 0) { printf("virtio_console: Waiting for all detached TTY " "devices to have open fds closed.\n"); } pause("vtcondra", hz); } } static int vtcon_probe(device_t dev) { if (virtio_get_device_type(dev) != VIRTIO_ID_CONSOLE) return (ENXIO); device_set_desc(dev, "VirtIO Console Adapter"); return (BUS_PROBE_DEFAULT); } static int vtcon_attach(device_t dev) { struct vtcon_softc *sc; struct virtio_console_config concfg; int error; sc = device_get_softc(dev); sc->vtcon_dev = dev; mtx_init(&sc->vtcon_mtx, "vtconmtx", NULL, MTX_DEF); mtx_init(&sc->vtcon_ctrl_tx_mtx, "vtconctrlmtx", NULL, MTX_DEF); virtio_set_feature_desc(dev, vtcon_feature_desc); vtcon_setup_features(sc); vtcon_read_config(sc, &concfg); vtcon_determine_max_ports(sc, &concfg); error = vtcon_alloc_scports(sc); if (error) { device_printf(dev, "cannot allocate softc port structures\n"); goto fail; } error = vtcon_alloc_virtqueues(sc); if (error) { device_printf(dev, "cannot allocate virtqueues\n"); goto fail; } if (sc->vtcon_flags & VTCON_FLAG_MULTIPORT) { TASK_INIT(&sc->vtcon_ctrl_task, 0, vtcon_ctrl_task_cb, sc); error = vtcon_ctrl_init(sc); if (error) goto fail; } else { error = vtcon_port_create(sc, 0); if (error) goto fail; if (sc->vtcon_flags & VTCON_FLAG_SIZE) vtcon_port_update_console_size(sc); } error = virtio_setup_intr(dev, INTR_TYPE_TTY); if (error) { device_printf(dev, "cannot setup virtqueue interrupts\n"); goto fail; } vtcon_enable_interrupts(sc); vtcon_ctrl_send_control(sc, VIRTIO_CONSOLE_BAD_ID, VIRTIO_CONSOLE_DEVICE_READY, 1); fail: if (error) vtcon_detach(dev); return (error); } static int vtcon_detach(device_t dev) { struct vtcon_softc *sc; sc = device_get_softc(dev); VTCON_LOCK(sc); sc->vtcon_flags |= VTCON_FLAG_DETACHED; if (device_is_attached(dev)) vtcon_stop(sc); VTCON_UNLOCK(sc); if (sc->vtcon_flags & VTCON_FLAG_MULTIPORT) { taskqueue_drain(taskqueue_thread, &sc->vtcon_ctrl_task); vtcon_ctrl_deinit(sc); } vtcon_destroy_ports(sc); mtx_destroy(&sc->vtcon_mtx); mtx_destroy(&sc->vtcon_ctrl_tx_mtx); return (0); } static int vtcon_config_change(device_t dev) { struct vtcon_softc *sc; sc = device_get_softc(dev); /* * When the multiport feature is negotiated, all configuration * changes are done through control virtqueue events. 
*/ if ((sc->vtcon_flags & VTCON_FLAG_MULTIPORT) == 0) { if (sc->vtcon_flags & VTCON_FLAG_SIZE) vtcon_port_update_console_size(sc); } return (0); } static void vtcon_negotiate_features(struct vtcon_softc *sc) { device_t dev; uint64_t features; dev = sc->vtcon_dev; features = VTCON_FEATURES; sc->vtcon_features = virtio_negotiate_features(dev, features); } static void vtcon_setup_features(struct vtcon_softc *sc) { device_t dev; dev = sc->vtcon_dev; vtcon_negotiate_features(sc); if (virtio_with_feature(dev, VIRTIO_CONSOLE_F_SIZE)) sc->vtcon_flags |= VTCON_FLAG_SIZE; if (virtio_with_feature(dev, VIRTIO_CONSOLE_F_MULTIPORT)) sc->vtcon_flags |= VTCON_FLAG_MULTIPORT; } #define VTCON_GET_CONFIG(_dev, _feature, _field, _cfg) \ if (virtio_with_feature(_dev, _feature)) { \ virtio_read_device_config(_dev, \ offsetof(struct virtio_console_config, _field), \ &(_cfg)->_field, sizeof((_cfg)->_field)); \ } static void vtcon_read_config(struct vtcon_softc *sc, struct virtio_console_config *concfg) { device_t dev; dev = sc->vtcon_dev; bzero(concfg, sizeof(struct virtio_console_config)); VTCON_GET_CONFIG(dev, VIRTIO_CONSOLE_F_SIZE, cols, concfg); VTCON_GET_CONFIG(dev, VIRTIO_CONSOLE_F_SIZE, rows, concfg); VTCON_GET_CONFIG(dev, VIRTIO_CONSOLE_F_MULTIPORT, max_nr_ports, concfg); } #undef VTCON_GET_CONFIG static int vtcon_alloc_scports(struct vtcon_softc *sc) { struct vtcon_softc_port *scport; - int max, i; + u_int max, i; max = sc->vtcon_max_ports; - sc->vtcon_ports = malloc(sizeof(struct vtcon_softc_port) * max, + sc->vtcon_ports = mallocarray(max, sizeof(struct vtcon_softc_port), M_DEVBUF, M_NOWAIT | M_ZERO); if (sc->vtcon_ports == NULL) return (ENOMEM); for (i = 0; i < max; i++) { scport = &sc->vtcon_ports[i]; scport->vcsp_sc = sc; } return (0); } static int vtcon_alloc_virtqueues(struct vtcon_softc *sc) { device_t dev; struct vq_alloc_info *info; struct vtcon_softc_port *scport; - int i, idx, portidx, nvqs, error; + u_int i, idx, portidx, nvqs; + int error; dev = sc->vtcon_dev; nvqs = sc->vtcon_max_ports * 2; if (sc->vtcon_flags & VTCON_FLAG_MULTIPORT) nvqs += 2; - info = malloc(sizeof(struct vq_alloc_info) * nvqs, M_TEMP, M_NOWAIT); + info = mallocarray(nvqs, sizeof(struct vq_alloc_info), M_TEMP, + M_NOWAIT); if (info == NULL) return (ENOMEM); for (i = 0, idx = 0, portidx = 0; i < nvqs / 2; i++, idx += 2) { if (i == 1) { /* The control virtqueues are after the first port. 
*/ VQ_ALLOC_INFO_INIT(&info[idx], 0, vtcon_ctrl_event_intr, sc, &sc->vtcon_ctrl_rxvq, "%s-control rx", device_get_nameunit(dev)); VQ_ALLOC_INFO_INIT(&info[idx+1], 0, NULL, sc, &sc->vtcon_ctrl_txvq, "%s-control tx", device_get_nameunit(dev)); continue; } scport = &sc->vtcon_ports[portidx]; VQ_ALLOC_INFO_INIT(&info[idx], 0, vtcon_port_intr, scport, &scport->vcsp_invq, "%s-port%d in", device_get_nameunit(dev), i); VQ_ALLOC_INFO_INIT(&info[idx+1], 0, NULL, NULL, &scport->vcsp_outvq, "%s-port%d out", device_get_nameunit(dev), i); portidx++; } error = virtio_alloc_virtqueues(dev, 0, nvqs, info); free(info, M_TEMP); return (error); } static void vtcon_determine_max_ports(struct vtcon_softc *sc, struct virtio_console_config *concfg) { if (sc->vtcon_flags & VTCON_FLAG_MULTIPORT) { sc->vtcon_max_ports = min(concfg->max_nr_ports, VTCON_MAX_PORTS); if (sc->vtcon_max_ports == 0) sc->vtcon_max_ports = 1; } else sc->vtcon_max_ports = 1; } static void vtcon_destroy_ports(struct vtcon_softc *sc) { struct vtcon_softc_port *scport; struct vtcon_port *port; struct virtqueue *vq; int i; if (sc->vtcon_ports == NULL) return; VTCON_LOCK(sc); for (i = 0; i < sc->vtcon_max_ports; i++) { scport = &sc->vtcon_ports[i]; port = scport->vcsp_port; if (port != NULL) { scport->vcsp_port = NULL; VTCON_PORT_LOCK(port); VTCON_UNLOCK(sc); vtcon_port_teardown(port); VTCON_LOCK(sc); } vq = scport->vcsp_invq; if (vq != NULL) vtcon_port_drain_bufs(vq); } VTCON_UNLOCK(sc); free(sc->vtcon_ports, M_DEVBUF); sc->vtcon_ports = NULL; } static void vtcon_stop(struct vtcon_softc *sc) { vtcon_disable_interrupts(sc); virtio_stop(sc->vtcon_dev); } static int vtcon_ctrl_event_enqueue(struct vtcon_softc *sc, struct virtio_console_control *control) { struct sglist_seg segs[2]; struct sglist sg; struct virtqueue *vq; int error; vq = sc->vtcon_ctrl_rxvq; sglist_init(&sg, 2, segs); error = sglist_append(&sg, control, VTCON_CTRL_BUFSZ); KASSERT(error == 0, ("%s: error %d adding control to sglist", __func__, error)); return (virtqueue_enqueue(vq, control, &sg, 0, sg.sg_nseg)); } static int vtcon_ctrl_event_create(struct vtcon_softc *sc) { struct virtio_console_control *control; int error; control = malloc(VTCON_CTRL_BUFSZ, M_DEVBUF, M_ZERO | M_NOWAIT); if (control == NULL) return (ENOMEM); error = vtcon_ctrl_event_enqueue(sc, control); if (error) free(control, M_DEVBUF); return (error); } static void vtcon_ctrl_event_requeue(struct vtcon_softc *sc, struct virtio_console_control *control) { int error; bzero(control, VTCON_CTRL_BUFSZ); error = vtcon_ctrl_event_enqueue(sc, control); KASSERT(error == 0, ("%s: cannot requeue control buffer %d", __func__, error)); } static int vtcon_ctrl_event_populate(struct vtcon_softc *sc) { struct virtqueue *vq; int nbufs, error; vq = sc->vtcon_ctrl_rxvq; error = ENOSPC; for (nbufs = 0; !virtqueue_full(vq); nbufs++) { error = vtcon_ctrl_event_create(sc); if (error) break; } if (nbufs > 0) { virtqueue_notify(vq); error = 0; } return (error); } static void vtcon_ctrl_event_drain(struct vtcon_softc *sc) { struct virtio_console_control *control; struct virtqueue *vq; int last; vq = sc->vtcon_ctrl_rxvq; last = 0; if (vq == NULL) return; VTCON_LOCK(sc); while ((control = virtqueue_drain(vq, &last)) != NULL) free(control, M_DEVBUF); VTCON_UNLOCK(sc); } static int vtcon_ctrl_init(struct vtcon_softc *sc) { int error; error = vtcon_ctrl_event_populate(sc); return (error); } static void vtcon_ctrl_deinit(struct vtcon_softc *sc) { vtcon_ctrl_event_drain(sc); } static void vtcon_ctrl_port_add_event(struct vtcon_softc *sc, int id) 
{ device_t dev; int error; dev = sc->vtcon_dev; /* This single thread only way for ports to be created. */ if (sc->vtcon_ports[id].vcsp_port != NULL) { device_printf(dev, "%s: adding port %d, but already exists\n", __func__, id); return; } error = vtcon_port_create(sc, id); if (error) { device_printf(dev, "%s: cannot create port %d: %d\n", __func__, id, error); vtcon_ctrl_send_control(sc, id, VIRTIO_CONSOLE_PORT_READY, 0); return; } } static void vtcon_ctrl_port_remove_event(struct vtcon_softc *sc, int id) { device_t dev; struct vtcon_softc_port *scport; struct vtcon_port *port; dev = sc->vtcon_dev; scport = &sc->vtcon_ports[id]; VTCON_LOCK(sc); port = scport->vcsp_port; if (port == NULL) { VTCON_UNLOCK(sc); device_printf(dev, "%s: remove port %d, but does not exist\n", __func__, id); return; } scport->vcsp_port = NULL; VTCON_PORT_LOCK(port); VTCON_UNLOCK(sc); vtcon_port_teardown(port); } static void vtcon_ctrl_port_console_event(struct vtcon_softc *sc, int id) { device_t dev; struct vtcon_softc_port *scport; struct vtcon_port *port; dev = sc->vtcon_dev; scport = &sc->vtcon_ports[id]; VTCON_LOCK(sc); port = scport->vcsp_port; if (port == NULL) { VTCON_UNLOCK(sc); device_printf(dev, "%s: console port %d, but does not exist\n", __func__, id); return; } VTCON_PORT_LOCK(port); VTCON_UNLOCK(sc); port->vtcport_flags |= VTCON_PORT_FLAG_CONSOLE; vtcon_port_submit_event(port, VIRTIO_CONSOLE_PORT_OPEN, 1); VTCON_PORT_UNLOCK(port); } static void vtcon_ctrl_port_open_event(struct vtcon_softc *sc, int id) { device_t dev; struct vtcon_softc_port *scport; struct vtcon_port *port; dev = sc->vtcon_dev; scport = &sc->vtcon_ports[id]; VTCON_LOCK(sc); port = scport->vcsp_port; if (port == NULL) { VTCON_UNLOCK(sc); device_printf(dev, "%s: open port %d, but does not exist\n", __func__, id); return; } VTCON_PORT_LOCK(port); VTCON_UNLOCK(sc); vtcon_port_enable_intr(port); VTCON_PORT_UNLOCK(port); } static void vtcon_ctrl_port_name_event(struct vtcon_softc *sc, int id, const char *name, size_t len) { device_t dev; struct vtcon_softc_port *scport; struct vtcon_port *port; dev = sc->vtcon_dev; scport = &sc->vtcon_ports[id]; /* * The VirtIO specification says the NUL terminator is not included in * the length, but QEMU includes it. Adjust the length if needed. 
*/ if (name == NULL || len == 0) return; if (name[len - 1] == '\0') { len--; if (len == 0) return; } VTCON_LOCK(sc); port = scport->vcsp_port; if (port == NULL) { VTCON_UNLOCK(sc); device_printf(dev, "%s: name port %d, but does not exist\n", __func__, id); return; } VTCON_PORT_LOCK(port); VTCON_UNLOCK(sc); vtcon_port_dev_alias(port, name, len); VTCON_PORT_UNLOCK(port); } static void vtcon_ctrl_process_event(struct vtcon_softc *sc, struct virtio_console_control *control, void *data, size_t data_len) { device_t dev; int id; dev = sc->vtcon_dev; id = control->id; if (id < 0 || id >= sc->vtcon_max_ports) { device_printf(dev, "%s: invalid port ID %d\n", __func__, id); return; } switch (control->event) { case VIRTIO_CONSOLE_PORT_ADD: vtcon_ctrl_port_add_event(sc, id); break; case VIRTIO_CONSOLE_PORT_REMOVE: vtcon_ctrl_port_remove_event(sc, id); break; case VIRTIO_CONSOLE_CONSOLE_PORT: vtcon_ctrl_port_console_event(sc, id); break; case VIRTIO_CONSOLE_RESIZE: break; case VIRTIO_CONSOLE_PORT_OPEN: vtcon_ctrl_port_open_event(sc, id); break; case VIRTIO_CONSOLE_PORT_NAME: vtcon_ctrl_port_name_event(sc, id, (const char *)data, data_len); break; } } static void vtcon_ctrl_task_cb(void *xsc, int pending) { struct vtcon_softc *sc; struct virtqueue *vq; struct virtio_console_control *control; void *data; size_t data_len; int detached; uint32_t len; sc = xsc; vq = sc->vtcon_ctrl_rxvq; VTCON_LOCK(sc); while ((detached = (sc->vtcon_flags & VTCON_FLAG_DETACHED)) == 0) { control = virtqueue_dequeue(vq, &len); if (control == NULL) break; if (len > sizeof(struct virtio_console_control)) { data = (void *) &control[1]; data_len = len - sizeof(struct virtio_console_control); } else { data = NULL; data_len = 0; } VTCON_UNLOCK(sc); vtcon_ctrl_process_event(sc, control, data, data_len); VTCON_LOCK(sc); vtcon_ctrl_event_requeue(sc, control); } if (!detached) { virtqueue_notify(vq); if (virtqueue_enable_intr(vq) != 0) taskqueue_enqueue(taskqueue_thread, &sc->vtcon_ctrl_task); } VTCON_UNLOCK(sc); } static void vtcon_ctrl_event_intr(void *xsc) { struct vtcon_softc *sc; sc = xsc; /* * Only some events require us to potentially block, but it * easier to just defer all event handling to the taskqueue. */ taskqueue_enqueue(taskqueue_thread, &sc->vtcon_ctrl_task); } static void vtcon_ctrl_poll(struct vtcon_softc *sc, struct virtio_console_control *control) { struct sglist_seg segs[2]; struct sglist sg; struct virtqueue *vq; int error; vq = sc->vtcon_ctrl_txvq; sglist_init(&sg, 2, segs); error = sglist_append(&sg, control, sizeof(struct virtio_console_control)); KASSERT(error == 0, ("%s: error %d adding control to sglist", __func__, error)); /* * We cannot use the softc lock to serialize access to this * virtqueue since this is called from the tty layer with the * port lock held. Acquiring the softc would violate our lock * ordering. 
*/ VTCON_CTRL_TX_LOCK(sc); KASSERT(virtqueue_empty(vq), ("%s: virtqueue is not emtpy", __func__)); error = virtqueue_enqueue(vq, control, &sg, sg.sg_nseg, 0); if (error == 0) { virtqueue_notify(vq); virtqueue_poll(vq, NULL); } VTCON_CTRL_TX_UNLOCK(sc); } static void vtcon_ctrl_send_control(struct vtcon_softc *sc, uint32_t portid, uint16_t event, uint16_t value) { struct virtio_console_control control; if ((sc->vtcon_flags & VTCON_FLAG_MULTIPORT) == 0) return; control.id = portid; control.event = event; control.value = value; vtcon_ctrl_poll(sc, &control); } static int vtcon_port_enqueue_buf(struct vtcon_port *port, void *buf, size_t len) { struct sglist_seg segs[2]; struct sglist sg; struct virtqueue *vq; int error; vq = port->vtcport_invq; sglist_init(&sg, 2, segs); error = sglist_append(&sg, buf, len); KASSERT(error == 0, ("%s: error %d adding buffer to sglist", __func__, error)); error = virtqueue_enqueue(vq, buf, &sg, 0, sg.sg_nseg); return (error); } static int vtcon_port_create_buf(struct vtcon_port *port) { void *buf; int error; buf = malloc(VTCON_BULK_BUFSZ, M_DEVBUF, M_ZERO | M_NOWAIT); if (buf == NULL) return (ENOMEM); error = vtcon_port_enqueue_buf(port, buf, VTCON_BULK_BUFSZ); if (error) free(buf, M_DEVBUF); return (error); } static void vtcon_port_requeue_buf(struct vtcon_port *port, void *buf) { int error; error = vtcon_port_enqueue_buf(port, buf, VTCON_BULK_BUFSZ); KASSERT(error == 0, ("%s: cannot requeue input buffer %d", __func__, error)); } static int vtcon_port_populate(struct vtcon_port *port) { struct virtqueue *vq; int nbufs, error; vq = port->vtcport_invq; error = ENOSPC; for (nbufs = 0; !virtqueue_full(vq); nbufs++) { error = vtcon_port_create_buf(port); if (error) break; } if (nbufs > 0) { virtqueue_notify(vq); error = 0; } return (error); } static void vtcon_port_destroy(struct vtcon_port *port) { port->vtcport_sc = NULL; port->vtcport_scport = NULL; port->vtcport_invq = NULL; port->vtcport_outvq = NULL; port->vtcport_id = -1; mtx_destroy(&port->vtcport_mtx); free(port, M_DEVBUF); } static int vtcon_port_init_vqs(struct vtcon_port *port) { struct vtcon_softc_port *scport; int error; scport = port->vtcport_scport; port->vtcport_invq = scport->vcsp_invq; port->vtcport_outvq = scport->vcsp_outvq; /* * Free any data left over from when this virtqueue was in use by a * prior port. We have not yet notified the host that the port is * ready, so assume nothing in the virtqueue can be for us. 
*/ vtcon_port_drain(port); KASSERT(virtqueue_empty(port->vtcport_invq), ("%s: in virtqueue is not empty", __func__)); KASSERT(virtqueue_empty(port->vtcport_outvq), ("%s: out virtqueue is not empty", __func__)); error = vtcon_port_populate(port); if (error) return (error); return (0); } static int vtcon_port_create(struct vtcon_softc *sc, int id) { device_t dev; struct vtcon_softc_port *scport; struct vtcon_port *port; int error; dev = sc->vtcon_dev; scport = &sc->vtcon_ports[id]; VTCON_ASSERT_VALID_PORTID(sc, id); MPASS(scport->vcsp_port == NULL); port = malloc(sizeof(struct vtcon_port), M_DEVBUF, M_NOWAIT | M_ZERO); if (port == NULL) return (ENOMEM); port->vtcport_sc = sc; port->vtcport_scport = scport; port->vtcport_id = id; mtx_init(&port->vtcport_mtx, "vtcpmtx", NULL, MTX_DEF); port->vtcport_tty = tty_alloc_mutex(&vtcon_tty_class, port, &port->vtcport_mtx); error = vtcon_port_init_vqs(port); if (error) { VTCON_PORT_LOCK(port); vtcon_port_teardown(port); return (error); } VTCON_LOCK(sc); VTCON_PORT_LOCK(port); scport->vcsp_port = port; vtcon_port_enable_intr(port); vtcon_port_submit_event(port, VIRTIO_CONSOLE_PORT_READY, 1); VTCON_PORT_UNLOCK(port); VTCON_UNLOCK(sc); tty_makedev(port->vtcport_tty, NULL, "%s%r.%r", VTCON_TTY_PREFIX, device_get_unit(dev), id); return (0); } static void vtcon_port_dev_alias(struct vtcon_port *port, const char *name, size_t len) { struct vtcon_softc *sc; struct cdev *pdev; struct tty *tp; int i, error; sc = port->vtcport_sc; tp = port->vtcport_tty; if (port->vtcport_flags & VTCON_PORT_FLAG_ALIAS) return; /* Port name is UTF-8, but we can only handle ASCII. */ for (i = 0; i < len; i++) { if (!isascii(name[i])) return; } /* * Port name may not conform to the devfs requirements so we cannot use * tty_makealias() because the MAKEDEV_CHECKNAME flag must be specified. */ error = make_dev_alias_p(MAKEDEV_NOWAIT | MAKEDEV_CHECKNAME, &pdev, tp->t_dev, "%s/%*s", VTCON_TTY_ALIAS_PREFIX, (int)len, name); if (error) { device_printf(sc->vtcon_dev, "%s: cannot make dev alias (%s/%*s) error %d\n", __func__, VTCON_TTY_ALIAS_PREFIX, (int)len, name, error); } else port->vtcport_flags |= VTCON_PORT_FLAG_ALIAS; } static void vtcon_port_drain_bufs(struct virtqueue *vq) { void *buf; int last; last = 0; while ((buf = virtqueue_drain(vq, &last)) != NULL) free(buf, M_DEVBUF); } static void vtcon_port_drain(struct vtcon_port *port) { vtcon_port_drain_bufs(port->vtcport_invq); } static void vtcon_port_teardown(struct vtcon_port *port) { struct tty *tp; tp = port->vtcport_tty; port->vtcport_flags |= VTCON_PORT_FLAG_GONE; if (tp != NULL) { atomic_add_int(&vtcon_pending_free, 1); tty_rel_gone(tp); } else vtcon_port_destroy(port); } static void vtcon_port_change_size(struct vtcon_port *port, uint16_t cols, uint16_t rows) { struct tty *tp; struct winsize sz; tp = port->vtcport_tty; if (tp == NULL) return; bzero(&sz, sizeof(struct winsize)); sz.ws_col = cols; sz.ws_row = rows; tty_set_winsize(tp, &sz); } static void vtcon_port_update_console_size(struct vtcon_softc *sc) { struct vtcon_port *port; struct vtcon_softc_port *scport; uint16_t cols, rows; vtcon_get_console_size(sc, &cols, &rows); /* * For now, assume the first (only) port is the console. Note * QEMU does not implement this feature yet. 
*/ scport = &sc->vtcon_ports[0]; VTCON_LOCK(sc); port = scport->vcsp_port; if (port != NULL) { VTCON_PORT_LOCK(port); VTCON_UNLOCK(sc); vtcon_port_change_size(port, cols, rows); VTCON_PORT_UNLOCK(port); } else VTCON_UNLOCK(sc); } static void vtcon_port_enable_intr(struct vtcon_port *port) { /* * NOTE: The out virtqueue is always polled, so its interrupt * kept disabled. */ virtqueue_enable_intr(port->vtcport_invq); } static void vtcon_port_disable_intr(struct vtcon_port *port) { if (port->vtcport_invq != NULL) virtqueue_disable_intr(port->vtcport_invq); if (port->vtcport_outvq != NULL) virtqueue_disable_intr(port->vtcport_outvq); } static void vtcon_port_in(struct vtcon_port *port) { struct virtqueue *vq; struct tty *tp; char *buf; uint32_t len; int i, deq; tp = port->vtcport_tty; vq = port->vtcport_invq; again: deq = 0; while ((buf = virtqueue_dequeue(vq, &len)) != NULL) { for (i = 0; i < len; i++) { #if defined(KDB) if (port->vtcport_flags & VTCON_PORT_FLAG_CONSOLE) kdb_alt_break(buf[i], &port->vtcport_alt_break_state); #endif ttydisc_rint(tp, buf[i], 0); } vtcon_port_requeue_buf(port, buf); deq++; } ttydisc_rint_done(tp); if (deq > 0) virtqueue_notify(vq); if (virtqueue_enable_intr(vq) != 0) goto again; } static void vtcon_port_intr(void *scportx) { struct vtcon_softc_port *scport; struct vtcon_softc *sc; struct vtcon_port *port; scport = scportx; sc = scport->vcsp_sc; VTCON_LOCK(sc); port = scport->vcsp_port; if (port == NULL) { VTCON_UNLOCK(sc); return; } VTCON_PORT_LOCK(port); VTCON_UNLOCK(sc); if ((port->vtcport_flags & VTCON_PORT_FLAG_GONE) == 0) vtcon_port_in(port); VTCON_PORT_UNLOCK(port); } static void vtcon_port_out(struct vtcon_port *port, void *buf, int bufsize) { struct sglist_seg segs[2]; struct sglist sg; struct virtqueue *vq; int error; vq = port->vtcport_outvq; KASSERT(virtqueue_empty(vq), ("%s: port %p out virtqueue not emtpy", __func__, port)); sglist_init(&sg, 2, segs); error = sglist_append(&sg, buf, bufsize); KASSERT(error == 0, ("%s: error %d adding buffer to sglist", __func__, error)); error = virtqueue_enqueue(vq, buf, &sg, sg.sg_nseg, 0); if (error == 0) { virtqueue_notify(vq); virtqueue_poll(vq, NULL); } } static void vtcon_port_submit_event(struct vtcon_port *port, uint16_t event, uint16_t value) { struct vtcon_softc *sc; sc = port->vtcport_sc; vtcon_ctrl_send_control(sc, port->vtcport_id, event, value); } static int vtcon_tty_open(struct tty *tp) { struct vtcon_port *port; port = tty_softc(tp); if (port->vtcport_flags & VTCON_PORT_FLAG_GONE) return (ENXIO); vtcon_port_submit_event(port, VIRTIO_CONSOLE_PORT_OPEN, 1); return (0); } static void vtcon_tty_close(struct tty *tp) { struct vtcon_port *port; port = tty_softc(tp); if (port->vtcport_flags & VTCON_PORT_FLAG_GONE) return; vtcon_port_submit_event(port, VIRTIO_CONSOLE_PORT_OPEN, 0); } static void vtcon_tty_outwakeup(struct tty *tp) { struct vtcon_port *port; char buf[VTCON_BULK_BUFSZ]; int len; port = tty_softc(tp); if (port->vtcport_flags & VTCON_PORT_FLAG_GONE) return; while ((len = ttydisc_getc(tp, buf, sizeof(buf))) != 0) vtcon_port_out(port, buf, len); } static void vtcon_tty_free(void *xport) { struct vtcon_port *port; port = xport; vtcon_port_destroy(port); atomic_subtract_int(&vtcon_pending_free, 1); } static void vtcon_get_console_size(struct vtcon_softc *sc, uint16_t *cols, uint16_t *rows) { struct virtio_console_config concfg; KASSERT(sc->vtcon_flags & VTCON_FLAG_SIZE, ("%s: size feature not negotiated", __func__)); vtcon_read_config(sc, &concfg); *cols = concfg.cols; *rows = concfg.rows; } 
static void vtcon_enable_interrupts(struct vtcon_softc *sc) { struct vtcon_softc_port *scport; struct vtcon_port *port; int i; VTCON_LOCK(sc); if (sc->vtcon_flags & VTCON_FLAG_MULTIPORT) virtqueue_enable_intr(sc->vtcon_ctrl_rxvq); for (i = 0; i < sc->vtcon_max_ports; i++) { scport = &sc->vtcon_ports[i]; port = scport->vcsp_port; if (port == NULL) continue; VTCON_PORT_LOCK(port); vtcon_port_enable_intr(port); VTCON_PORT_UNLOCK(port); } VTCON_UNLOCK(sc); } static void vtcon_disable_interrupts(struct vtcon_softc *sc) { struct vtcon_softc_port *scport; struct vtcon_port *port; int i; VTCON_LOCK_ASSERT(sc); if (sc->vtcon_flags & VTCON_FLAG_MULTIPORT) virtqueue_disable_intr(sc->vtcon_ctrl_rxvq); for (i = 0; i < sc->vtcon_max_ports; i++) { scport = &sc->vtcon_ports[i]; port = scport->vcsp_port; if (port == NULL) continue; VTCON_PORT_LOCK(port); vtcon_port_disable_intr(port); VTCON_PORT_UNLOCK(port); } } Index: head/sys/dev/virtio/mmio/virtio_mmio.c =================================================================== --- head/sys/dev/virtio/mmio/virtio_mmio.c (revision 327948) +++ head/sys/dev/virtio/mmio/virtio_mmio.c (revision 327949) @@ -1,862 +1,862 @@ /*- * Copyright (c) 2014 Ruslan Bukin * Copyright (c) 2014 The FreeBSD Foundation * All rights reserved. * * This software was developed by SRI International and the University of * Cambridge Computer Laboratory under DARPA/AFRL contract (FA8750-10-C-0237) * ("CTSRD"), as part of the DARPA CRASH research programme. * * Portions of this software were developed by Andrew Turner * under sponsorship from the FreeBSD Foundation. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ /* * VirtIO MMIO interface. * This driver is heavily based on VirtIO PCI interface driver. 
*/ /* * FDT example: * virtio_block@1000 { * compatible = "virtio,mmio"; * reg = <0x1000 0x100>; * interrupts = <63>; * interrupt-parent = <&GIC>; * }; */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "virtio_mmio_if.h" #include "virtio_bus_if.h" #include "virtio_if.h" #define PAGE_SHIFT 12 struct vtmmio_virtqueue { struct virtqueue *vtv_vq; int vtv_no_intr; }; struct vtmmio_softc { device_t dev; device_t platform; struct resource *res[2]; uint64_t vtmmio_features; uint32_t vtmmio_flags; /* This "bus" will only ever have one child. */ device_t vtmmio_child_dev; struct virtio_feature_desc *vtmmio_child_feat_desc; int vtmmio_nvqs; struct vtmmio_virtqueue *vtmmio_vqs; void *ih; }; static int vtmmio_probe(device_t); static int vtmmio_attach(device_t); static int vtmmio_detach(device_t); static int vtmmio_suspend(device_t); static int vtmmio_resume(device_t); static int vtmmio_shutdown(device_t); static void vtmmio_driver_added(device_t, driver_t *); static void vtmmio_child_detached(device_t, device_t); static int vtmmio_read_ivar(device_t, device_t, int, uintptr_t *); static int vtmmio_write_ivar(device_t, device_t, int, uintptr_t); static uint64_t vtmmio_negotiate_features(device_t, uint64_t); static int vtmmio_with_feature(device_t, uint64_t); static int vtmmio_alloc_virtqueues(device_t, int, int, struct vq_alloc_info *); static int vtmmio_setup_intr(device_t, enum intr_type); static void vtmmio_stop(device_t); static void vtmmio_poll(device_t); static int vtmmio_reinit(device_t, uint64_t); static void vtmmio_reinit_complete(device_t); static void vtmmio_notify_virtqueue(device_t, uint16_t); static uint8_t vtmmio_get_status(device_t); static void vtmmio_set_status(device_t, uint8_t); static void vtmmio_read_dev_config(device_t, bus_size_t, void *, int); static void vtmmio_write_dev_config(device_t, bus_size_t, void *, int); static void vtmmio_describe_features(struct vtmmio_softc *, const char *, uint64_t); static void vtmmio_probe_and_attach_child(struct vtmmio_softc *); static int vtmmio_reinit_virtqueue(struct vtmmio_softc *, int); static void vtmmio_free_interrupts(struct vtmmio_softc *); static void vtmmio_free_virtqueues(struct vtmmio_softc *); static void vtmmio_release_child_resources(struct vtmmio_softc *); static void vtmmio_reset(struct vtmmio_softc *); static void vtmmio_select_virtqueue(struct vtmmio_softc *, int); static void vtmmio_vq_intr(void *); /* * I/O port read/write wrappers. 
*/ #define vtmmio_write_config_1(sc, o, v) \ do { \ if (sc->platform != NULL) \ VIRTIO_MMIO_PREWRITE(sc->platform, (o), (v)); \ bus_write_1((sc)->res[0], (o), (v)); \ if (sc->platform != NULL) \ VIRTIO_MMIO_NOTE(sc->platform, (o), (v)); \ } while (0) #define vtmmio_write_config_2(sc, o, v) \ do { \ if (sc->platform != NULL) \ VIRTIO_MMIO_PREWRITE(sc->platform, (o), (v)); \ bus_write_2((sc)->res[0], (o), (v)); \ if (sc->platform != NULL) \ VIRTIO_MMIO_NOTE(sc->platform, (o), (v)); \ } while (0) #define vtmmio_write_config_4(sc, o, v) \ do { \ if (sc->platform != NULL) \ VIRTIO_MMIO_PREWRITE(sc->platform, (o), (v)); \ bus_write_4((sc)->res[0], (o), (v)); \ if (sc->platform != NULL) \ VIRTIO_MMIO_NOTE(sc->platform, (o), (v)); \ } while (0) #define vtmmio_read_config_1(sc, o) \ bus_read_1((sc)->res[0], (o)) #define vtmmio_read_config_2(sc, o) \ bus_read_2((sc)->res[0], (o)) #define vtmmio_read_config_4(sc, o) \ bus_read_4((sc)->res[0], (o)) static device_method_t vtmmio_methods[] = { /* Device interface. */ DEVMETHOD(device_probe, vtmmio_probe), DEVMETHOD(device_attach, vtmmio_attach), DEVMETHOD(device_detach, vtmmio_detach), DEVMETHOD(device_suspend, vtmmio_suspend), DEVMETHOD(device_resume, vtmmio_resume), DEVMETHOD(device_shutdown, vtmmio_shutdown), /* Bus interface. */ DEVMETHOD(bus_driver_added, vtmmio_driver_added), DEVMETHOD(bus_child_detached, vtmmio_child_detached), DEVMETHOD(bus_read_ivar, vtmmio_read_ivar), DEVMETHOD(bus_write_ivar, vtmmio_write_ivar), /* VirtIO bus interface. */ DEVMETHOD(virtio_bus_negotiate_features, vtmmio_negotiate_features), DEVMETHOD(virtio_bus_with_feature, vtmmio_with_feature), DEVMETHOD(virtio_bus_alloc_virtqueues, vtmmio_alloc_virtqueues), DEVMETHOD(virtio_bus_setup_intr, vtmmio_setup_intr), DEVMETHOD(virtio_bus_stop, vtmmio_stop), DEVMETHOD(virtio_bus_poll, vtmmio_poll), DEVMETHOD(virtio_bus_reinit, vtmmio_reinit), DEVMETHOD(virtio_bus_reinit_complete, vtmmio_reinit_complete), DEVMETHOD(virtio_bus_notify_vq, vtmmio_notify_virtqueue), DEVMETHOD(virtio_bus_read_device_config, vtmmio_read_dev_config), DEVMETHOD(virtio_bus_write_device_config, vtmmio_write_dev_config), DEVMETHOD_END }; static driver_t vtmmio_driver = { "virtio_mmio", vtmmio_methods, sizeof(struct vtmmio_softc) }; devclass_t vtmmio_devclass; DRIVER_MODULE(virtio_mmio, simplebus, vtmmio_driver, vtmmio_devclass, 0, 0); DRIVER_MODULE(virtio_mmio, ofwbus, vtmmio_driver, vtmmio_devclass, 0, 0); MODULE_VERSION(virtio_mmio, 1); MODULE_DEPEND(virtio_mmio, simplebus, 1, 1, 1); MODULE_DEPEND(virtio_mmio, virtio, 1, 1, 1); static int vtmmio_setup_intr(device_t dev, enum intr_type type) { struct vtmmio_softc *sc; int rid; int err; sc = device_get_softc(dev); if (sc->platform != NULL) { err = VIRTIO_MMIO_SETUP_INTR(sc->platform, sc->dev, vtmmio_vq_intr, sc); if (err == 0) { /* Okay we have backend-specific interrupts */ return (0); } } rid = 0; sc->res[1] = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, RF_ACTIVE); if (!sc->res[1]) { device_printf(dev, "Can't allocate interrupt\n"); return (ENXIO); } if (bus_setup_intr(dev, sc->res[1], INTR_TYPE_MISC | INTR_MPSAFE, NULL, vtmmio_vq_intr, sc, &sc->ih)) { device_printf(dev, "Can't setup the interrupt\n"); return (ENXIO); } return (0); } static int vtmmio_probe(device_t dev) { if (!ofw_bus_status_okay(dev)) return (ENXIO); if (!ofw_bus_is_compatible(dev, "virtio,mmio")) return (ENXIO); device_set_desc(dev, "VirtIO MMIO adapter"); return (BUS_PROBE_DEFAULT); } static int vtmmio_setup_platform(struct vtmmio_softc *sc) { phandle_t platform_node; struct fdt_ic 
*ic; phandle_t xref; phandle_t node; sc->platform = NULL; if ((node = ofw_bus_get_node(sc->dev)) == -1) return (ENXIO); if (OF_searchencprop(node, "platform", &xref, sizeof(xref)) == -1) { return (ENXIO); } platform_node = OF_node_from_xref(xref); SLIST_FOREACH(ic, &fdt_ic_list_head, fdt_ics) { if (ic->iph == platform_node) { sc->platform = ic->dev; break; } } if (sc->platform == NULL) { /* No platform-specific device. Ignore it. */ } return (0); } static int vtmmio_attach(device_t dev) { struct vtmmio_softc *sc; device_t child; int rid; sc = device_get_softc(dev); sc->dev = dev; vtmmio_setup_platform(sc); rid = 0; sc->res[0] = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid, RF_ACTIVE); if (!sc->res[0]) { device_printf(dev, "Cannot allocate memory window.\n"); return (ENXIO); } vtmmio_reset(sc); /* Tell the host we've noticed this device. */ vtmmio_set_status(dev, VIRTIO_CONFIG_STATUS_ACK); if ((child = device_add_child(dev, NULL, -1)) == NULL) { device_printf(dev, "Cannot create child device.\n"); vtmmio_set_status(dev, VIRTIO_CONFIG_STATUS_FAILED); vtmmio_detach(dev); return (ENOMEM); } sc->vtmmio_child_dev = child; vtmmio_probe_and_attach_child(sc); return (0); } static int vtmmio_detach(device_t dev) { struct vtmmio_softc *sc; device_t child; int error; sc = device_get_softc(dev); if ((child = sc->vtmmio_child_dev) != NULL) { error = device_delete_child(dev, child); if (error) return (error); sc->vtmmio_child_dev = NULL; } vtmmio_reset(sc); if (sc->res[0] != NULL) { bus_release_resource(dev, SYS_RES_MEMORY, 0, sc->res[0]); sc->res[0] = NULL; } return (0); } static int vtmmio_suspend(device_t dev) { return (bus_generic_suspend(dev)); } static int vtmmio_resume(device_t dev) { return (bus_generic_resume(dev)); } static int vtmmio_shutdown(device_t dev) { (void) bus_generic_shutdown(dev); /* Forcibly stop the host device. */ vtmmio_stop(dev); return (0); } static void vtmmio_driver_added(device_t dev, driver_t *driver) { struct vtmmio_softc *sc; sc = device_get_softc(dev); vtmmio_probe_and_attach_child(sc); } static void vtmmio_child_detached(device_t dev, device_t child) { struct vtmmio_softc *sc; sc = device_get_softc(dev); vtmmio_reset(sc); vtmmio_release_child_resources(sc); } static int vtmmio_read_ivar(device_t dev, device_t child, int index, uintptr_t *result) { struct vtmmio_softc *sc; sc = device_get_softc(dev); if (sc->vtmmio_child_dev != child) return (ENOENT); switch (index) { case VIRTIO_IVAR_DEVTYPE: case VIRTIO_IVAR_SUBDEVICE: *result = vtmmio_read_config_4(sc, VIRTIO_MMIO_DEVICE_ID); break; case VIRTIO_IVAR_VENDOR: *result = vtmmio_read_config_4(sc, VIRTIO_MMIO_VENDOR_ID); break; default: return (ENOENT); } return (0); } static int vtmmio_write_ivar(device_t dev, device_t child, int index, uintptr_t value) { struct vtmmio_softc *sc; sc = device_get_softc(dev); if (sc->vtmmio_child_dev != child) return (ENOENT); switch (index) { case VIRTIO_IVAR_FEATURE_DESC: sc->vtmmio_child_feat_desc = (void *) value; break; default: return (ENOENT); } return (0); } static uint64_t vtmmio_negotiate_features(device_t dev, uint64_t child_features) { struct vtmmio_softc *sc; uint64_t host_features, features; sc = device_get_softc(dev); host_features = vtmmio_read_config_4(sc, VIRTIO_MMIO_HOST_FEATURES); vtmmio_describe_features(sc, "host", host_features); /* * Limit negotiated features to what the driver, virtqueue, and * host all support. 
*/ features = host_features & child_features; features = virtqueue_filter_features(features); sc->vtmmio_features = features; vtmmio_describe_features(sc, "negotiated", features); vtmmio_write_config_4(sc, VIRTIO_MMIO_GUEST_FEATURES, features); return (features); } static int vtmmio_with_feature(device_t dev, uint64_t feature) { struct vtmmio_softc *sc; sc = device_get_softc(dev); return ((sc->vtmmio_features & feature) != 0); } static int vtmmio_alloc_virtqueues(device_t dev, int flags, int nvqs, struct vq_alloc_info *vq_info) { struct vtmmio_virtqueue *vqx; struct vq_alloc_info *info; struct vtmmio_softc *sc; struct virtqueue *vq; uint32_t size; int idx, error; sc = device_get_softc(dev); if (sc->vtmmio_nvqs != 0) return (EALREADY); if (nvqs <= 0) return (EINVAL); - sc->vtmmio_vqs = malloc(nvqs * sizeof(struct vtmmio_virtqueue), + sc->vtmmio_vqs = mallocarray(nvqs, sizeof(struct vtmmio_virtqueue), M_DEVBUF, M_NOWAIT | M_ZERO); if (sc->vtmmio_vqs == NULL) return (ENOMEM); vtmmio_write_config_4(sc, VIRTIO_MMIO_GUEST_PAGE_SIZE, 1 << PAGE_SHIFT); for (idx = 0; idx < nvqs; idx++) { vqx = &sc->vtmmio_vqs[idx]; info = &vq_info[idx]; vtmmio_write_config_4(sc, VIRTIO_MMIO_QUEUE_SEL, idx); vtmmio_select_virtqueue(sc, idx); size = vtmmio_read_config_4(sc, VIRTIO_MMIO_QUEUE_NUM_MAX); error = virtqueue_alloc(dev, idx, size, VIRTIO_MMIO_VRING_ALIGN, 0xFFFFFFFFUL, info, &vq); if (error) { device_printf(dev, "cannot allocate virtqueue %d: %d\n", idx, error); break; } vtmmio_write_config_4(sc, VIRTIO_MMIO_QUEUE_NUM, size); vtmmio_write_config_4(sc, VIRTIO_MMIO_QUEUE_ALIGN, VIRTIO_MMIO_VRING_ALIGN); #if 0 device_printf(dev, "virtqueue paddr 0x%08lx\n", (uint64_t)virtqueue_paddr(vq)); #endif vtmmio_write_config_4(sc, VIRTIO_MMIO_QUEUE_PFN, virtqueue_paddr(vq) >> PAGE_SHIFT); vqx->vtv_vq = *info->vqai_vq = vq; vqx->vtv_no_intr = info->vqai_intr == NULL; sc->vtmmio_nvqs++; } if (error) vtmmio_free_virtqueues(sc); return (error); } static void vtmmio_stop(device_t dev) { vtmmio_reset(device_get_softc(dev)); } static void vtmmio_poll(device_t dev) { struct vtmmio_softc *sc; sc = device_get_softc(dev); if (sc->platform != NULL) VIRTIO_MMIO_POLL(sc->platform); } static int vtmmio_reinit(device_t dev, uint64_t features) { struct vtmmio_softc *sc; int idx, error; sc = device_get_softc(dev); if (vtmmio_get_status(dev) != VIRTIO_CONFIG_STATUS_RESET) vtmmio_stop(dev); /* * Quickly drive the status through ACK and DRIVER. The device * does not become usable again until vtmmio_reinit_complete(). 
*/ vtmmio_set_status(dev, VIRTIO_CONFIG_STATUS_ACK); vtmmio_set_status(dev, VIRTIO_CONFIG_STATUS_DRIVER); vtmmio_negotiate_features(dev, features); for (idx = 0; idx < sc->vtmmio_nvqs; idx++) { error = vtmmio_reinit_virtqueue(sc, idx); if (error) return (error); } return (0); } static void vtmmio_reinit_complete(device_t dev) { vtmmio_set_status(dev, VIRTIO_CONFIG_STATUS_DRIVER_OK); } static void vtmmio_notify_virtqueue(device_t dev, uint16_t queue) { struct vtmmio_softc *sc; sc = device_get_softc(dev); vtmmio_write_config_4(sc, VIRTIO_MMIO_QUEUE_NOTIFY, queue); } static uint8_t vtmmio_get_status(device_t dev) { struct vtmmio_softc *sc; sc = device_get_softc(dev); return (vtmmio_read_config_4(sc, VIRTIO_MMIO_STATUS)); } static void vtmmio_set_status(device_t dev, uint8_t status) { struct vtmmio_softc *sc; sc = device_get_softc(dev); if (status != VIRTIO_CONFIG_STATUS_RESET) status |= vtmmio_get_status(dev); vtmmio_write_config_4(sc, VIRTIO_MMIO_STATUS, status); } static void vtmmio_read_dev_config(device_t dev, bus_size_t offset, void *dst, int length) { struct vtmmio_softc *sc; bus_size_t off; uint8_t *d; int size; sc = device_get_softc(dev); off = VIRTIO_MMIO_CONFIG + offset; for (d = dst; length > 0; d += size, off += size, length -= size) { #ifdef ALLOW_WORD_ALIGNED_ACCESS if (length >= 4) { size = 4; *(uint32_t *)d = vtmmio_read_config_4(sc, off); } else if (length >= 2) { size = 2; *(uint16_t *)d = vtmmio_read_config_2(sc, off); } else #endif { size = 1; *d = vtmmio_read_config_1(sc, off); } } } static void vtmmio_write_dev_config(device_t dev, bus_size_t offset, void *src, int length) { struct vtmmio_softc *sc; bus_size_t off; uint8_t *s; int size; sc = device_get_softc(dev); off = VIRTIO_MMIO_CONFIG + offset; for (s = src; length > 0; s += size, off += size, length -= size) { #ifdef ALLOW_WORD_ALIGNED_ACCESS if (length >= 4) { size = 4; vtmmio_write_config_4(sc, off, *(uint32_t *)s); } else if (length >= 2) { size = 2; vtmmio_write_config_2(sc, off, *(uint16_t *)s); } else #endif { size = 1; vtmmio_write_config_1(sc, off, *s); } } } static void vtmmio_describe_features(struct vtmmio_softc *sc, const char *msg, uint64_t features) { device_t dev, child; dev = sc->dev; child = sc->vtmmio_child_dev; if (device_is_attached(child) || bootverbose == 0) return; virtio_describe(dev, msg, features, sc->vtmmio_child_feat_desc); } static void vtmmio_probe_and_attach_child(struct vtmmio_softc *sc) { device_t dev, child; dev = sc->dev; child = sc->vtmmio_child_dev; if (child == NULL) return; if (device_get_state(child) != DS_NOTPRESENT) { return; } if (device_probe(child) != 0) { return; } vtmmio_set_status(dev, VIRTIO_CONFIG_STATUS_DRIVER); if (device_attach(child) != 0) { vtmmio_set_status(dev, VIRTIO_CONFIG_STATUS_FAILED); vtmmio_reset(sc); vtmmio_release_child_resources(sc); /* Reset status for future attempt. 
*/ vtmmio_set_status(dev, VIRTIO_CONFIG_STATUS_ACK); } else { vtmmio_set_status(dev, VIRTIO_CONFIG_STATUS_DRIVER_OK); VIRTIO_ATTACH_COMPLETED(child); } } static int vtmmio_reinit_virtqueue(struct vtmmio_softc *sc, int idx) { struct vtmmio_virtqueue *vqx; struct virtqueue *vq; int error; uint16_t size; vqx = &sc->vtmmio_vqs[idx]; vq = vqx->vtv_vq; KASSERT(vq != NULL, ("%s: vq %d not allocated", __func__, idx)); vtmmio_select_virtqueue(sc, idx); size = vtmmio_read_config_4(sc, VIRTIO_MMIO_QUEUE_NUM_MAX); error = virtqueue_reinit(vq, size); if (error) return (error); vtmmio_write_config_4(sc, VIRTIO_MMIO_QUEUE_PFN, virtqueue_paddr(vq) >> PAGE_SHIFT); return (0); } static void vtmmio_free_interrupts(struct vtmmio_softc *sc) { if (sc->ih != NULL) bus_teardown_intr(sc->dev, sc->res[1], sc->ih); if (sc->res[1] != NULL) bus_release_resource(sc->dev, SYS_RES_IRQ, 0, sc->res[1]); } static void vtmmio_free_virtqueues(struct vtmmio_softc *sc) { struct vtmmio_virtqueue *vqx; int idx; for (idx = 0; idx < sc->vtmmio_nvqs; idx++) { vqx = &sc->vtmmio_vqs[idx]; vtmmio_select_virtqueue(sc, idx); vtmmio_write_config_4(sc, VIRTIO_MMIO_QUEUE_PFN, 0); virtqueue_free(vqx->vtv_vq); vqx->vtv_vq = NULL; } free(sc->vtmmio_vqs, M_DEVBUF); sc->vtmmio_vqs = NULL; sc->vtmmio_nvqs = 0; } static void vtmmio_release_child_resources(struct vtmmio_softc *sc) { vtmmio_free_interrupts(sc); vtmmio_free_virtqueues(sc); } static void vtmmio_reset(struct vtmmio_softc *sc) { /* * Setting the status to RESET sets the host device to * the original, uninitialized state. */ vtmmio_set_status(sc->dev, VIRTIO_CONFIG_STATUS_RESET); } static void vtmmio_select_virtqueue(struct vtmmio_softc *sc, int idx) { vtmmio_write_config_4(sc, VIRTIO_MMIO_QUEUE_SEL, idx); } static void vtmmio_vq_intr(void *arg) { struct vtmmio_virtqueue *vqx; struct vtmmio_softc *sc; struct virtqueue *vq; uint32_t status; int idx; sc = arg; status = vtmmio_read_config_4(sc, VIRTIO_MMIO_INTERRUPT_STATUS); vtmmio_write_config_4(sc, VIRTIO_MMIO_INTERRUPT_ACK, status); /* The config changed */ if (status & VIRTIO_MMIO_INT_CONFIG) if (sc->vtmmio_child_dev != NULL) VIRTIO_CONFIG_CHANGE(sc->vtmmio_child_dev); /* Notify all virtqueues. */ if (status & VIRTIO_MMIO_INT_VRING) { for (idx = 0; idx < sc->vtmmio_nvqs; idx++) { vqx = &sc->vtmmio_vqs[idx]; if (vqx->vtv_no_intr == 0) { vq = vqx->vtv_vq; virtqueue_intr(vq); } } } } Index: head/sys/dev/virtio/network/if_vtnet.c =================================================================== --- head/sys/dev/virtio/network/if_vtnet.c (revision 327948) +++ head/sys/dev/virtio/network/if_vtnet.c (revision 327949) @@ -1,3978 +1,3979 @@ /*- * SPDX-License-Identifier: BSD-2-Clause-FreeBSD * * Copyright (c) 2011, Bryan Venteicher * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice unmodified, this list of conditions, and the following * disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ /* Driver for VirtIO network devices. */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "virtio_if.h" #include "opt_inet.h" #include "opt_inet6.h" static int vtnet_modevent(module_t, int, void *); static int vtnet_probe(device_t); static int vtnet_attach(device_t); static int vtnet_detach(device_t); static int vtnet_suspend(device_t); static int vtnet_resume(device_t); static int vtnet_shutdown(device_t); static int vtnet_attach_completed(device_t); static int vtnet_config_change(device_t); static void vtnet_negotiate_features(struct vtnet_softc *); static void vtnet_setup_features(struct vtnet_softc *); static int vtnet_init_rxq(struct vtnet_softc *, int); static int vtnet_init_txq(struct vtnet_softc *, int); static int vtnet_alloc_rxtx_queues(struct vtnet_softc *); static void vtnet_free_rxtx_queues(struct vtnet_softc *); static int vtnet_alloc_rx_filters(struct vtnet_softc *); static void vtnet_free_rx_filters(struct vtnet_softc *); static int vtnet_alloc_virtqueues(struct vtnet_softc *); static int vtnet_setup_interface(struct vtnet_softc *); static int vtnet_change_mtu(struct vtnet_softc *, int); static int vtnet_ioctl(struct ifnet *, u_long, caddr_t); static uint64_t vtnet_get_counter(struct ifnet *, ift_counter); static int vtnet_rxq_populate(struct vtnet_rxq *); static void vtnet_rxq_free_mbufs(struct vtnet_rxq *); static struct mbuf * vtnet_rx_alloc_buf(struct vtnet_softc *, int , struct mbuf **); static int vtnet_rxq_replace_lro_nomgr_buf(struct vtnet_rxq *, struct mbuf *, int); static int vtnet_rxq_replace_buf(struct vtnet_rxq *, struct mbuf *, int); static int vtnet_rxq_enqueue_buf(struct vtnet_rxq *, struct mbuf *); static int vtnet_rxq_new_buf(struct vtnet_rxq *); static int vtnet_rxq_csum(struct vtnet_rxq *, struct mbuf *, struct virtio_net_hdr *); static void vtnet_rxq_discard_merged_bufs(struct vtnet_rxq *, int); static void vtnet_rxq_discard_buf(struct vtnet_rxq *, struct mbuf *); static int vtnet_rxq_merged_eof(struct vtnet_rxq *, struct mbuf *, int); static void vtnet_rxq_input(struct vtnet_rxq *, struct mbuf *, struct virtio_net_hdr *); static int vtnet_rxq_eof(struct vtnet_rxq *); static void vtnet_rx_vq_intr(void *); static void vtnet_rxq_tq_intr(void *, int); static int vtnet_txq_below_threshold(struct vtnet_txq *); static int vtnet_txq_notify(struct vtnet_txq *); static void vtnet_txq_free_mbufs(struct vtnet_txq *); static int vtnet_txq_offload_ctx(struct vtnet_txq *, struct mbuf *, int *, int *, int *); static int vtnet_txq_offload_tso(struct vtnet_txq *, struct mbuf *, int, int, struct virtio_net_hdr *); static struct mbuf * vtnet_txq_offload(struct vtnet_txq *, struct mbuf *, struct 
virtio_net_hdr *); static int vtnet_txq_enqueue_buf(struct vtnet_txq *, struct mbuf **, struct vtnet_tx_header *); static int vtnet_txq_encap(struct vtnet_txq *, struct mbuf **); #ifdef VTNET_LEGACY_TX static void vtnet_start_locked(struct vtnet_txq *, struct ifnet *); static void vtnet_start(struct ifnet *); #else static int vtnet_txq_mq_start_locked(struct vtnet_txq *, struct mbuf *); static int vtnet_txq_mq_start(struct ifnet *, struct mbuf *); static void vtnet_txq_tq_deferred(void *, int); #endif static void vtnet_txq_start(struct vtnet_txq *); static void vtnet_txq_tq_intr(void *, int); static int vtnet_txq_eof(struct vtnet_txq *); static void vtnet_tx_vq_intr(void *); static void vtnet_tx_start_all(struct vtnet_softc *); #ifndef VTNET_LEGACY_TX static void vtnet_qflush(struct ifnet *); #endif static int vtnet_watchdog(struct vtnet_txq *); static void vtnet_accum_stats(struct vtnet_softc *, struct vtnet_rxq_stats *, struct vtnet_txq_stats *); static void vtnet_tick(void *); static void vtnet_start_taskqueues(struct vtnet_softc *); static void vtnet_free_taskqueues(struct vtnet_softc *); static void vtnet_drain_taskqueues(struct vtnet_softc *); static void vtnet_drain_rxtx_queues(struct vtnet_softc *); static void vtnet_stop_rendezvous(struct vtnet_softc *); static void vtnet_stop(struct vtnet_softc *); static int vtnet_virtio_reinit(struct vtnet_softc *); static void vtnet_init_rx_filters(struct vtnet_softc *); static int vtnet_init_rx_queues(struct vtnet_softc *); static int vtnet_init_tx_queues(struct vtnet_softc *); static int vtnet_init_rxtx_queues(struct vtnet_softc *); static void vtnet_set_active_vq_pairs(struct vtnet_softc *); static int vtnet_reinit(struct vtnet_softc *); static void vtnet_init_locked(struct vtnet_softc *); static void vtnet_init(void *); static void vtnet_free_ctrl_vq(struct vtnet_softc *); static void vtnet_exec_ctrl_cmd(struct vtnet_softc *, void *, struct sglist *, int, int); static int vtnet_ctrl_mac_cmd(struct vtnet_softc *, uint8_t *); static int vtnet_ctrl_mq_cmd(struct vtnet_softc *, uint16_t); static int vtnet_ctrl_rx_cmd(struct vtnet_softc *, int, int); static int vtnet_set_promisc(struct vtnet_softc *, int); static int vtnet_set_allmulti(struct vtnet_softc *, int); static void vtnet_attach_disable_promisc(struct vtnet_softc *); static void vtnet_rx_filter(struct vtnet_softc *); static void vtnet_rx_filter_mac(struct vtnet_softc *); static int vtnet_exec_vlan_filter(struct vtnet_softc *, int, uint16_t); static void vtnet_rx_filter_vlan(struct vtnet_softc *); static void vtnet_update_vlan_filter(struct vtnet_softc *, int, uint16_t); static void vtnet_register_vlan(void *, struct ifnet *, uint16_t); static void vtnet_unregister_vlan(void *, struct ifnet *, uint16_t); static int vtnet_is_link_up(struct vtnet_softc *); static void vtnet_update_link_status(struct vtnet_softc *); static int vtnet_ifmedia_upd(struct ifnet *); static void vtnet_ifmedia_sts(struct ifnet *, struct ifmediareq *); static void vtnet_get_hwaddr(struct vtnet_softc *); static void vtnet_set_hwaddr(struct vtnet_softc *); static void vtnet_vlan_tag_remove(struct mbuf *); static void vtnet_set_rx_process_limit(struct vtnet_softc *); static void vtnet_set_tx_intr_threshold(struct vtnet_softc *); static void vtnet_setup_rxq_sysctl(struct sysctl_ctx_list *, struct sysctl_oid_list *, struct vtnet_rxq *); static void vtnet_setup_txq_sysctl(struct sysctl_ctx_list *, struct sysctl_oid_list *, struct vtnet_txq *); static void vtnet_setup_queue_sysctl(struct vtnet_softc *); static void 
vtnet_setup_sysctl(struct vtnet_softc *); static int vtnet_rxq_enable_intr(struct vtnet_rxq *); static void vtnet_rxq_disable_intr(struct vtnet_rxq *); static int vtnet_txq_enable_intr(struct vtnet_txq *); static void vtnet_txq_disable_intr(struct vtnet_txq *); static void vtnet_enable_rx_interrupts(struct vtnet_softc *); static void vtnet_enable_tx_interrupts(struct vtnet_softc *); static void vtnet_enable_interrupts(struct vtnet_softc *); static void vtnet_disable_rx_interrupts(struct vtnet_softc *); static void vtnet_disable_tx_interrupts(struct vtnet_softc *); static void vtnet_disable_interrupts(struct vtnet_softc *); static int vtnet_tunable_int(struct vtnet_softc *, const char *, int); /* Tunables. */ static SYSCTL_NODE(_hw, OID_AUTO, vtnet, CTLFLAG_RD, 0, "VNET driver parameters"); static int vtnet_csum_disable = 0; TUNABLE_INT("hw.vtnet.csum_disable", &vtnet_csum_disable); SYSCTL_INT(_hw_vtnet, OID_AUTO, csum_disable, CTLFLAG_RDTUN, &vtnet_csum_disable, 0, "Disables receive and send checksum offload"); static int vtnet_tso_disable = 0; TUNABLE_INT("hw.vtnet.tso_disable", &vtnet_tso_disable); SYSCTL_INT(_hw_vtnet, OID_AUTO, tso_disable, CTLFLAG_RDTUN, &vtnet_tso_disable, 0, "Disables TCP Segmentation Offload"); static int vtnet_lro_disable = 0; TUNABLE_INT("hw.vtnet.lro_disable", &vtnet_lro_disable); SYSCTL_INT(_hw_vtnet, OID_AUTO, lro_disable, CTLFLAG_RDTUN, &vtnet_lro_disable, 0, "Disables TCP Large Receive Offload"); static int vtnet_mq_disable = 0; TUNABLE_INT("hw.vtnet.mq_disable", &vtnet_mq_disable); SYSCTL_INT(_hw_vtnet, OID_AUTO, mq_disable, CTLFLAG_RDTUN, &vtnet_mq_disable, 0, "Disables Multi Queue support"); static int vtnet_mq_max_pairs = VTNET_MAX_QUEUE_PAIRS; TUNABLE_INT("hw.vtnet.mq_max_pairs", &vtnet_mq_max_pairs); SYSCTL_INT(_hw_vtnet, OID_AUTO, mq_max_pairs, CTLFLAG_RDTUN, &vtnet_mq_max_pairs, 0, "Sets the maximum number of Multi Queue pairs"); static int vtnet_rx_process_limit = 512; TUNABLE_INT("hw.vtnet.rx_process_limit", &vtnet_rx_process_limit); SYSCTL_INT(_hw_vtnet, OID_AUTO, rx_process_limit, CTLFLAG_RDTUN, &vtnet_rx_process_limit, 0, "Limits the number RX segments processed in a single pass"); static uma_zone_t vtnet_tx_header_zone; static struct virtio_feature_desc vtnet_feature_desc[] = { { VIRTIO_NET_F_CSUM, "TxChecksum" }, { VIRTIO_NET_F_GUEST_CSUM, "RxChecksum" }, { VIRTIO_NET_F_MAC, "MacAddress" }, { VIRTIO_NET_F_GSO, "TxAllGSO" }, { VIRTIO_NET_F_GUEST_TSO4, "RxTSOv4" }, { VIRTIO_NET_F_GUEST_TSO6, "RxTSOv6" }, { VIRTIO_NET_F_GUEST_ECN, "RxECN" }, { VIRTIO_NET_F_GUEST_UFO, "RxUFO" }, { VIRTIO_NET_F_HOST_TSO4, "TxTSOv4" }, { VIRTIO_NET_F_HOST_TSO6, "TxTSOv6" }, { VIRTIO_NET_F_HOST_ECN, "TxTSOECN" }, { VIRTIO_NET_F_HOST_UFO, "TxUFO" }, { VIRTIO_NET_F_MRG_RXBUF, "MrgRxBuf" }, { VIRTIO_NET_F_STATUS, "Status" }, { VIRTIO_NET_F_CTRL_VQ, "ControlVq" }, { VIRTIO_NET_F_CTRL_RX, "RxMode" }, { VIRTIO_NET_F_CTRL_VLAN, "VLanFilter" }, { VIRTIO_NET_F_CTRL_RX_EXTRA, "RxModeExtra" }, { VIRTIO_NET_F_GUEST_ANNOUNCE, "GuestAnnounce" }, { VIRTIO_NET_F_MQ, "Multiqueue" }, { VIRTIO_NET_F_CTRL_MAC_ADDR, "SetMacAddress" }, { 0, NULL } }; static device_method_t vtnet_methods[] = { /* Device methods. */ DEVMETHOD(device_probe, vtnet_probe), DEVMETHOD(device_attach, vtnet_attach), DEVMETHOD(device_detach, vtnet_detach), DEVMETHOD(device_suspend, vtnet_suspend), DEVMETHOD(device_resume, vtnet_resume), DEVMETHOD(device_shutdown, vtnet_shutdown), /* VirtIO methods. 
*/ DEVMETHOD(virtio_attach_completed, vtnet_attach_completed), DEVMETHOD(virtio_config_change, vtnet_config_change), DEVMETHOD_END }; #ifdef DEV_NETMAP #include #endif /* DEV_NETMAP */ static driver_t vtnet_driver = { "vtnet", vtnet_methods, sizeof(struct vtnet_softc) }; static devclass_t vtnet_devclass; DRIVER_MODULE(vtnet, virtio_mmio, vtnet_driver, vtnet_devclass, vtnet_modevent, 0); DRIVER_MODULE(vtnet, virtio_pci, vtnet_driver, vtnet_devclass, vtnet_modevent, 0); MODULE_VERSION(vtnet, 1); MODULE_DEPEND(vtnet, virtio, 1, 1, 1); #ifdef DEV_NETMAP MODULE_DEPEND(vtnet, netmap, 1, 1, 1); #endif /* DEV_NETMAP */ static int vtnet_modevent(module_t mod, int type, void *unused) { int error = 0; static int loaded = 0; switch (type) { case MOD_LOAD: if (loaded++ == 0) vtnet_tx_header_zone = uma_zcreate("vtnet_tx_hdr", sizeof(struct vtnet_tx_header), NULL, NULL, NULL, NULL, 0, 0); break; case MOD_QUIESCE: if (uma_zone_get_cur(vtnet_tx_header_zone) > 0) error = EBUSY; break; case MOD_UNLOAD: if (--loaded == 0) { uma_zdestroy(vtnet_tx_header_zone); vtnet_tx_header_zone = NULL; } break; case MOD_SHUTDOWN: break; default: error = EOPNOTSUPP; break; } return (error); } static int vtnet_probe(device_t dev) { if (virtio_get_device_type(dev) != VIRTIO_ID_NETWORK) return (ENXIO); device_set_desc(dev, "VirtIO Networking Adapter"); return (BUS_PROBE_DEFAULT); } static int vtnet_attach(device_t dev) { struct vtnet_softc *sc; int error; sc = device_get_softc(dev); sc->vtnet_dev = dev; /* Register our feature descriptions. */ virtio_set_feature_desc(dev, vtnet_feature_desc); VTNET_CORE_LOCK_INIT(sc); callout_init_mtx(&sc->vtnet_tick_ch, VTNET_CORE_MTX(sc), 0); vtnet_setup_sysctl(sc); vtnet_setup_features(sc); error = vtnet_alloc_rx_filters(sc); if (error) { device_printf(dev, "cannot allocate Rx filters\n"); goto fail; } error = vtnet_alloc_rxtx_queues(sc); if (error) { device_printf(dev, "cannot allocate queues\n"); goto fail; } error = vtnet_alloc_virtqueues(sc); if (error) { device_printf(dev, "cannot allocate virtqueues\n"); goto fail; } error = vtnet_setup_interface(sc); if (error) { device_printf(dev, "cannot setup interface\n"); goto fail; } error = virtio_setup_intr(dev, INTR_TYPE_NET); if (error) { device_printf(dev, "cannot setup virtqueue interrupts\n"); /* BMV: This will crash if during boot! 
*/ ether_ifdetach(sc->vtnet_ifp); goto fail; } #ifdef DEV_NETMAP vtnet_netmap_attach(sc); #endif /* DEV_NETMAP */ vtnet_start_taskqueues(sc); fail: if (error) vtnet_detach(dev); return (error); } static int vtnet_detach(device_t dev) { struct vtnet_softc *sc; struct ifnet *ifp; sc = device_get_softc(dev); ifp = sc->vtnet_ifp; if (device_is_attached(dev)) { VTNET_CORE_LOCK(sc); vtnet_stop(sc); VTNET_CORE_UNLOCK(sc); callout_drain(&sc->vtnet_tick_ch); vtnet_drain_taskqueues(sc); ether_ifdetach(ifp); } #ifdef DEV_NETMAP netmap_detach(ifp); #endif /* DEV_NETMAP */ vtnet_free_taskqueues(sc); if (sc->vtnet_vlan_attach != NULL) { EVENTHANDLER_DEREGISTER(vlan_config, sc->vtnet_vlan_attach); sc->vtnet_vlan_attach = NULL; } if (sc->vtnet_vlan_detach != NULL) { EVENTHANDLER_DEREGISTER(vlan_unconfig, sc->vtnet_vlan_detach); sc->vtnet_vlan_detach = NULL; } ifmedia_removeall(&sc->vtnet_media); if (ifp != NULL) { if_free(ifp); sc->vtnet_ifp = NULL; } vtnet_free_rxtx_queues(sc); vtnet_free_rx_filters(sc); if (sc->vtnet_ctrl_vq != NULL) vtnet_free_ctrl_vq(sc); VTNET_CORE_LOCK_DESTROY(sc); return (0); } static int vtnet_suspend(device_t dev) { struct vtnet_softc *sc; sc = device_get_softc(dev); VTNET_CORE_LOCK(sc); vtnet_stop(sc); sc->vtnet_flags |= VTNET_FLAG_SUSPENDED; VTNET_CORE_UNLOCK(sc); return (0); } static int vtnet_resume(device_t dev) { struct vtnet_softc *sc; struct ifnet *ifp; sc = device_get_softc(dev); ifp = sc->vtnet_ifp; VTNET_CORE_LOCK(sc); if (ifp->if_flags & IFF_UP) vtnet_init_locked(sc); sc->vtnet_flags &= ~VTNET_FLAG_SUSPENDED; VTNET_CORE_UNLOCK(sc); return (0); } static int vtnet_shutdown(device_t dev) { /* * Suspend already does all of what we need to * do here; we just never expect to be resumed. */ return (vtnet_suspend(dev)); } static int vtnet_attach_completed(device_t dev) { vtnet_attach_disable_promisc(device_get_softc(dev)); return (0); } static int vtnet_config_change(device_t dev) { struct vtnet_softc *sc; sc = device_get_softc(dev); VTNET_CORE_LOCK(sc); vtnet_update_link_status(sc); if (sc->vtnet_link_active != 0) vtnet_tx_start_all(sc); VTNET_CORE_UNLOCK(sc); return (0); } static void vtnet_negotiate_features(struct vtnet_softc *sc) { device_t dev; uint64_t mask, features; dev = sc->vtnet_dev; mask = 0; /* * TSO and LRO are only available when their corresponding checksum * offload feature is also negotiated. */ if (vtnet_tunable_int(sc, "csum_disable", vtnet_csum_disable)) { mask |= VIRTIO_NET_F_CSUM | VIRTIO_NET_F_GUEST_CSUM; mask |= VTNET_TSO_FEATURES | VTNET_LRO_FEATURES; } if (vtnet_tunable_int(sc, "tso_disable", vtnet_tso_disable)) mask |= VTNET_TSO_FEATURES; if (vtnet_tunable_int(sc, "lro_disable", vtnet_lro_disable)) mask |= VTNET_LRO_FEATURES; #ifndef VTNET_LEGACY_TX if (vtnet_tunable_int(sc, "mq_disable", vtnet_mq_disable)) mask |= VIRTIO_NET_F_MQ; #else mask |= VIRTIO_NET_F_MQ; #endif features = VTNET_FEATURES & ~mask; sc->vtnet_features = virtio_negotiate_features(dev, features); if (virtio_with_feature(dev, VTNET_LRO_FEATURES) && virtio_with_feature(dev, VIRTIO_NET_F_MRG_RXBUF) == 0) { /* * LRO without mergeable buffers requires special care. This * is not ideal because every receive buffer must be large * enough to hold the maximum TCP packet, the Ethernet header, * and the header. This requires up to 34 descriptors with * MCLBYTES clusters. If we do not have indirect descriptors, * LRO is disabled since the virtqueue will not contain very * many receive buffers. 
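 *
 * Illustrative arithmetic (editor's annotation, not part of the committed
 * change): the descriptor count above follows from the largest LRO frame
 * the host may deliver, roughly a 64KB TCP payload plus link-level headers
 * spread over 2KB MCLBYTES clusters, i.e. on the order of
 *
 *	howmany(65535 + ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN, MCLBYTES)
 *
 * data segments plus the entries for the request header, which matches the
 * "up to 34 descriptors" figure. Without indirect descriptors each receive
 * buffer would consume that many ring slots directly, so only a handful of
 * buffers would fit in a typically sized virtqueue, and LRO is dropped
 * below instead.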
*/ if (!virtio_with_feature(dev, VIRTIO_RING_F_INDIRECT_DESC)) { device_printf(dev, "LRO disabled due to both mergeable buffers and " "indirect descriptors not negotiated\n"); features &= ~VTNET_LRO_FEATURES; sc->vtnet_features = virtio_negotiate_features(dev, features); } else sc->vtnet_flags |= VTNET_FLAG_LRO_NOMRG; } } static void vtnet_setup_features(struct vtnet_softc *sc) { device_t dev; dev = sc->vtnet_dev; vtnet_negotiate_features(sc); if (virtio_with_feature(dev, VIRTIO_RING_F_INDIRECT_DESC)) sc->vtnet_flags |= VTNET_FLAG_INDIRECT; if (virtio_with_feature(dev, VIRTIO_RING_F_EVENT_IDX)) sc->vtnet_flags |= VTNET_FLAG_EVENT_IDX; if (virtio_with_feature(dev, VIRTIO_NET_F_MAC)) { /* This feature should always be negotiated. */ sc->vtnet_flags |= VTNET_FLAG_MAC; } if (virtio_with_feature(dev, VIRTIO_NET_F_MRG_RXBUF)) { sc->vtnet_flags |= VTNET_FLAG_MRG_RXBUFS; sc->vtnet_hdr_size = sizeof(struct virtio_net_hdr_mrg_rxbuf); } else sc->vtnet_hdr_size = sizeof(struct virtio_net_hdr); if (sc->vtnet_flags & VTNET_FLAG_MRG_RXBUFS) sc->vtnet_rx_nsegs = VTNET_MRG_RX_SEGS; else if (sc->vtnet_flags & VTNET_FLAG_LRO_NOMRG) sc->vtnet_rx_nsegs = VTNET_MAX_RX_SEGS; else sc->vtnet_rx_nsegs = VTNET_MIN_RX_SEGS; if (virtio_with_feature(dev, VIRTIO_NET_F_GSO) || virtio_with_feature(dev, VIRTIO_NET_F_HOST_TSO4) || virtio_with_feature(dev, VIRTIO_NET_F_HOST_TSO6)) sc->vtnet_tx_nsegs = VTNET_MAX_TX_SEGS; else sc->vtnet_tx_nsegs = VTNET_MIN_TX_SEGS; if (virtio_with_feature(dev, VIRTIO_NET_F_CTRL_VQ)) { sc->vtnet_flags |= VTNET_FLAG_CTRL_VQ; if (virtio_with_feature(dev, VIRTIO_NET_F_CTRL_RX)) sc->vtnet_flags |= VTNET_FLAG_CTRL_RX; if (virtio_with_feature(dev, VIRTIO_NET_F_CTRL_VLAN)) sc->vtnet_flags |= VTNET_FLAG_VLAN_FILTER; if (virtio_with_feature(dev, VIRTIO_NET_F_CTRL_MAC_ADDR)) sc->vtnet_flags |= VTNET_FLAG_CTRL_MAC; } if (virtio_with_feature(dev, VIRTIO_NET_F_MQ) && sc->vtnet_flags & VTNET_FLAG_CTRL_VQ) { sc->vtnet_max_vq_pairs = virtio_read_dev_config_2(dev, offsetof(struct virtio_net_config, max_virtqueue_pairs)); } else sc->vtnet_max_vq_pairs = 1; if (sc->vtnet_max_vq_pairs > 1) { /* * Limit the maximum number of queue pairs to the lower of * the number of CPUs and the configured maximum. * The actual number of queues that get used may be less. */ int max; max = vtnet_tunable_int(sc, "mq_max_pairs", vtnet_mq_max_pairs); if (max > VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MIN) { if (max > mp_ncpus) max = mp_ncpus; if (max > VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MAX) max = VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MAX; if (max > 1) { sc->vtnet_requested_vq_pairs = max; sc->vtnet_flags |= VTNET_FLAG_MULTIQ; } } } } static int vtnet_init_rxq(struct vtnet_softc *sc, int id) { struct vtnet_rxq *rxq; rxq = &sc->vtnet_rxqs[id]; snprintf(rxq->vtnrx_name, sizeof(rxq->vtnrx_name), "%s-rx%d", device_get_nameunit(sc->vtnet_dev), id); mtx_init(&rxq->vtnrx_mtx, rxq->vtnrx_name, NULL, MTX_DEF); rxq->vtnrx_sc = sc; rxq->vtnrx_id = id; rxq->vtnrx_sg = sglist_alloc(sc->vtnet_rx_nsegs, M_NOWAIT); if (rxq->vtnrx_sg == NULL) return (ENOMEM); TASK_INIT(&rxq->vtnrx_intrtask, 0, vtnet_rxq_tq_intr, rxq); rxq->vtnrx_tq = taskqueue_create(rxq->vtnrx_name, M_NOWAIT, taskqueue_thread_enqueue, &rxq->vtnrx_tq); return (rxq->vtnrx_tq == NULL ? 
ENOMEM : 0); } static int vtnet_init_txq(struct vtnet_softc *sc, int id) { struct vtnet_txq *txq; txq = &sc->vtnet_txqs[id]; snprintf(txq->vtntx_name, sizeof(txq->vtntx_name), "%s-tx%d", device_get_nameunit(sc->vtnet_dev), id); mtx_init(&txq->vtntx_mtx, txq->vtntx_name, NULL, MTX_DEF); txq->vtntx_sc = sc; txq->vtntx_id = id; txq->vtntx_sg = sglist_alloc(sc->vtnet_tx_nsegs, M_NOWAIT); if (txq->vtntx_sg == NULL) return (ENOMEM); #ifndef VTNET_LEGACY_TX txq->vtntx_br = buf_ring_alloc(VTNET_DEFAULT_BUFRING_SIZE, M_DEVBUF, M_NOWAIT, &txq->vtntx_mtx); if (txq->vtntx_br == NULL) return (ENOMEM); TASK_INIT(&txq->vtntx_defrtask, 0, vtnet_txq_tq_deferred, txq); #endif TASK_INIT(&txq->vtntx_intrtask, 0, vtnet_txq_tq_intr, txq); txq->vtntx_tq = taskqueue_create(txq->vtntx_name, M_NOWAIT, taskqueue_thread_enqueue, &txq->vtntx_tq); if (txq->vtntx_tq == NULL) return (ENOMEM); return (0); } static int vtnet_alloc_rxtx_queues(struct vtnet_softc *sc) { int i, npairs, error; npairs = sc->vtnet_max_vq_pairs; - sc->vtnet_rxqs = malloc(sizeof(struct vtnet_rxq) * npairs, M_DEVBUF, + sc->vtnet_rxqs = mallocarray(npairs, sizeof(struct vtnet_rxq), M_DEVBUF, M_NOWAIT | M_ZERO); - sc->vtnet_txqs = malloc(sizeof(struct vtnet_txq) * npairs, M_DEVBUF, + sc->vtnet_txqs = mallocarray(npairs, sizeof(struct vtnet_txq), M_DEVBUF, M_NOWAIT | M_ZERO); if (sc->vtnet_rxqs == NULL || sc->vtnet_txqs == NULL) return (ENOMEM); for (i = 0; i < npairs; i++) { error = vtnet_init_rxq(sc, i); if (error) return (error); error = vtnet_init_txq(sc, i); if (error) return (error); } vtnet_setup_queue_sysctl(sc); return (0); } static void vtnet_destroy_rxq(struct vtnet_rxq *rxq) { rxq->vtnrx_sc = NULL; rxq->vtnrx_id = -1; if (rxq->vtnrx_sg != NULL) { sglist_free(rxq->vtnrx_sg); rxq->vtnrx_sg = NULL; } if (mtx_initialized(&rxq->vtnrx_mtx) != 0) mtx_destroy(&rxq->vtnrx_mtx); } static void vtnet_destroy_txq(struct vtnet_txq *txq) { txq->vtntx_sc = NULL; txq->vtntx_id = -1; if (txq->vtntx_sg != NULL) { sglist_free(txq->vtntx_sg); txq->vtntx_sg = NULL; } #ifndef VTNET_LEGACY_TX if (txq->vtntx_br != NULL) { buf_ring_free(txq->vtntx_br, M_DEVBUF); txq->vtntx_br = NULL; } #endif if (mtx_initialized(&txq->vtntx_mtx) != 0) mtx_destroy(&txq->vtntx_mtx); } static void vtnet_free_rxtx_queues(struct vtnet_softc *sc) { int i; if (sc->vtnet_rxqs != NULL) { for (i = 0; i < sc->vtnet_max_vq_pairs; i++) vtnet_destroy_rxq(&sc->vtnet_rxqs[i]); free(sc->vtnet_rxqs, M_DEVBUF); sc->vtnet_rxqs = NULL; } if (sc->vtnet_txqs != NULL) { for (i = 0; i < sc->vtnet_max_vq_pairs; i++) vtnet_destroy_txq(&sc->vtnet_txqs[i]); free(sc->vtnet_txqs, M_DEVBUF); sc->vtnet_txqs = NULL; } } static int vtnet_alloc_rx_filters(struct vtnet_softc *sc) { if (sc->vtnet_flags & VTNET_FLAG_CTRL_RX) { sc->vtnet_mac_filter = malloc(sizeof(struct vtnet_mac_filter), M_DEVBUF, M_NOWAIT | M_ZERO); if (sc->vtnet_mac_filter == NULL) return (ENOMEM); } if (sc->vtnet_flags & VTNET_FLAG_VLAN_FILTER) { sc->vtnet_vlan_filter = malloc(sizeof(uint32_t) * VTNET_VLAN_FILTER_NWORDS, M_DEVBUF, M_NOWAIT | M_ZERO); if (sc->vtnet_vlan_filter == NULL) return (ENOMEM); } return (0); } static void vtnet_free_rx_filters(struct vtnet_softc *sc) { if (sc->vtnet_mac_filter != NULL) { free(sc->vtnet_mac_filter, M_DEVBUF); sc->vtnet_mac_filter = NULL; } if (sc->vtnet_vlan_filter != NULL) { free(sc->vtnet_vlan_filter, M_DEVBUF); sc->vtnet_vlan_filter = NULL; } } static int vtnet_alloc_virtqueues(struct vtnet_softc *sc) { device_t dev; struct vq_alloc_info *info; struct vtnet_rxq *rxq; struct vtnet_txq *txq; int i, idx, 
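/*
 * Illustrative sketch (editor's annotation, not part of the committed
 * change; the helper name below is hypothetical). The mallocarray(9)
 * conversions in vtnet_alloc_rxtx_queues() above are the substance of this
 * revision: mallocarray(n, size, type, flags) behaves like
 * malloc(n * size, type, flags) except that it refuses the request when
 * n * size would overflow, instead of silently returning a short
 * allocation. Conceptually:
 *
 *	void *
 *	example_mallocarray(size_t n, size_t size, int flags)
 *	{
 *		if (n != 0 && SIZE_MAX / n < size)
 *			return (NULL);		-- multiplication overflows
 *		return (malloc(n * size, M_DEVBUF, flags));
 *	}
 *
 * (The in-kernel version takes a struct malloc_type and handles the
 * overflow case as documented in mallocarray(9).) Here the counts come
 * from vtnet_max_vq_pairs, which the device configuration bounds, so the
 * change is defensive hardening rather than a bug fix.
 */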
flags, nvqs, error; dev = sc->vtnet_dev; flags = 0; nvqs = sc->vtnet_max_vq_pairs * 2; if (sc->vtnet_flags & VTNET_FLAG_CTRL_VQ) nvqs++; - info = malloc(sizeof(struct vq_alloc_info) * nvqs, M_TEMP, M_NOWAIT); + info = mallocarray(nvqs, sizeof(struct vq_alloc_info), M_TEMP, + M_NOWAIT); if (info == NULL) return (ENOMEM); for (i = 0, idx = 0; i < sc->vtnet_max_vq_pairs; i++, idx+=2) { rxq = &sc->vtnet_rxqs[i]; VQ_ALLOC_INFO_INIT(&info[idx], sc->vtnet_rx_nsegs, vtnet_rx_vq_intr, rxq, &rxq->vtnrx_vq, "%s-%d rx", device_get_nameunit(dev), rxq->vtnrx_id); txq = &sc->vtnet_txqs[i]; VQ_ALLOC_INFO_INIT(&info[idx+1], sc->vtnet_tx_nsegs, vtnet_tx_vq_intr, txq, &txq->vtntx_vq, "%s-%d tx", device_get_nameunit(dev), txq->vtntx_id); } if (sc->vtnet_flags & VTNET_FLAG_CTRL_VQ) { VQ_ALLOC_INFO_INIT(&info[idx], 0, NULL, NULL, &sc->vtnet_ctrl_vq, "%s ctrl", device_get_nameunit(dev)); } /* * Enable interrupt binding if this is multiqueue. This only matters * when per-vq MSIX is available. */ if (sc->vtnet_flags & VTNET_FLAG_MULTIQ) flags |= 0; error = virtio_alloc_virtqueues(dev, flags, nvqs, info); free(info, M_TEMP); return (error); } static int vtnet_setup_interface(struct vtnet_softc *sc) { device_t dev; struct ifnet *ifp; dev = sc->vtnet_dev; ifp = sc->vtnet_ifp = if_alloc(IFT_ETHER); if (ifp == NULL) { device_printf(dev, "cannot allocate ifnet structure\n"); return (ENOSPC); } if_initname(ifp, device_get_name(dev), device_get_unit(dev)); ifp->if_baudrate = IF_Gbps(10); /* Approx. */ ifp->if_softc = sc; ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; ifp->if_init = vtnet_init; ifp->if_ioctl = vtnet_ioctl; ifp->if_get_counter = vtnet_get_counter; #ifndef VTNET_LEGACY_TX ifp->if_transmit = vtnet_txq_mq_start; ifp->if_qflush = vtnet_qflush; #else struct virtqueue *vq = sc->vtnet_txqs[0].vtntx_vq; ifp->if_start = vtnet_start; IFQ_SET_MAXLEN(&ifp->if_snd, virtqueue_size(vq) - 1); ifp->if_snd.ifq_drv_maxlen = virtqueue_size(vq) - 1; IFQ_SET_READY(&ifp->if_snd); #endif ifmedia_init(&sc->vtnet_media, IFM_IMASK, vtnet_ifmedia_upd, vtnet_ifmedia_sts); ifmedia_add(&sc->vtnet_media, VTNET_MEDIATYPE, 0, NULL); ifmedia_set(&sc->vtnet_media, VTNET_MEDIATYPE); /* Read (or generate) the MAC address for the adapter. */ vtnet_get_hwaddr(sc); ether_ifattach(ifp, sc->vtnet_hwaddr); if (virtio_with_feature(dev, VIRTIO_NET_F_STATUS)) ifp->if_capabilities |= IFCAP_LINKSTATE; /* Tell the upper layer(s) we support long frames. 
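 *
 * For orientation (editor's annotation, not part of the committed change):
 * vtnet_alloc_virtqueues() above lays out the vq_alloc_info array as
 * interleaved rx/tx pairs with the optional control queue last, so with N
 * queue pairs:
 *
 *	info[2 * i]     -- rx queue i, handler vtnet_rx_vq_intr
 *	info[2 * i + 1] -- tx queue i, handler vtnet_tx_vq_intr
 *	info[2 * N]     -- control queue, no handler
 *
 * giving nvqs = 2 * N, plus one when VTNET_FLAG_CTRL_VQ is set. A queue's
 * position in this array is also what the transport goes by when it
 * assigns per-virtqueue interrupt vectors, where those are available.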
*/ ifp->if_hdrlen = sizeof(struct ether_vlan_header); ifp->if_capabilities |= IFCAP_JUMBO_MTU | IFCAP_VLAN_MTU; if (virtio_with_feature(dev, VIRTIO_NET_F_CSUM)) { ifp->if_capabilities |= IFCAP_TXCSUM | IFCAP_TXCSUM_IPV6; if (virtio_with_feature(dev, VIRTIO_NET_F_GSO)) { ifp->if_capabilities |= IFCAP_TSO4 | IFCAP_TSO6; sc->vtnet_flags |= VTNET_FLAG_TSO_ECN; } else { if (virtio_with_feature(dev, VIRTIO_NET_F_HOST_TSO4)) ifp->if_capabilities |= IFCAP_TSO4; if (virtio_with_feature(dev, VIRTIO_NET_F_HOST_TSO6)) ifp->if_capabilities |= IFCAP_TSO6; if (virtio_with_feature(dev, VIRTIO_NET_F_HOST_ECN)) sc->vtnet_flags |= VTNET_FLAG_TSO_ECN; } if (ifp->if_capabilities & IFCAP_TSO) ifp->if_capabilities |= IFCAP_VLAN_HWTSO; } if (virtio_with_feature(dev, VIRTIO_NET_F_GUEST_CSUM)) { ifp->if_capabilities |= IFCAP_RXCSUM | IFCAP_RXCSUM_IPV6; if (virtio_with_feature(dev, VIRTIO_NET_F_GUEST_TSO4) || virtio_with_feature(dev, VIRTIO_NET_F_GUEST_TSO6)) ifp->if_capabilities |= IFCAP_LRO; } if (ifp->if_capabilities & IFCAP_HWCSUM) { /* * VirtIO does not support VLAN tagging, but we can fake * it by inserting and removing the 802.1Q header during * transmit and receive. We are then able to do checksum * offloading of VLAN frames. */ ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_HWCSUM; } ifp->if_capenable = ifp->if_capabilities; /* * Capabilities after here are not enabled by default. */ if (sc->vtnet_flags & VTNET_FLAG_VLAN_FILTER) { ifp->if_capabilities |= IFCAP_VLAN_HWFILTER; sc->vtnet_vlan_attach = EVENTHANDLER_REGISTER(vlan_config, vtnet_register_vlan, sc, EVENTHANDLER_PRI_FIRST); sc->vtnet_vlan_detach = EVENTHANDLER_REGISTER(vlan_unconfig, vtnet_unregister_vlan, sc, EVENTHANDLER_PRI_FIRST); } vtnet_set_rx_process_limit(sc); vtnet_set_tx_intr_threshold(sc); return (0); } static int vtnet_change_mtu(struct vtnet_softc *sc, int new_mtu) { struct ifnet *ifp; int frame_size, clsize; ifp = sc->vtnet_ifp; if (new_mtu < ETHERMIN || new_mtu > VTNET_MAX_MTU) return (EINVAL); frame_size = sc->vtnet_hdr_size + sizeof(struct ether_vlan_header) + new_mtu; /* * Based on the new MTU (and hence frame size) determine which * cluster size is most appropriate for the receive queues. */ if (frame_size <= MCLBYTES) { clsize = MCLBYTES; } else if ((sc->vtnet_flags & VTNET_FLAG_MRG_RXBUFS) == 0) { /* Avoid going past 9K jumbos. 
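 *
 * Illustrative summary (editor's annotation, not part of the committed
 * change): the cluster selection in vtnet_change_mtu() works out to
 *
 *	frame_size = hdr_size + sizeof(struct ether_vlan_header) + MTU
 *
 *	frame_size <= MCLBYTES         -- 2KB cluster
 *	else, no mergeable rx buffers  -- MJUM9BYTES (9KB) cluster, or
 *	                                  EINVAL past 9KB (just below)
 *	else, mergeable rx buffers     -- MJUMPAGESIZE (page-sized) cluster
 *
 * With mergeable receive buffers a large frame can span several page-sized
 * clusters, so no single buffer ever needs to exceed a page.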
*/ if (frame_size > MJUM9BYTES) return (EINVAL); clsize = MJUM9BYTES; } else clsize = MJUMPAGESIZE; ifp->if_mtu = new_mtu; sc->vtnet_rx_new_clsize = clsize; if (ifp->if_drv_flags & IFF_DRV_RUNNING) { ifp->if_drv_flags &= ~IFF_DRV_RUNNING; vtnet_init_locked(sc); } return (0); } static int vtnet_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data) { struct vtnet_softc *sc; struct ifreq *ifr; int reinit, mask, error; sc = ifp->if_softc; ifr = (struct ifreq *) data; error = 0; switch (cmd) { case SIOCSIFMTU: if (ifp->if_mtu != ifr->ifr_mtu) { VTNET_CORE_LOCK(sc); error = vtnet_change_mtu(sc, ifr->ifr_mtu); VTNET_CORE_UNLOCK(sc); } break; case SIOCSIFFLAGS: VTNET_CORE_LOCK(sc); if ((ifp->if_flags & IFF_UP) == 0) { if (ifp->if_drv_flags & IFF_DRV_RUNNING) vtnet_stop(sc); } else if (ifp->if_drv_flags & IFF_DRV_RUNNING) { if ((ifp->if_flags ^ sc->vtnet_if_flags) & (IFF_PROMISC | IFF_ALLMULTI)) { if (sc->vtnet_flags & VTNET_FLAG_CTRL_RX) vtnet_rx_filter(sc); else { ifp->if_flags |= IFF_PROMISC; if ((ifp->if_flags ^ sc->vtnet_if_flags) & IFF_ALLMULTI) error = ENOTSUP; } } } else vtnet_init_locked(sc); if (error == 0) sc->vtnet_if_flags = ifp->if_flags; VTNET_CORE_UNLOCK(sc); break; case SIOCADDMULTI: case SIOCDELMULTI: if ((sc->vtnet_flags & VTNET_FLAG_CTRL_RX) == 0) break; VTNET_CORE_LOCK(sc); if (ifp->if_drv_flags & IFF_DRV_RUNNING) vtnet_rx_filter_mac(sc); VTNET_CORE_UNLOCK(sc); break; case SIOCSIFMEDIA: case SIOCGIFMEDIA: error = ifmedia_ioctl(ifp, ifr, &sc->vtnet_media, cmd); break; case SIOCSIFCAP: VTNET_CORE_LOCK(sc); mask = ifr->ifr_reqcap ^ ifp->if_capenable; if (mask & IFCAP_TXCSUM) ifp->if_capenable ^= IFCAP_TXCSUM; if (mask & IFCAP_TXCSUM_IPV6) ifp->if_capenable ^= IFCAP_TXCSUM_IPV6; if (mask & IFCAP_TSO4) ifp->if_capenable ^= IFCAP_TSO4; if (mask & IFCAP_TSO6) ifp->if_capenable ^= IFCAP_TSO6; if (mask & (IFCAP_RXCSUM | IFCAP_RXCSUM_IPV6 | IFCAP_LRO | IFCAP_VLAN_HWFILTER)) { /* These Rx features require us to renegotiate. */ reinit = 1; if (mask & IFCAP_RXCSUM) ifp->if_capenable ^= IFCAP_RXCSUM; if (mask & IFCAP_RXCSUM_IPV6) ifp->if_capenable ^= IFCAP_RXCSUM_IPV6; if (mask & IFCAP_LRO) ifp->if_capenable ^= IFCAP_LRO; if (mask & IFCAP_VLAN_HWFILTER) ifp->if_capenable ^= IFCAP_VLAN_HWFILTER; } else reinit = 0; if (mask & IFCAP_VLAN_HWTSO) ifp->if_capenable ^= IFCAP_VLAN_HWTSO; if (mask & IFCAP_VLAN_HWTAGGING) ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING; if (reinit && (ifp->if_drv_flags & IFF_DRV_RUNNING)) { ifp->if_drv_flags &= ~IFF_DRV_RUNNING; vtnet_init_locked(sc); } VTNET_CORE_UNLOCK(sc); VLAN_CAPABILITIES(ifp); break; default: error = ether_ioctl(ifp, cmd, data); break; } VTNET_CORE_LOCK_ASSERT_NOTOWNED(sc); return (error); } static int vtnet_rxq_populate(struct vtnet_rxq *rxq) { struct virtqueue *vq; int nbufs, error; vq = rxq->vtnrx_vq; error = ENOSPC; for (nbufs = 0; !virtqueue_full(vq); nbufs++) { error = vtnet_rxq_new_buf(rxq); if (error) break; } if (nbufs > 0) { virtqueue_notify(vq); /* * EMSGSIZE signifies the virtqueue did not have enough * entries available to hold the last mbuf. This is not * an error. 
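 *
 * Editor's annotation (illustrative, not part of the committed change):
 * the refill loop above also batches the kick. Buffers are enqueued until
 * the ring is full or allocation fails, and the host is notified once at
 * the end, along the lines of
 *
 *	while (!virtqueue_full(vq) && vtnet_rxq_new_buf(rxq) == 0)
 *		nbufs++;
 *	if (nbufs > 0)
 *		virtqueue_notify(vq);
 *
 * which matters because each notify is typically a trap into the
 * hypervisor.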
*/ if (error == EMSGSIZE) error = 0; } return (error); } static void vtnet_rxq_free_mbufs(struct vtnet_rxq *rxq) { struct virtqueue *vq; struct mbuf *m; int last; vq = rxq->vtnrx_vq; last = 0; while ((m = virtqueue_drain(vq, &last)) != NULL) m_freem(m); KASSERT(virtqueue_empty(vq), ("%s: mbufs remaining in rx queue %p", __func__, rxq)); } static struct mbuf * vtnet_rx_alloc_buf(struct vtnet_softc *sc, int nbufs, struct mbuf **m_tailp) { struct mbuf *m_head, *m_tail, *m; int i, clsize; clsize = sc->vtnet_rx_clsize; KASSERT(nbufs == 1 || sc->vtnet_flags & VTNET_FLAG_LRO_NOMRG, ("%s: chained mbuf %d request without LRO_NOMRG", __func__, nbufs)); m_head = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, clsize); if (m_head == NULL) goto fail; m_head->m_len = clsize; m_tail = m_head; /* Allocate the rest of the chain. */ for (i = 1; i < nbufs; i++) { m = m_getjcl(M_NOWAIT, MT_DATA, 0, clsize); if (m == NULL) goto fail; m->m_len = clsize; m_tail->m_next = m; m_tail = m; } if (m_tailp != NULL) *m_tailp = m_tail; return (m_head); fail: sc->vtnet_stats.mbuf_alloc_failed++; m_freem(m_head); return (NULL); } /* * Slow path for when LRO without mergeable buffers is negotiated. */ static int vtnet_rxq_replace_lro_nomgr_buf(struct vtnet_rxq *rxq, struct mbuf *m0, int len0) { struct vtnet_softc *sc; struct mbuf *m, *m_prev; struct mbuf *m_new, *m_tail; int len, clsize, nreplace, error; sc = rxq->vtnrx_sc; clsize = sc->vtnet_rx_clsize; m_prev = NULL; m_tail = NULL; nreplace = 0; m = m0; len = len0; /* * Since these mbuf chains are so large, we avoid allocating an * entire replacement chain if possible. When the received frame * did not consume the entire chain, the unused mbufs are moved * to the replacement chain. */ while (len > 0) { /* * Something is seriously wrong if we received a frame * larger than the chain. Drop it. */ if (m == NULL) { sc->vtnet_stats.rx_frame_too_large++; return (EMSGSIZE); } /* We always allocate the same cluster size. */ KASSERT(m->m_len == clsize, ("%s: mbuf size %d is not the cluster size %d", __func__, m->m_len, clsize)); m->m_len = MIN(m->m_len, len); len -= m->m_len; m_prev = m; m = m->m_next; nreplace++; } KASSERT(nreplace <= sc->vtnet_rx_nmbufs, ("%s: too many replacement mbufs %d max %d", __func__, nreplace, sc->vtnet_rx_nmbufs)); m_new = vtnet_rx_alloc_buf(sc, nreplace, &m_tail); if (m_new == NULL) { m_prev->m_len = clsize; return (ENOBUFS); } /* * Move any unused mbufs from the received chain onto the end * of the new chain. */ if (m_prev->m_next != NULL) { m_tail->m_next = m_prev->m_next; m_prev->m_next = NULL; } error = vtnet_rxq_enqueue_buf(rxq, m_new); if (error) { /* * BAD! We could not enqueue the replacement mbuf chain. We * must restore the m0 chain to the original state if it was * modified so we can subsequently discard it. * * NOTE: The replacement is suppose to be an identical copy * to the one just dequeued so this is an unexpected error. */ sc->vtnet_stats.rx_enq_replacement_failed++; if (m_tail->m_next != NULL) { m_prev->m_next = m_tail->m_next; m_tail->m_next = NULL; } m_prev->m_len = clsize; m_freem(m_new); } return (error); } static int vtnet_rxq_replace_buf(struct vtnet_rxq *rxq, struct mbuf *m, int len) { struct vtnet_softc *sc; struct mbuf *m_new; int error; sc = rxq->vtnrx_sc; KASSERT(sc->vtnet_flags & VTNET_FLAG_LRO_NOMRG || m->m_next == NULL, ("%s: chained mbuf without LRO_NOMRG", __func__)); if (m->m_next == NULL) { /* Fast-path for the common case of just one mbuf. 
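 *
 * Editor's annotation (illustrative, not part of the committed change):
 * the replace-before-input pattern used throughout this receive path keeps
 * the ring full without copying packet data. A fresh buffer is allocated
 * and enqueued in place of the one just dequeued, and only then is the
 * original mbuf handed up the stack:
 *
 *	m_new = vtnet_rx_alloc_buf(sc, 1, NULL);
 *	if (m_new == NULL)
 *		return (ENOBUFS);	-- caller requeues the old buffer
 *	if (vtnet_rxq_enqueue_buf(rxq, m_new) != 0)
 *		m_freem(m_new);		-- old buffer is requeued, frame dropped
 *	else
 *		m->m_len = len;		-- old mbuf becomes the received frame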
*/ if (m->m_len < len) return (EINVAL); m_new = vtnet_rx_alloc_buf(sc, 1, NULL); if (m_new == NULL) return (ENOBUFS); error = vtnet_rxq_enqueue_buf(rxq, m_new); if (error) { /* * The new mbuf is suppose to be an identical * copy of the one just dequeued so this is an * unexpected error. */ m_freem(m_new); sc->vtnet_stats.rx_enq_replacement_failed++; } else m->m_len = len; } else error = vtnet_rxq_replace_lro_nomgr_buf(rxq, m, len); return (error); } static int vtnet_rxq_enqueue_buf(struct vtnet_rxq *rxq, struct mbuf *m) { struct vtnet_softc *sc; struct sglist *sg; struct vtnet_rx_header *rxhdr; uint8_t *mdata; int offset, error; sc = rxq->vtnrx_sc; sg = rxq->vtnrx_sg; mdata = mtod(m, uint8_t *); VTNET_RXQ_LOCK_ASSERT(rxq); KASSERT(sc->vtnet_flags & VTNET_FLAG_LRO_NOMRG || m->m_next == NULL, ("%s: chained mbuf without LRO_NOMRG", __func__)); KASSERT(m->m_len == sc->vtnet_rx_clsize, ("%s: unexpected cluster size %d/%d", __func__, m->m_len, sc->vtnet_rx_clsize)); sglist_reset(sg); if ((sc->vtnet_flags & VTNET_FLAG_MRG_RXBUFS) == 0) { MPASS(sc->vtnet_hdr_size == sizeof(struct virtio_net_hdr)); rxhdr = (struct vtnet_rx_header *) mdata; sglist_append(sg, &rxhdr->vrh_hdr, sc->vtnet_hdr_size); offset = sizeof(struct vtnet_rx_header); } else offset = 0; sglist_append(sg, mdata + offset, m->m_len - offset); if (m->m_next != NULL) { error = sglist_append_mbuf(sg, m->m_next); MPASS(error == 0); } error = virtqueue_enqueue(rxq->vtnrx_vq, m, sg, 0, sg->sg_nseg); return (error); } static int vtnet_rxq_new_buf(struct vtnet_rxq *rxq) { struct vtnet_softc *sc; struct mbuf *m; int error; sc = rxq->vtnrx_sc; m = vtnet_rx_alloc_buf(sc, sc->vtnet_rx_nmbufs, NULL); if (m == NULL) return (ENOBUFS); error = vtnet_rxq_enqueue_buf(rxq, m); if (error) m_freem(m); return (error); } /* * Use the checksum offset in the VirtIO header to set the * correct CSUM_* flags. */ static int vtnet_rxq_csum_by_offset(struct vtnet_rxq *rxq, struct mbuf *m, uint16_t eth_type, int ip_start, struct virtio_net_hdr *hdr) { struct vtnet_softc *sc; #if defined(INET) || defined(INET6) int offset = hdr->csum_start + hdr->csum_offset; #endif sc = rxq->vtnrx_sc; /* Only do a basic sanity check on the offset. */ switch (eth_type) { #if defined(INET) case ETHERTYPE_IP: if (__predict_false(offset < ip_start + sizeof(struct ip))) return (1); break; #endif #if defined(INET6) case ETHERTYPE_IPV6: if (__predict_false(offset < ip_start + sizeof(struct ip6_hdr))) return (1); break; #endif default: sc->vtnet_stats.rx_csum_bad_ethtype++; return (1); } /* * Use the offset to determine the appropriate CSUM_* flags. This is * a bit dirty, but we can get by with it since the checksum offsets * happen to be different. We assume the host host does not do IPv4 * header checksum offloading. 
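 *
 * Editor's annotation (illustrative, not part of the committed change):
 * the disambiguation below works because the checksum fields happen to sit
 * at distinct offsets within their L4 headers:
 *
 *	offsetof(struct udphdr, uh_sum)    == 6
 *	offsetof(struct tcphdr, th_sum)    == 16
 *	offsetof(struct sctphdr, checksum) == 8
 *
 * so csum_offset alone identifies the protocol without reparsing the
 * packet, which is the "bit dirty" shortcut mentioned above.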
*/ switch (hdr->csum_offset) { case offsetof(struct udphdr, uh_sum): case offsetof(struct tcphdr, th_sum): m->m_pkthdr.csum_flags |= CSUM_DATA_VALID | CSUM_PSEUDO_HDR; m->m_pkthdr.csum_data = 0xFFFF; break; case offsetof(struct sctphdr, checksum): m->m_pkthdr.csum_flags |= CSUM_SCTP_VALID; break; default: sc->vtnet_stats.rx_csum_bad_offset++; return (1); } return (0); } static int vtnet_rxq_csum_by_parse(struct vtnet_rxq *rxq, struct mbuf *m, uint16_t eth_type, int ip_start, struct virtio_net_hdr *hdr) { struct vtnet_softc *sc; int offset, proto; sc = rxq->vtnrx_sc; switch (eth_type) { #if defined(INET) case ETHERTYPE_IP: { struct ip *ip; if (__predict_false(m->m_len < ip_start + sizeof(struct ip))) return (1); ip = (struct ip *)(m->m_data + ip_start); proto = ip->ip_p; offset = ip_start + (ip->ip_hl << 2); break; } #endif #if defined(INET6) case ETHERTYPE_IPV6: if (__predict_false(m->m_len < ip_start + sizeof(struct ip6_hdr))) return (1); offset = ip6_lasthdr(m, ip_start, IPPROTO_IPV6, &proto); if (__predict_false(offset < 0)) return (1); break; #endif default: sc->vtnet_stats.rx_csum_bad_ethtype++; return (1); } switch (proto) { case IPPROTO_TCP: if (__predict_false(m->m_len < offset + sizeof(struct tcphdr))) return (1); m->m_pkthdr.csum_flags |= CSUM_DATA_VALID | CSUM_PSEUDO_HDR; m->m_pkthdr.csum_data = 0xFFFF; break; case IPPROTO_UDP: if (__predict_false(m->m_len < offset + sizeof(struct udphdr))) return (1); m->m_pkthdr.csum_flags |= CSUM_DATA_VALID | CSUM_PSEUDO_HDR; m->m_pkthdr.csum_data = 0xFFFF; break; case IPPROTO_SCTP: if (__predict_false(m->m_len < offset + sizeof(struct sctphdr))) return (1); m->m_pkthdr.csum_flags |= CSUM_SCTP_VALID; break; default: /* * For the remaining protocols, FreeBSD does not support * checksum offloading, so the checksum will be recomputed. */ #if 0 if_printf(sc->vtnet_ifp, "cksum offload of unsupported " "protocol eth_type=%#x proto=%d csum_start=%d " "csum_offset=%d\n", __func__, eth_type, proto, hdr->csum_start, hdr->csum_offset); #endif break; } return (0); } /* * Set the appropriate CSUM_* flags. Unfortunately, the information * provided is not directly useful to us. The VirtIO header gives the * offset of the checksum, which is all Linux needs, but this is not * how FreeBSD does things. We are forced to peek inside the packet * a bit. * * It would be nice if VirtIO gave us the L4 protocol or if FreeBSD * could accept the offsets and let the stack figure it out. */ static int vtnet_rxq_csum(struct vtnet_rxq *rxq, struct mbuf *m, struct virtio_net_hdr *hdr) { struct ether_header *eh; struct ether_vlan_header *evh; uint16_t eth_type; int offset, error; eh = mtod(m, struct ether_header *); eth_type = ntohs(eh->ether_type); if (eth_type == ETHERTYPE_VLAN) { /* BMV: We should handle nested VLAN tags too. */ evh = mtod(m, struct ether_vlan_header *); eth_type = ntohs(evh->evl_proto); offset = sizeof(struct ether_vlan_header); } else offset = sizeof(struct ether_header); if (hdr->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) error = vtnet_rxq_csum_by_offset(rxq, m, eth_type, offset, hdr); else error = vtnet_rxq_csum_by_parse(rxq, m, eth_type, offset, hdr); return (error); } static void vtnet_rxq_discard_merged_bufs(struct vtnet_rxq *rxq, int nbufs) { struct mbuf *m; while (--nbufs > 0) { m = virtqueue_dequeue(rxq->vtnrx_vq, NULL); if (m == NULL) break; vtnet_rxq_discard_buf(rxq, m); } } static void vtnet_rxq_discard_buf(struct vtnet_rxq *rxq, struct mbuf *m) { int error; /* * Requeue the discarded mbuf. 
This should always be successful * since it was just dequeued. */ error = vtnet_rxq_enqueue_buf(rxq, m); KASSERT(error == 0, ("%s: cannot requeue discarded mbuf %d", __func__, error)); } static int vtnet_rxq_merged_eof(struct vtnet_rxq *rxq, struct mbuf *m_head, int nbufs) { struct vtnet_softc *sc; struct virtqueue *vq; struct mbuf *m, *m_tail; int len; sc = rxq->vtnrx_sc; vq = rxq->vtnrx_vq; m_tail = m_head; while (--nbufs > 0) { m = virtqueue_dequeue(vq, &len); if (m == NULL) { rxq->vtnrx_stats.vrxs_ierrors++; goto fail; } if (vtnet_rxq_new_buf(rxq) != 0) { rxq->vtnrx_stats.vrxs_iqdrops++; vtnet_rxq_discard_buf(rxq, m); if (nbufs > 1) vtnet_rxq_discard_merged_bufs(rxq, nbufs); goto fail; } if (m->m_len < len) len = m->m_len; m->m_len = len; m->m_flags &= ~M_PKTHDR; m_head->m_pkthdr.len += len; m_tail->m_next = m; m_tail = m; } return (0); fail: sc->vtnet_stats.rx_mergeable_failed++; m_freem(m_head); return (1); } static void vtnet_rxq_input(struct vtnet_rxq *rxq, struct mbuf *m, struct virtio_net_hdr *hdr) { struct vtnet_softc *sc; struct ifnet *ifp; struct ether_header *eh; sc = rxq->vtnrx_sc; ifp = sc->vtnet_ifp; if (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) { eh = mtod(m, struct ether_header *); if (eh->ether_type == htons(ETHERTYPE_VLAN)) { vtnet_vlan_tag_remove(m); /* * With the 802.1Q header removed, update the * checksum starting location accordingly. */ if (hdr->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) hdr->csum_start -= ETHER_VLAN_ENCAP_LEN; } } m->m_pkthdr.flowid = rxq->vtnrx_id; M_HASHTYPE_SET(m, M_HASHTYPE_OPAQUE); /* * BMV: FreeBSD does not have the UNNECESSARY and PARTIAL checksum * distinction that Linux does. Need to reevaluate if performing * offloading for the NEEDS_CSUM case is really appropriate. */ if (hdr->flags & (VIRTIO_NET_HDR_F_NEEDS_CSUM | VIRTIO_NET_HDR_F_DATA_VALID)) { if (vtnet_rxq_csum(rxq, m, hdr) == 0) rxq->vtnrx_stats.vrxs_csum++; else rxq->vtnrx_stats.vrxs_csum_failed++; } rxq->vtnrx_stats.vrxs_ipackets++; rxq->vtnrx_stats.vrxs_ibytes += m->m_pkthdr.len; VTNET_RXQ_UNLOCK(rxq); (*ifp->if_input)(ifp, m); VTNET_RXQ_LOCK(rxq); } static int vtnet_rxq_eof(struct vtnet_rxq *rxq) { struct virtio_net_hdr lhdr, *hdr; struct vtnet_softc *sc; struct ifnet *ifp; struct virtqueue *vq; struct mbuf *m; struct virtio_net_hdr_mrg_rxbuf *mhdr; int len, deq, nbufs, adjsz, count; sc = rxq->vtnrx_sc; vq = rxq->vtnrx_vq; ifp = sc->vtnet_ifp; hdr = &lhdr; deq = 0; count = sc->vtnet_rx_process_limit; VTNET_RXQ_LOCK_ASSERT(rxq); #ifdef DEV_NETMAP if (netmap_rx_irq(ifp, 0, &deq)) { return (FALSE); } #endif /* DEV_NETMAP */ while (count-- > 0) { m = virtqueue_dequeue(vq, &len); if (m == NULL) break; deq++; if (len < sc->vtnet_hdr_size + ETHER_HDR_LEN) { rxq->vtnrx_stats.vrxs_ierrors++; vtnet_rxq_discard_buf(rxq, m); continue; } if ((sc->vtnet_flags & VTNET_FLAG_MRG_RXBUFS) == 0) { nbufs = 1; adjsz = sizeof(struct vtnet_rx_header); /* * Account for our pad inserted between the header * and the actual start of the frame. */ len += VTNET_RX_HEADER_PAD; } else { mhdr = mtod(m, struct virtio_net_hdr_mrg_rxbuf *); nbufs = mhdr->num_buffers; adjsz = sizeof(struct virtio_net_hdr_mrg_rxbuf); } if (vtnet_rxq_replace_buf(rxq, m, len) != 0) { rxq->vtnrx_stats.vrxs_iqdrops++; vtnet_rxq_discard_buf(rxq, m); if (nbufs > 1) vtnet_rxq_discard_merged_bufs(rxq, nbufs); continue; } m->m_pkthdr.len = len; m->m_pkthdr.rcvif = ifp; m->m_pkthdr.csum_flags = 0; if (nbufs > 1) { /* Dequeue the rest of chain. 
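 *
 * Editor's annotation (illustrative, not part of the committed change):
 * with VIRTIO_NET_F_MRG_RXBUF the first buffer's header carries a
 * num_buffers count, and vtnet_rxq_merged_eof() stitches that many
 * dequeued buffers into one packet:
 *
 *	m = virtqueue_dequeue(vq, &len);	-- continuation buffer
 *	m->m_len = MIN(m->m_len, len);
 *	m->m_flags &= ~M_PKTHDR;		-- only the head keeps a pkthdr
 *	m_head->m_pkthdr.len += m->m_len;
 *	m_tail->m_next = m;
 *	m_tail = m;
 *
 * and a fresh buffer is pushed back onto the ring via vtnet_rxq_new_buf()
 * for every continuation taken off it.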
*/ if (vtnet_rxq_merged_eof(rxq, m, nbufs) != 0) continue; } /* * Save copy of header before we strip it. For both mergeable * and non-mergeable, the header is at the beginning of the * mbuf data. We no longer need num_buffers, so always use a * regular header. * * BMV: Is this memcpy() expensive? We know the mbuf data is * still valid even after the m_adj(). */ memcpy(hdr, mtod(m, void *), sizeof(struct virtio_net_hdr)); m_adj(m, adjsz); vtnet_rxq_input(rxq, m, hdr); /* Must recheck after dropping the Rx lock. */ if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) break; } if (deq > 0) virtqueue_notify(vq); return (count > 0 ? 0 : EAGAIN); } static void vtnet_rx_vq_intr(void *xrxq) { struct vtnet_softc *sc; struct vtnet_rxq *rxq; struct ifnet *ifp; int tries, more; rxq = xrxq; sc = rxq->vtnrx_sc; ifp = sc->vtnet_ifp; tries = 0; if (__predict_false(rxq->vtnrx_id >= sc->vtnet_act_vq_pairs)) { /* * Ignore this interrupt. Either this is a spurious interrupt * or multiqueue without per-VQ MSIX so every queue needs to * be polled (a brain dead configuration we could try harder * to avoid). */ vtnet_rxq_disable_intr(rxq); return; } VTNET_RXQ_LOCK(rxq); again: if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) { VTNET_RXQ_UNLOCK(rxq); return; } more = vtnet_rxq_eof(rxq); if (more || vtnet_rxq_enable_intr(rxq) != 0) { if (!more) vtnet_rxq_disable_intr(rxq); /* * This is an occasional condition or race (when !more), * so retry a few times before scheduling the taskqueue. */ if (tries++ < VTNET_INTR_DISABLE_RETRIES) goto again; VTNET_RXQ_UNLOCK(rxq); rxq->vtnrx_stats.vrxs_rescheduled++; taskqueue_enqueue(rxq->vtnrx_tq, &rxq->vtnrx_intrtask); } else VTNET_RXQ_UNLOCK(rxq); } static void vtnet_rxq_tq_intr(void *xrxq, int pending) { struct vtnet_softc *sc; struct vtnet_rxq *rxq; struct ifnet *ifp; int more; rxq = xrxq; sc = rxq->vtnrx_sc; ifp = sc->vtnet_ifp; VTNET_RXQ_LOCK(rxq); if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) { VTNET_RXQ_UNLOCK(rxq); return; } more = vtnet_rxq_eof(rxq); if (more || vtnet_rxq_enable_intr(rxq) != 0) { if (!more) vtnet_rxq_disable_intr(rxq); rxq->vtnrx_stats.vrxs_rescheduled++; taskqueue_enqueue(rxq->vtnrx_tq, &rxq->vtnrx_intrtask); } VTNET_RXQ_UNLOCK(rxq); } static int vtnet_txq_below_threshold(struct vtnet_txq *txq) { struct vtnet_softc *sc; struct virtqueue *vq; sc = txq->vtntx_sc; vq = txq->vtntx_vq; return (virtqueue_nfree(vq) <= sc->vtnet_tx_intr_thresh); } static int vtnet_txq_notify(struct vtnet_txq *txq) { struct virtqueue *vq; vq = txq->vtntx_vq; txq->vtntx_watchdog = VTNET_TX_TIMEOUT; virtqueue_notify(vq); if (vtnet_txq_enable_intr(txq) == 0) return (0); /* * Drain frames that were completed since last checked. If this * causes the queue to go above the threshold, the caller should * continue transmitting. */ if (vtnet_txq_eof(txq) != 0 && vtnet_txq_below_threshold(txq) == 0) { virtqueue_disable_intr(vq); return (1); } return (0); } static void vtnet_txq_free_mbufs(struct vtnet_txq *txq) { struct virtqueue *vq; struct vtnet_tx_header *txhdr; int last; vq = txq->vtntx_vq; last = 0; while ((txhdr = virtqueue_drain(vq, &last)) != NULL) { m_freem(txhdr->vth_mbuf); uma_zfree(vtnet_tx_header_zone, txhdr); } KASSERT(virtqueue_empty(vq), ("%s: mbufs remaining in tx queue %p", __func__, txq)); } /* * BMV: Much of this can go away once we finally have offsets in * the mbuf packet header. Bug andre@. 
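 *
 * Editor's annotation (illustrative, not part of the committed change):
 * the interrupt handlers above use the same re-check idiom to close the
 * window between "ring looked empty" and "interrupt re-armed":
 *
 *	more = vtnet_rxq_eof(rxq);
 *	if (more || vtnet_rxq_enable_intr(rxq) != 0) {
 *		if (!more)
 *			vtnet_rxq_disable_intr(rxq);
 *		-- retry a bounded number of times, then hand the
 *		-- remainder to the per-queue taskqueue
 *	}
 *
 * A non-zero return from the enable call means descriptors were already
 * pending when the interrupt was re-armed, so processing must continue
 * rather than wait for an interrupt that may never be raised.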
*/ static int vtnet_txq_offload_ctx(struct vtnet_txq *txq, struct mbuf *m, int *etype, int *proto, int *start) { struct vtnet_softc *sc; struct ether_vlan_header *evh; int offset; sc = txq->vtntx_sc; evh = mtod(m, struct ether_vlan_header *); if (evh->evl_encap_proto == htons(ETHERTYPE_VLAN)) { /* BMV: We should handle nested VLAN tags too. */ *etype = ntohs(evh->evl_proto); offset = sizeof(struct ether_vlan_header); } else { *etype = ntohs(evh->evl_encap_proto); offset = sizeof(struct ether_header); } switch (*etype) { #if defined(INET) case ETHERTYPE_IP: { struct ip *ip, iphdr; if (__predict_false(m->m_len < offset + sizeof(struct ip))) { m_copydata(m, offset, sizeof(struct ip), (caddr_t) &iphdr); ip = &iphdr; } else ip = (struct ip *)(m->m_data + offset); *proto = ip->ip_p; *start = offset + (ip->ip_hl << 2); break; } #endif #if defined(INET6) case ETHERTYPE_IPV6: *proto = -1; *start = ip6_lasthdr(m, offset, IPPROTO_IPV6, proto); /* Assert the network stack sent us a valid packet. */ KASSERT(*start > offset, ("%s: mbuf %p start %d offset %d proto %d", __func__, m, *start, offset, *proto)); break; #endif default: sc->vtnet_stats.tx_csum_bad_ethtype++; return (EINVAL); } return (0); } static int vtnet_txq_offload_tso(struct vtnet_txq *txq, struct mbuf *m, int eth_type, int offset, struct virtio_net_hdr *hdr) { static struct timeval lastecn; static int curecn; struct vtnet_softc *sc; struct tcphdr *tcp, tcphdr; sc = txq->vtntx_sc; if (__predict_false(m->m_len < offset + sizeof(struct tcphdr))) { m_copydata(m, offset, sizeof(struct tcphdr), (caddr_t) &tcphdr); tcp = &tcphdr; } else tcp = (struct tcphdr *)(m->m_data + offset); hdr->hdr_len = offset + (tcp->th_off << 2); hdr->gso_size = m->m_pkthdr.tso_segsz; hdr->gso_type = eth_type == ETHERTYPE_IP ? VIRTIO_NET_HDR_GSO_TCPV4 : VIRTIO_NET_HDR_GSO_TCPV6; if (tcp->th_flags & TH_CWR) { /* * Drop if VIRTIO_NET_F_HOST_ECN was not negotiated. In FreeBSD, * ECN support is not on a per-interface basis, but globally via * the net.inet.tcp.ecn.enable sysctl knob. The default is off. */ if ((sc->vtnet_flags & VTNET_FLAG_TSO_ECN) == 0) { if (ppsratecheck(&lastecn, &curecn, 1)) if_printf(sc->vtnet_ifp, "TSO with ECN not negotiated with host\n"); return (ENOTSUP); } hdr->gso_type |= VIRTIO_NET_HDR_GSO_ECN; } txq->vtntx_stats.vtxs_tso++; return (0); } static struct mbuf * vtnet_txq_offload(struct vtnet_txq *txq, struct mbuf *m, struct virtio_net_hdr *hdr) { struct vtnet_softc *sc; int flags, etype, csum_start, proto, error; sc = txq->vtntx_sc; flags = m->m_pkthdr.csum_flags; error = vtnet_txq_offload_ctx(txq, m, &etype, &proto, &csum_start); if (error) goto drop; if ((etype == ETHERTYPE_IP && flags & VTNET_CSUM_OFFLOAD) || (etype == ETHERTYPE_IPV6 && flags & VTNET_CSUM_OFFLOAD_IPV6)) { /* * We could compare the IP protocol vs the CSUM_ flag too, * but that really should not be necessary. */ hdr->flags |= VIRTIO_NET_HDR_F_NEEDS_CSUM; hdr->csum_start = csum_start; hdr->csum_offset = m->m_pkthdr.csum_data; txq->vtntx_stats.vtxs_csum++; } if (flags & CSUM_TSO) { if (__predict_false(proto != IPPROTO_TCP)) { /* Likely failed to correctly parse the mbuf. 
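 *
 * Editor's annotation (illustrative, not part of the committed change):
 * for a TSO frame, vtnet_txq_offload_tso() above fills the VirtIO header
 * along these lines:
 *
 *	hdr->hdr_len  = l2 + l3 + (tcp->th_off << 2);	-- full header bytes
 *	hdr->gso_size = m->m_pkthdr.tso_segsz;		-- the MSS
 *	hdr->gso_type = VIRTIO_NET_HDR_GSO_TCPV4 (or _TCPV6),
 *	                possibly OR'ed with VIRTIO_NET_HDR_GSO_ECN;
 *
 * together with VIRTIO_NET_HDR_F_NEEDS_CSUM and csum_start/csum_offset
 * pointing at the TCP checksum, which the host needs so it can fix up each
 * segment it carves out of the frame.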
*/ sc->vtnet_stats.tx_tso_not_tcp++; goto drop; } KASSERT(hdr->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM, ("%s: mbuf %p TSO without checksum offload %#x", __func__, m, flags)); error = vtnet_txq_offload_tso(txq, m, etype, csum_start, hdr); if (error) goto drop; } return (m); drop: m_freem(m); return (NULL); } static int vtnet_txq_enqueue_buf(struct vtnet_txq *txq, struct mbuf **m_head, struct vtnet_tx_header *txhdr) { struct vtnet_softc *sc; struct virtqueue *vq; struct sglist *sg; struct mbuf *m; int error; sc = txq->vtntx_sc; vq = txq->vtntx_vq; sg = txq->vtntx_sg; m = *m_head; sglist_reset(sg); error = sglist_append(sg, &txhdr->vth_uhdr, sc->vtnet_hdr_size); KASSERT(error == 0 && sg->sg_nseg == 1, ("%s: error %d adding header to sglist", __func__, error)); error = sglist_append_mbuf(sg, m); if (error) { m = m_defrag(m, M_NOWAIT); if (m == NULL) goto fail; *m_head = m; sc->vtnet_stats.tx_defragged++; error = sglist_append_mbuf(sg, m); if (error) goto fail; } txhdr->vth_mbuf = m; error = virtqueue_enqueue(vq, txhdr, sg, sg->sg_nseg, 0); return (error); fail: sc->vtnet_stats.tx_defrag_failed++; m_freem(*m_head); *m_head = NULL; return (ENOBUFS); } static int vtnet_txq_encap(struct vtnet_txq *txq, struct mbuf **m_head) { struct vtnet_tx_header *txhdr; struct virtio_net_hdr *hdr; struct mbuf *m; int error; m = *m_head; M_ASSERTPKTHDR(m); txhdr = uma_zalloc(vtnet_tx_header_zone, M_NOWAIT | M_ZERO); if (txhdr == NULL) { m_freem(m); *m_head = NULL; return (ENOMEM); } /* * Always use the non-mergeable header, regardless if the feature * was negotiated. For transmit, num_buffers is always zero. The * vtnet_hdr_size is used to enqueue the correct header size. */ hdr = &txhdr->vth_uhdr.hdr; if (m->m_flags & M_VLANTAG) { m = ether_vlanencap(m, m->m_pkthdr.ether_vtag); if ((*m_head = m) == NULL) { error = ENOBUFS; goto fail; } m->m_flags &= ~M_VLANTAG; } if (m->m_pkthdr.csum_flags & VTNET_CSUM_ALL_OFFLOAD) { m = vtnet_txq_offload(txq, m, hdr); if ((*m_head = m) == NULL) { error = ENOBUFS; goto fail; } } error = vtnet_txq_enqueue_buf(txq, m_head, txhdr); if (error == 0) return (0); fail: uma_zfree(vtnet_tx_header_zone, txhdr); return (error); } #ifdef VTNET_LEGACY_TX static void vtnet_start_locked(struct vtnet_txq *txq, struct ifnet *ifp) { struct vtnet_softc *sc; struct virtqueue *vq; struct mbuf *m0; int tries, enq; sc = txq->vtntx_sc; vq = txq->vtntx_vq; tries = 0; VTNET_TXQ_LOCK_ASSERT(txq); if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0 || sc->vtnet_link_active == 0) return; vtnet_txq_eof(txq); again: enq = 0; while (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) { if (virtqueue_full(vq)) break; IFQ_DRV_DEQUEUE(&ifp->if_snd, m0); if (m0 == NULL) break; if (vtnet_txq_encap(txq, &m0) != 0) { if (m0 != NULL) IFQ_DRV_PREPEND(&ifp->if_snd, m0); break; } enq++; ETHER_BPF_MTAP(ifp, m0); } if (enq > 0 && vtnet_txq_notify(txq) != 0) { if (tries++ < VTNET_NOTIFY_RETRIES) goto again; txq->vtntx_stats.vtxs_rescheduled++; taskqueue_enqueue(txq->vtntx_tq, &txq->vtntx_intrtask); } } static void vtnet_start(struct ifnet *ifp) { struct vtnet_softc *sc; struct vtnet_txq *txq; sc = ifp->if_softc; txq = &sc->vtnet_txqs[0]; VTNET_TXQ_LOCK(txq); vtnet_start_locked(txq, ifp); VTNET_TXQ_UNLOCK(txq); } #else /* !VTNET_LEGACY_TX */ static int vtnet_txq_mq_start_locked(struct vtnet_txq *txq, struct mbuf *m) { struct vtnet_softc *sc; struct virtqueue *vq; struct buf_ring *br; struct ifnet *ifp; int enq, tries, error; sc = txq->vtntx_sc; vq = txq->vtntx_vq; br = txq->vtntx_br; ifp = sc->vtnet_ifp; tries = 0; error = 0; 
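/*
 * Illustrative sketch (editor's annotation, not part of the committed
 * change): vtnet_txq_enqueue_buf() above follows the usual pattern for a
 * transmit ring with a bounded segment count. It tries to map the mbuf
 * chain, and if the chain has too many segments, linearizes it once with
 * m_defrag() and retries before dropping it:
 *
 *	error = sglist_append_mbuf(sg, m);
 *	if (error) {
 *		m = m_defrag(m, M_NOWAIT);	-- copy into fewer clusters
 *		if (m == NULL || sglist_append_mbuf(sg, m) != 0)
 *			return (ENOBUFS);	-- counted as tx_defrag_failed
 *	}
 *
 * The virtio_net_hdr is appended as the first segment, so sg->sg_nseg
 * covers the header plus the data handed to virtqueue_enqueue() as
 * read-only descriptors.
 */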
VTNET_TXQ_LOCK_ASSERT(txq); if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0 || sc->vtnet_link_active == 0) { if (m != NULL) error = drbr_enqueue(ifp, br, m); return (error); } if (m != NULL) { error = drbr_enqueue(ifp, br, m); if (error) return (error); } vtnet_txq_eof(txq); again: enq = 0; while ((m = drbr_peek(ifp, br)) != NULL) { if (virtqueue_full(vq)) { drbr_putback(ifp, br, m); break; } if (vtnet_txq_encap(txq, &m) != 0) { if (m != NULL) drbr_putback(ifp, br, m); else drbr_advance(ifp, br); break; } drbr_advance(ifp, br); enq++; ETHER_BPF_MTAP(ifp, m); } if (enq > 0 && vtnet_txq_notify(txq) != 0) { if (tries++ < VTNET_NOTIFY_RETRIES) goto again; txq->vtntx_stats.vtxs_rescheduled++; taskqueue_enqueue(txq->vtntx_tq, &txq->vtntx_intrtask); } return (0); } static int vtnet_txq_mq_start(struct ifnet *ifp, struct mbuf *m) { struct vtnet_softc *sc; struct vtnet_txq *txq; int i, npairs, error; sc = ifp->if_softc; npairs = sc->vtnet_act_vq_pairs; /* check if flowid is set */ if (M_HASHTYPE_GET(m) != M_HASHTYPE_NONE) i = m->m_pkthdr.flowid % npairs; else i = curcpu % npairs; txq = &sc->vtnet_txqs[i]; if (VTNET_TXQ_TRYLOCK(txq) != 0) { error = vtnet_txq_mq_start_locked(txq, m); VTNET_TXQ_UNLOCK(txq); } else { error = drbr_enqueue(ifp, txq->vtntx_br, m); taskqueue_enqueue(txq->vtntx_tq, &txq->vtntx_defrtask); } return (error); } static void vtnet_txq_tq_deferred(void *xtxq, int pending) { struct vtnet_softc *sc; struct vtnet_txq *txq; txq = xtxq; sc = txq->vtntx_sc; VTNET_TXQ_LOCK(txq); if (!drbr_empty(sc->vtnet_ifp, txq->vtntx_br)) vtnet_txq_mq_start_locked(txq, NULL); VTNET_TXQ_UNLOCK(txq); } #endif /* VTNET_LEGACY_TX */ static void vtnet_txq_start(struct vtnet_txq *txq) { struct vtnet_softc *sc; struct ifnet *ifp; sc = txq->vtntx_sc; ifp = sc->vtnet_ifp; #ifdef VTNET_LEGACY_TX if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) vtnet_start_locked(txq, ifp); #else if (!drbr_empty(ifp, txq->vtntx_br)) vtnet_txq_mq_start_locked(txq, NULL); #endif } static void vtnet_txq_tq_intr(void *xtxq, int pending) { struct vtnet_softc *sc; struct vtnet_txq *txq; struct ifnet *ifp; txq = xtxq; sc = txq->vtntx_sc; ifp = sc->vtnet_ifp; VTNET_TXQ_LOCK(txq); if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) { VTNET_TXQ_UNLOCK(txq); return; } vtnet_txq_eof(txq); vtnet_txq_start(txq); VTNET_TXQ_UNLOCK(txq); } static int vtnet_txq_eof(struct vtnet_txq *txq) { struct virtqueue *vq; struct vtnet_tx_header *txhdr; struct mbuf *m; int deq; vq = txq->vtntx_vq; deq = 0; VTNET_TXQ_LOCK_ASSERT(txq); #ifdef DEV_NETMAP if (netmap_tx_irq(txq->vtntx_sc->vtnet_ifp, txq->vtntx_id)) { virtqueue_disable_intr(vq); // XXX luigi return 0; // XXX or 1 ? } #endif /* DEV_NETMAP */ while ((txhdr = virtqueue_dequeue(vq, NULL)) != NULL) { m = txhdr->vth_mbuf; deq++; txq->vtntx_stats.vtxs_opackets++; txq->vtntx_stats.vtxs_obytes += m->m_pkthdr.len; if (m->m_flags & M_MCAST) txq->vtntx_stats.vtxs_omcasts++; m_freem(m); uma_zfree(vtnet_tx_header_zone, txhdr); } if (virtqueue_empty(vq)) txq->vtntx_watchdog = 0; return (deq); } static void vtnet_tx_vq_intr(void *xtxq) { struct vtnet_softc *sc; struct vtnet_txq *txq; struct ifnet *ifp; txq = xtxq; sc = txq->vtntx_sc; ifp = sc->vtnet_ifp; if (__predict_false(txq->vtntx_id >= sc->vtnet_act_vq_pairs)) { /* * Ignore this interrupt. Either this is a spurious interrupt * or multiqueue without per-VQ MSIX so every queue needs to * be polled (a brain dead configuration we could try harder * to avoid). 
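 *
 * Editor's annotation (illustrative, not part of the committed change):
 * the multiqueue transmit path above drains its buf_ring with the drbr
 * peek/putback/advance protocol, so a packet is only consumed once it is
 * actually sitting on the virtqueue:
 *
 *	while ((m = drbr_peek(ifp, br)) != NULL) {
 *		if (virtqueue_full(vq)) {
 *			drbr_putback(ifp, br, m);	-- retry later
 *			break;
 *		}
 *		if (vtnet_txq_encap(txq, &m) != 0) {
 *			-- putback if the (possibly re-headed) mbuf
 *			-- survived, otherwise just advance past it
 *			break;
 *		}
 *		drbr_advance(ifp, br);			-- committed
 *	}
 *
 * The putback takes the current mbuf pointer because encapsulation may
 * have prepended a VLAN header and replaced the chain.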
*/ vtnet_txq_disable_intr(txq); return; } VTNET_TXQ_LOCK(txq); if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) { VTNET_TXQ_UNLOCK(txq); return; } vtnet_txq_eof(txq); vtnet_txq_start(txq); VTNET_TXQ_UNLOCK(txq); } static void vtnet_tx_start_all(struct vtnet_softc *sc) { struct vtnet_txq *txq; int i; VTNET_CORE_LOCK_ASSERT(sc); for (i = 0; i < sc->vtnet_act_vq_pairs; i++) { txq = &sc->vtnet_txqs[i]; VTNET_TXQ_LOCK(txq); vtnet_txq_start(txq); VTNET_TXQ_UNLOCK(txq); } } #ifndef VTNET_LEGACY_TX static void vtnet_qflush(struct ifnet *ifp) { struct vtnet_softc *sc; struct vtnet_txq *txq; struct mbuf *m; int i; sc = ifp->if_softc; for (i = 0; i < sc->vtnet_act_vq_pairs; i++) { txq = &sc->vtnet_txqs[i]; VTNET_TXQ_LOCK(txq); while ((m = buf_ring_dequeue_sc(txq->vtntx_br)) != NULL) m_freem(m); VTNET_TXQ_UNLOCK(txq); } if_qflush(ifp); } #endif static int vtnet_watchdog(struct vtnet_txq *txq) { struct ifnet *ifp; ifp = txq->vtntx_sc->vtnet_ifp; VTNET_TXQ_LOCK(txq); if (txq->vtntx_watchdog == 1) { /* * Only drain completed frames if the watchdog is about to * expire. If any frames were drained, there may be enough * free descriptors now available to transmit queued frames. * In that case, the timer will immediately be decremented * below, but the timeout is generous enough that should not * be a problem. */ if (vtnet_txq_eof(txq) != 0) vtnet_txq_start(txq); } if (txq->vtntx_watchdog == 0 || --txq->vtntx_watchdog) { VTNET_TXQ_UNLOCK(txq); return (0); } VTNET_TXQ_UNLOCK(txq); if_printf(ifp, "watchdog timeout on queue %d\n", txq->vtntx_id); return (1); } static void vtnet_accum_stats(struct vtnet_softc *sc, struct vtnet_rxq_stats *rxacc, struct vtnet_txq_stats *txacc) { bzero(rxacc, sizeof(struct vtnet_rxq_stats)); bzero(txacc, sizeof(struct vtnet_txq_stats)); for (int i = 0; i < sc->vtnet_max_vq_pairs; i++) { struct vtnet_rxq_stats *rxst; struct vtnet_txq_stats *txst; rxst = &sc->vtnet_rxqs[i].vtnrx_stats; rxacc->vrxs_ipackets += rxst->vrxs_ipackets; rxacc->vrxs_ibytes += rxst->vrxs_ibytes; rxacc->vrxs_iqdrops += rxst->vrxs_iqdrops; rxacc->vrxs_csum += rxst->vrxs_csum; rxacc->vrxs_csum_failed += rxst->vrxs_csum_failed; rxacc->vrxs_rescheduled += rxst->vrxs_rescheduled; txst = &sc->vtnet_txqs[i].vtntx_stats; txacc->vtxs_opackets += txst->vtxs_opackets; txacc->vtxs_obytes += txst->vtxs_obytes; txacc->vtxs_csum += txst->vtxs_csum; txacc->vtxs_tso += txst->vtxs_tso; txacc->vtxs_rescheduled += txst->vtxs_rescheduled; } } static uint64_t vtnet_get_counter(if_t ifp, ift_counter cnt) { struct vtnet_softc *sc; struct vtnet_rxq_stats rxaccum; struct vtnet_txq_stats txaccum; sc = if_getsoftc(ifp); vtnet_accum_stats(sc, &rxaccum, &txaccum); switch (cnt) { case IFCOUNTER_IPACKETS: return (rxaccum.vrxs_ipackets); case IFCOUNTER_IQDROPS: return (rxaccum.vrxs_iqdrops); case IFCOUNTER_IERRORS: return (rxaccum.vrxs_ierrors); case IFCOUNTER_OPACKETS: return (txaccum.vtxs_opackets); #ifndef VTNET_LEGACY_TX case IFCOUNTER_OBYTES: return (txaccum.vtxs_obytes); case IFCOUNTER_OMCASTS: return (txaccum.vtxs_omcasts); #endif default: return (if_get_counter_default(ifp, cnt)); } } static void vtnet_tick(void *xsc) { struct vtnet_softc *sc; struct ifnet *ifp; int i, timedout; sc = xsc; ifp = sc->vtnet_ifp; timedout = 0; VTNET_CORE_LOCK_ASSERT(sc); for (i = 0; i < sc->vtnet_act_vq_pairs; i++) timedout |= vtnet_watchdog(&sc->vtnet_txqs[i]); if (timedout != 0) { ifp->if_drv_flags &= ~IFF_DRV_RUNNING; vtnet_init_locked(sc); } else callout_schedule(&sc->vtnet_tick_ch, hz); } static void vtnet_start_taskqueues(struct vtnet_softc *sc) { 
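/*
 * Illustrative summary (editor's annotation, not part of the committed
 * change): the transmit watchdog above is a per-queue countdown. It is
 * armed in vtnet_txq_notify() (vtntx_watchdog = VTNET_TX_TIMEOUT), cleared
 * in vtnet_txq_eof() once the ring drains, and decremented from
 * vtnet_tick() roughly once per second:
 *
 *	if (txq->vtntx_watchdog != 0 && --txq->vtntx_watchdog == 0) {
 *		if_printf(ifp, "watchdog timeout on queue %d\n",
 *		    txq->vtntx_id);
 *		-- vtnet_tick() then reinitializes the interface
 *	}
 *
 * with one last attempt to reap completed frames just before expiry, since
 * descriptors that merely have not been reaped yet are a far more benign
 * condition than a genuinely stuck host.
 */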
device_t dev; struct vtnet_rxq *rxq; struct vtnet_txq *txq; int i, error; dev = sc->vtnet_dev; /* * Errors here are very difficult to recover from - we cannot * easily fail because, if this is during boot, we will hang * when freeing any successfully started taskqueues because * the scheduler isn't up yet. * * Most drivers just ignore the return value - it only fails * with ENOMEM so an error is not likely. */ for (i = 0; i < sc->vtnet_max_vq_pairs; i++) { rxq = &sc->vtnet_rxqs[i]; error = taskqueue_start_threads(&rxq->vtnrx_tq, 1, PI_NET, "%s rxq %d", device_get_nameunit(dev), rxq->vtnrx_id); if (error) { device_printf(dev, "failed to start rx taskq %d\n", rxq->vtnrx_id); } txq = &sc->vtnet_txqs[i]; error = taskqueue_start_threads(&txq->vtntx_tq, 1, PI_NET, "%s txq %d", device_get_nameunit(dev), txq->vtntx_id); if (error) { device_printf(dev, "failed to start tx taskq %d\n", txq->vtntx_id); } } } static void vtnet_free_taskqueues(struct vtnet_softc *sc) { struct vtnet_rxq *rxq; struct vtnet_txq *txq; int i; for (i = 0; i < sc->vtnet_max_vq_pairs; i++) { rxq = &sc->vtnet_rxqs[i]; if (rxq->vtnrx_tq != NULL) { taskqueue_free(rxq->vtnrx_tq); rxq->vtnrx_vq = NULL; } txq = &sc->vtnet_txqs[i]; if (txq->vtntx_tq != NULL) { taskqueue_free(txq->vtntx_tq); txq->vtntx_tq = NULL; } } } static void vtnet_drain_taskqueues(struct vtnet_softc *sc) { struct vtnet_rxq *rxq; struct vtnet_txq *txq; int i; for (i = 0; i < sc->vtnet_max_vq_pairs; i++) { rxq = &sc->vtnet_rxqs[i]; if (rxq->vtnrx_tq != NULL) taskqueue_drain(rxq->vtnrx_tq, &rxq->vtnrx_intrtask); txq = &sc->vtnet_txqs[i]; if (txq->vtntx_tq != NULL) { taskqueue_drain(txq->vtntx_tq, &txq->vtntx_intrtask); #ifndef VTNET_LEGACY_TX taskqueue_drain(txq->vtntx_tq, &txq->vtntx_defrtask); #endif } } } static void vtnet_drain_rxtx_queues(struct vtnet_softc *sc) { struct vtnet_rxq *rxq; struct vtnet_txq *txq; int i; #ifdef DEV_NETMAP if (nm_native_on(NA(sc->vtnet_ifp))) return; #endif /* DEV_NETMAP */ for (i = 0; i < sc->vtnet_act_vq_pairs; i++) { rxq = &sc->vtnet_rxqs[i]; vtnet_rxq_free_mbufs(rxq); txq = &sc->vtnet_txqs[i]; vtnet_txq_free_mbufs(txq); } } static void vtnet_stop_rendezvous(struct vtnet_softc *sc) { struct vtnet_rxq *rxq; struct vtnet_txq *txq; int i; /* * Lock and unlock the per-queue mutex so we known the stop * state is visible. Doing only the active queues should be * sufficient, but it does not cost much extra to do all the * queues. Note we hold the core mutex here too. */ for (i = 0; i < sc->vtnet_max_vq_pairs; i++) { rxq = &sc->vtnet_rxqs[i]; VTNET_RXQ_LOCK(rxq); VTNET_RXQ_UNLOCK(rxq); txq = &sc->vtnet_txqs[i]; VTNET_TXQ_LOCK(txq); VTNET_TXQ_UNLOCK(txq); } } static void vtnet_stop(struct vtnet_softc *sc) { device_t dev; struct ifnet *ifp; dev = sc->vtnet_dev; ifp = sc->vtnet_ifp; VTNET_CORE_LOCK_ASSERT(sc); ifp->if_drv_flags &= ~IFF_DRV_RUNNING; sc->vtnet_link_active = 0; callout_stop(&sc->vtnet_tick_ch); /* Only advisory. */ vtnet_disable_interrupts(sc); /* * Stop the host adapter. This resets it to the pre-initialized * state. It will not generate any interrupts until after it is * reinitialized. */ virtio_stop(dev); vtnet_stop_rendezvous(sc); /* Free any mbufs left in the virtqueues. 
*/ vtnet_drain_rxtx_queues(sc); } static int vtnet_virtio_reinit(struct vtnet_softc *sc) { device_t dev; struct ifnet *ifp; uint64_t features; int mask, error; dev = sc->vtnet_dev; ifp = sc->vtnet_ifp; features = sc->vtnet_features; mask = 0; #if defined(INET) mask |= IFCAP_RXCSUM; #endif #if defined (INET6) mask |= IFCAP_RXCSUM_IPV6; #endif /* * Re-negotiate with the host, removing any disabled receive * features. Transmit features are disabled only on our side * via if_capenable and if_hwassist. */ if (ifp->if_capabilities & mask) { /* * We require both IPv4 and IPv6 offloading to be enabled * in order to negotiated it: VirtIO does not distinguish * between the two. */ if ((ifp->if_capenable & mask) != mask) features &= ~VIRTIO_NET_F_GUEST_CSUM; } if (ifp->if_capabilities & IFCAP_LRO) { if ((ifp->if_capenable & IFCAP_LRO) == 0) features &= ~VTNET_LRO_FEATURES; } if (ifp->if_capabilities & IFCAP_VLAN_HWFILTER) { if ((ifp->if_capenable & IFCAP_VLAN_HWFILTER) == 0) features &= ~VIRTIO_NET_F_CTRL_VLAN; } error = virtio_reinit(dev, features); if (error) device_printf(dev, "virtio reinit error %d\n", error); return (error); } static void vtnet_init_rx_filters(struct vtnet_softc *sc) { struct ifnet *ifp; ifp = sc->vtnet_ifp; if (sc->vtnet_flags & VTNET_FLAG_CTRL_RX) { /* Restore promiscuous and all-multicast modes. */ vtnet_rx_filter(sc); /* Restore filtered MAC addresses. */ vtnet_rx_filter_mac(sc); } if (ifp->if_capenable & IFCAP_VLAN_HWFILTER) vtnet_rx_filter_vlan(sc); } static int vtnet_init_rx_queues(struct vtnet_softc *sc) { device_t dev; struct vtnet_rxq *rxq; int i, clsize, error; dev = sc->vtnet_dev; /* * Use the new cluster size if one has been set (via a MTU * change). Otherwise, use the standard 2K clusters. * * BMV: It might make sense to use page sized clusters as * the default (depending on the features negotiated). */ if (sc->vtnet_rx_new_clsize != 0) { clsize = sc->vtnet_rx_new_clsize; sc->vtnet_rx_new_clsize = 0; } else clsize = MCLBYTES; sc->vtnet_rx_clsize = clsize; sc->vtnet_rx_nmbufs = VTNET_NEEDED_RX_MBUFS(sc, clsize); KASSERT(sc->vtnet_flags & VTNET_FLAG_MRG_RXBUFS || sc->vtnet_rx_nmbufs < sc->vtnet_rx_nsegs, ("%s: too many rx mbufs %d for %d segments", __func__, sc->vtnet_rx_nmbufs, sc->vtnet_rx_nsegs)); #ifdef DEV_NETMAP if (vtnet_netmap_init_rx_buffers(sc)) return 0; #endif /* DEV_NETMAP */ for (i = 0; i < sc->vtnet_act_vq_pairs; i++) { rxq = &sc->vtnet_rxqs[i]; /* Hold the lock to satisfy asserts. 
*/ VTNET_RXQ_LOCK(rxq); error = vtnet_rxq_populate(rxq); VTNET_RXQ_UNLOCK(rxq); if (error) { device_printf(dev, "cannot allocate mbufs for Rx queue %d\n", i); return (error); } } return (0); } static int vtnet_init_tx_queues(struct vtnet_softc *sc) { struct vtnet_txq *txq; int i; for (i = 0; i < sc->vtnet_act_vq_pairs; i++) { txq = &sc->vtnet_txqs[i]; txq->vtntx_watchdog = 0; } return (0); } static int vtnet_init_rxtx_queues(struct vtnet_softc *sc) { int error; error = vtnet_init_rx_queues(sc); if (error) return (error); error = vtnet_init_tx_queues(sc); if (error) return (error); return (0); } static void vtnet_set_active_vq_pairs(struct vtnet_softc *sc) { device_t dev; int npairs; dev = sc->vtnet_dev; if ((sc->vtnet_flags & VTNET_FLAG_MULTIQ) == 0) { sc->vtnet_act_vq_pairs = 1; return; } npairs = sc->vtnet_requested_vq_pairs; if (vtnet_ctrl_mq_cmd(sc, npairs) != 0) { device_printf(dev, "cannot set active queue pairs to %d\n", npairs); npairs = 1; } sc->vtnet_act_vq_pairs = npairs; } static int vtnet_reinit(struct vtnet_softc *sc) { struct ifnet *ifp; int error; ifp = sc->vtnet_ifp; /* Use the current MAC address. */ bcopy(IF_LLADDR(ifp), sc->vtnet_hwaddr, ETHER_ADDR_LEN); vtnet_set_hwaddr(sc); vtnet_set_active_vq_pairs(sc); ifp->if_hwassist = 0; if (ifp->if_capenable & IFCAP_TXCSUM) ifp->if_hwassist |= VTNET_CSUM_OFFLOAD; if (ifp->if_capenable & IFCAP_TXCSUM_IPV6) ifp->if_hwassist |= VTNET_CSUM_OFFLOAD_IPV6; if (ifp->if_capenable & IFCAP_TSO4) ifp->if_hwassist |= CSUM_IP_TSO; if (ifp->if_capenable & IFCAP_TSO6) ifp->if_hwassist |= CSUM_IP6_TSO; if (sc->vtnet_flags & VTNET_FLAG_CTRL_VQ) vtnet_init_rx_filters(sc); error = vtnet_init_rxtx_queues(sc); if (error) return (error); vtnet_enable_interrupts(sc); ifp->if_drv_flags |= IFF_DRV_RUNNING; return (0); } static void vtnet_init_locked(struct vtnet_softc *sc) { device_t dev; struct ifnet *ifp; dev = sc->vtnet_dev; ifp = sc->vtnet_ifp; VTNET_CORE_LOCK_ASSERT(sc); if (ifp->if_drv_flags & IFF_DRV_RUNNING) return; vtnet_stop(sc); /* Reinitialize with the host. */ if (vtnet_virtio_reinit(sc) != 0) goto fail; if (vtnet_reinit(sc) != 0) goto fail; virtio_reinit_complete(dev); vtnet_update_link_status(sc); callout_reset(&sc->vtnet_tick_ch, hz, vtnet_tick, sc); return; fail: vtnet_stop(sc); } static void vtnet_init(void *xsc) { struct vtnet_softc *sc; sc = xsc; #ifdef DEV_NETMAP if (!NA(sc->vtnet_ifp)) { D("try to attach again"); vtnet_netmap_attach(sc); } #endif /* DEV_NETMAP */ VTNET_CORE_LOCK(sc); vtnet_init_locked(sc); VTNET_CORE_UNLOCK(sc); } static void vtnet_free_ctrl_vq(struct vtnet_softc *sc) { struct virtqueue *vq; vq = sc->vtnet_ctrl_vq; /* * The control virtqueue is only polled and therefore it should * already be empty. */ KASSERT(virtqueue_empty(vq), ("%s: ctrl vq %p not empty", __func__, vq)); } static void vtnet_exec_ctrl_cmd(struct vtnet_softc *sc, void *cookie, struct sglist *sg, int readable, int writable) { struct virtqueue *vq; vq = sc->vtnet_ctrl_vq; VTNET_CORE_LOCK_ASSERT(sc); KASSERT(sc->vtnet_flags & VTNET_FLAG_CTRL_VQ, ("%s: CTRL_VQ feature not negotiated", __func__)); if (!virtqueue_empty(vq)) return; if (virtqueue_enqueue(vq, cookie, sg, readable, writable) != 0) return; /* * Poll for the response, but the command is likely already * done when we return from the notify. 
*/ virtqueue_notify(vq); virtqueue_poll(vq, NULL); } static int vtnet_ctrl_mac_cmd(struct vtnet_softc *sc, uint8_t *hwaddr) { struct virtio_net_ctrl_hdr hdr __aligned(2); struct sglist_seg segs[3]; struct sglist sg; uint8_t ack; int error; hdr.class = VIRTIO_NET_CTRL_MAC; hdr.cmd = VIRTIO_NET_CTRL_MAC_ADDR_SET; ack = VIRTIO_NET_ERR; sglist_init(&sg, 3, segs); error = 0; error |= sglist_append(&sg, &hdr, sizeof(struct virtio_net_ctrl_hdr)); error |= sglist_append(&sg, hwaddr, ETHER_ADDR_LEN); error |= sglist_append(&sg, &ack, sizeof(uint8_t)); KASSERT(error == 0 && sg.sg_nseg == 3, ("%s: error %d adding set MAC msg to sglist", __func__, error)); vtnet_exec_ctrl_cmd(sc, &ack, &sg, sg.sg_nseg - 1, 1); return (ack == VIRTIO_NET_OK ? 0 : EIO); } static int vtnet_ctrl_mq_cmd(struct vtnet_softc *sc, uint16_t npairs) { struct sglist_seg segs[3]; struct sglist sg; struct { struct virtio_net_ctrl_hdr hdr; uint8_t pad1; struct virtio_net_ctrl_mq mq; uint8_t pad2; uint8_t ack; } s __aligned(2); int error; s.hdr.class = VIRTIO_NET_CTRL_MQ; s.hdr.cmd = VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET; s.mq.virtqueue_pairs = npairs; s.ack = VIRTIO_NET_ERR; sglist_init(&sg, 3, segs); error = 0; error |= sglist_append(&sg, &s.hdr, sizeof(struct virtio_net_ctrl_hdr)); error |= sglist_append(&sg, &s.mq, sizeof(struct virtio_net_ctrl_mq)); error |= sglist_append(&sg, &s.ack, sizeof(uint8_t)); KASSERT(error == 0 && sg.sg_nseg == 3, ("%s: error %d adding MQ message to sglist", __func__, error)); vtnet_exec_ctrl_cmd(sc, &s.ack, &sg, sg.sg_nseg - 1, 1); return (s.ack == VIRTIO_NET_OK ? 0 : EIO); } static int vtnet_ctrl_rx_cmd(struct vtnet_softc *sc, int cmd, int on) { struct sglist_seg segs[3]; struct sglist sg; struct { struct virtio_net_ctrl_hdr hdr; uint8_t pad1; uint8_t onoff; uint8_t pad2; uint8_t ack; } s __aligned(2); int error; KASSERT(sc->vtnet_flags & VTNET_FLAG_CTRL_RX, ("%s: CTRL_RX feature not negotiated", __func__)); s.hdr.class = VIRTIO_NET_CTRL_RX; s.hdr.cmd = cmd; s.onoff = !!on; s.ack = VIRTIO_NET_ERR; sglist_init(&sg, 3, segs); error = 0; error |= sglist_append(&sg, &s.hdr, sizeof(struct virtio_net_ctrl_hdr)); error |= sglist_append(&sg, &s.onoff, sizeof(uint8_t)); error |= sglist_append(&sg, &s.ack, sizeof(uint8_t)); KASSERT(error == 0 && sg.sg_nseg == 3, ("%s: error %d adding Rx message to sglist", __func__, error)); vtnet_exec_ctrl_cmd(sc, &s.ack, &sg, sg.sg_nseg - 1, 1); return (s.ack == VIRTIO_NET_OK ? 0 : EIO); } static int vtnet_set_promisc(struct vtnet_softc *sc, int on) { return (vtnet_ctrl_rx_cmd(sc, VIRTIO_NET_CTRL_RX_PROMISC, on)); } static int vtnet_set_allmulti(struct vtnet_softc *sc, int on) { return (vtnet_ctrl_rx_cmd(sc, VIRTIO_NET_CTRL_RX_ALLMULTI, on)); } /* * The device defaults to promiscuous mode for backwards compatibility. * Turn it off at attach time if possible. */ static void vtnet_attach_disable_promisc(struct vtnet_softc *sc) { struct ifnet *ifp; ifp = sc->vtnet_ifp; VTNET_CORE_LOCK(sc); if ((sc->vtnet_flags & VTNET_FLAG_CTRL_RX) == 0) { ifp->if_flags |= IFF_PROMISC; } else if (vtnet_set_promisc(sc, 0) != 0) { ifp->if_flags |= IFF_PROMISC; device_printf(sc->vtnet_dev, "cannot disable default promiscuous mode\n"); } VTNET_CORE_UNLOCK(sc); } static void vtnet_rx_filter(struct vtnet_softc *sc) { device_t dev; struct ifnet *ifp; dev = sc->vtnet_dev; ifp = sc->vtnet_ifp; VTNET_CORE_LOCK_ASSERT(sc); if (vtnet_set_promisc(sc, ifp->if_flags & IFF_PROMISC) != 0) device_printf(dev, "cannot %s promiscuous mode\n", ifp->if_flags & IFF_PROMISC ? 
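
The control-queue helpers above all use the same wire format: a two-byte class/command header, a command-specific payload, and a one-byte status the device writes back (the driver exposes the leading segments as device-readable and the final ack byte as device-writable). A minimal sketch of packing the promiscuous on/off command; the constants are the values given in the VirtIO spec, defined locally here only for the demo:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define DEMO_CTRL_RX		0	/* VIRTIO_NET_CTRL_RX */
#define DEMO_CTRL_RX_PROMISC	0	/* VIRTIO_NET_CTRL_RX_PROMISC */
#define DEMO_NET_ERR		1	/* VIRTIO_NET_ERR */

struct demo_ctrl_hdr {			/* mirrors struct virtio_net_ctrl_hdr */
	uint8_t class;
	uint8_t cmd;
};

/*
 * Pack header + on/off byte into 'buf' (the device-readable part) and
 * return the readable length; the ack byte is preset to "error" so a
 * host that never answers is treated as a failure.
 */
static size_t
demo_pack_promisc(uint8_t *buf, int on, uint8_t *ack)
{
	struct demo_ctrl_hdr hdr = { DEMO_CTRL_RX, DEMO_CTRL_RX_PROMISC };
	uint8_t onoff = on ? 1 : 0;

	memcpy(buf, &hdr, sizeof(hdr));
	buf[sizeof(hdr)] = onoff;
	*ack = DEMO_NET_ERR;
	return (sizeof(hdr) + sizeof(onoff));
}

int
main(void)
{
	uint8_t buf[3], ack;
	size_t len = demo_pack_promisc(buf, 1, &ack);

	printf("readable %zu bytes: %u %u %u, ack preset to %u\n",
	    len, buf[0], buf[1], buf[2], ack);
	return (0);
}
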
"enable" : "disable"); if (vtnet_set_allmulti(sc, ifp->if_flags & IFF_ALLMULTI) != 0) device_printf(dev, "cannot %s all-multicast mode\n", ifp->if_flags & IFF_ALLMULTI ? "enable" : "disable"); } static void vtnet_rx_filter_mac(struct vtnet_softc *sc) { struct virtio_net_ctrl_hdr hdr __aligned(2); struct vtnet_mac_filter *filter; struct sglist_seg segs[4]; struct sglist sg; struct ifnet *ifp; struct ifaddr *ifa; struct ifmultiaddr *ifma; int ucnt, mcnt, promisc, allmulti, error; uint8_t ack; ifp = sc->vtnet_ifp; filter = sc->vtnet_mac_filter; ucnt = 0; mcnt = 0; promisc = 0; allmulti = 0; VTNET_CORE_LOCK_ASSERT(sc); KASSERT(sc->vtnet_flags & VTNET_FLAG_CTRL_RX, ("%s: CTRL_RX feature not negotiated", __func__)); /* Unicast MAC addresses: */ if_addr_rlock(ifp); TAILQ_FOREACH(ifa, &ifp->if_addrhead, ifa_link) { if (ifa->ifa_addr->sa_family != AF_LINK) continue; else if (memcmp(LLADDR((struct sockaddr_dl *)ifa->ifa_addr), sc->vtnet_hwaddr, ETHER_ADDR_LEN) == 0) continue; else if (ucnt == VTNET_MAX_MAC_ENTRIES) { promisc = 1; break; } bcopy(LLADDR((struct sockaddr_dl *)ifa->ifa_addr), &filter->vmf_unicast.macs[ucnt], ETHER_ADDR_LEN); ucnt++; } if_addr_runlock(ifp); if (promisc != 0) { filter->vmf_unicast.nentries = 0; if_printf(ifp, "more than %d MAC addresses assigned, " "falling back to promiscuous mode\n", VTNET_MAX_MAC_ENTRIES); } else filter->vmf_unicast.nentries = ucnt; /* Multicast MAC addresses: */ if_maddr_rlock(ifp); TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) { if (ifma->ifma_addr->sa_family != AF_LINK) continue; else if (mcnt == VTNET_MAX_MAC_ENTRIES) { allmulti = 1; break; } bcopy(LLADDR((struct sockaddr_dl *)ifma->ifma_addr), &filter->vmf_multicast.macs[mcnt], ETHER_ADDR_LEN); mcnt++; } if_maddr_runlock(ifp); if (allmulti != 0) { filter->vmf_multicast.nentries = 0; if_printf(ifp, "more than %d multicast MAC addresses " "assigned, falling back to all-multicast mode\n", VTNET_MAX_MAC_ENTRIES); } else filter->vmf_multicast.nentries = mcnt; if (promisc != 0 && allmulti != 0) goto out; hdr.class = VIRTIO_NET_CTRL_MAC; hdr.cmd = VIRTIO_NET_CTRL_MAC_TABLE_SET; ack = VIRTIO_NET_ERR; sglist_init(&sg, 4, segs); error = 0; error |= sglist_append(&sg, &hdr, sizeof(struct virtio_net_ctrl_hdr)); error |= sglist_append(&sg, &filter->vmf_unicast, sizeof(uint32_t) + filter->vmf_unicast.nentries * ETHER_ADDR_LEN); error |= sglist_append(&sg, &filter->vmf_multicast, sizeof(uint32_t) + filter->vmf_multicast.nentries * ETHER_ADDR_LEN); error |= sglist_append(&sg, &ack, sizeof(uint8_t)); KASSERT(error == 0 && sg.sg_nseg == 4, ("%s: error %d adding MAC filter msg to sglist", __func__, error)); vtnet_exec_ctrl_cmd(sc, &ack, &sg, sg.sg_nseg - 1, 1); if (ack != VIRTIO_NET_OK) if_printf(ifp, "error setting host MAC filter table\n"); out: if (promisc != 0 && vtnet_set_promisc(sc, 1) != 0) if_printf(ifp, "cannot enable promiscuous mode\n"); if (allmulti != 0 && vtnet_set_allmulti(sc, 1) != 0) if_printf(ifp, "cannot enable all-multicast mode\n"); } static int vtnet_exec_vlan_filter(struct vtnet_softc *sc, int add, uint16_t tag) { struct sglist_seg segs[3]; struct sglist sg; struct { struct virtio_net_ctrl_hdr hdr; uint8_t pad1; uint16_t tag; uint8_t pad2; uint8_t ack; } s __aligned(2); int error; s.hdr.class = VIRTIO_NET_CTRL_VLAN; s.hdr.cmd = add ? 
VIRTIO_NET_CTRL_VLAN_ADD : VIRTIO_NET_CTRL_VLAN_DEL; s.tag = tag; s.ack = VIRTIO_NET_ERR; sglist_init(&sg, 3, segs); error = 0; error |= sglist_append(&sg, &s.hdr, sizeof(struct virtio_net_ctrl_hdr)); error |= sglist_append(&sg, &s.tag, sizeof(uint16_t)); error |= sglist_append(&sg, &s.ack, sizeof(uint8_t)); KASSERT(error == 0 && sg.sg_nseg == 3, ("%s: error %d adding VLAN message to sglist", __func__, error)); vtnet_exec_ctrl_cmd(sc, &s.ack, &sg, sg.sg_nseg - 1, 1); return (s.ack == VIRTIO_NET_OK ? 0 : EIO); } static void vtnet_rx_filter_vlan(struct vtnet_softc *sc) { uint32_t w; uint16_t tag; int i, bit; VTNET_CORE_LOCK_ASSERT(sc); KASSERT(sc->vtnet_flags & VTNET_FLAG_VLAN_FILTER, ("%s: VLAN_FILTER feature not negotiated", __func__)); /* Enable the filter for each configured VLAN. */ for (i = 0; i < VTNET_VLAN_FILTER_NWORDS; i++) { w = sc->vtnet_vlan_filter[i]; while ((bit = ffs(w) - 1) != -1) { w &= ~(1 << bit); tag = sizeof(w) * CHAR_BIT * i + bit; if (vtnet_exec_vlan_filter(sc, 1, tag) != 0) { device_printf(sc->vtnet_dev, "cannot enable VLAN %d filter\n", tag); } } } } static void vtnet_update_vlan_filter(struct vtnet_softc *sc, int add, uint16_t tag) { struct ifnet *ifp; int idx, bit; ifp = sc->vtnet_ifp; idx = (tag >> 5) & 0x7F; bit = tag & 0x1F; if (tag == 0 || tag > 4095) return; VTNET_CORE_LOCK(sc); if (add) sc->vtnet_vlan_filter[idx] |= (1 << bit); else sc->vtnet_vlan_filter[idx] &= ~(1 << bit); if (ifp->if_capenable & IFCAP_VLAN_HWFILTER && ifp->if_drv_flags & IFF_DRV_RUNNING && vtnet_exec_vlan_filter(sc, add, tag) != 0) { device_printf(sc->vtnet_dev, "cannot %s VLAN %d %s the host filter table\n", add ? "add" : "remove", tag, add ? "to" : "from"); } VTNET_CORE_UNLOCK(sc); } static void vtnet_register_vlan(void *arg, struct ifnet *ifp, uint16_t tag) { if (ifp->if_softc != arg) return; vtnet_update_vlan_filter(arg, 1, tag); } static void vtnet_unregister_vlan(void *arg, struct ifnet *ifp, uint16_t tag) { if (ifp->if_softc != arg) return; vtnet_update_vlan_filter(arg, 0, tag); } static int vtnet_is_link_up(struct vtnet_softc *sc) { device_t dev; struct ifnet *ifp; uint16_t status; dev = sc->vtnet_dev; ifp = sc->vtnet_ifp; if ((ifp->if_capabilities & IFCAP_LINKSTATE) == 0) status = VIRTIO_NET_S_LINK_UP; else status = virtio_read_dev_config_2(dev, offsetof(struct virtio_net_config, status)); return ((status & VIRTIO_NET_S_LINK_UP) != 0); } static void vtnet_update_link_status(struct vtnet_softc *sc) { struct ifnet *ifp; int link; ifp = sc->vtnet_ifp; VTNET_CORE_LOCK_ASSERT(sc); link = vtnet_is_link_up(sc); /* Notify if the link status has changed. 
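
vtnet_update_vlan_filter() above records each 12-bit VLAN tag in a bitmap of 32-bit words (word index tag >> 5, bit tag & 0x1f), and vtnet_rx_filter_vlan() walks that bitmap with ffs() to reprogram the host filter. A standalone sketch of the same bookkeeping, assuming the 4096/32 word count used by the driver:

#include <stdint.h>
#include <stdio.h>
#include <strings.h>	/* ffs() */

#define DEMO_NWORDS	(4096 / 32)	/* same value as VTNET_VLAN_FILTER_NWORDS */

static uint32_t demo_vlan_filter[DEMO_NWORDS];

static void
demo_vlan_set(uint16_t tag, int add)
{
	int idx = (tag >> 5) & 0x7F;
	int bit = tag & 0x1F;

	if (add)
		demo_vlan_filter[idx] |= 1u << bit;
	else
		demo_vlan_filter[idx] &= ~(1u << bit);
}

/* Recover every configured tag from the bitmap, as the re-init path does. */
static void
demo_vlan_walk(void)
{
	uint32_t w;
	int i, bit;

	for (i = 0; i < DEMO_NWORDS; i++) {
		w = demo_vlan_filter[i];
		while ((bit = ffs(w) - 1) != -1) {
			w &= ~(1u << bit);
			printf("vlan %d\n", 32 * i + bit);
		}
	}
}

int
main(void)
{
	demo_vlan_set(5, 1);
	demo_vlan_set(100, 1);
	demo_vlan_set(4094, 1);
	demo_vlan_walk();
	return (0);
}
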
*/ if (link != 0 && sc->vtnet_link_active == 0) { sc->vtnet_link_active = 1; if_link_state_change(ifp, LINK_STATE_UP); } else if (link == 0 && sc->vtnet_link_active != 0) { sc->vtnet_link_active = 0; if_link_state_change(ifp, LINK_STATE_DOWN); } } static int vtnet_ifmedia_upd(struct ifnet *ifp) { struct vtnet_softc *sc; struct ifmedia *ifm; sc = ifp->if_softc; ifm = &sc->vtnet_media; if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER) return (EINVAL); return (0); } static void vtnet_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr) { struct vtnet_softc *sc; sc = ifp->if_softc; ifmr->ifm_status = IFM_AVALID; ifmr->ifm_active = IFM_ETHER; VTNET_CORE_LOCK(sc); if (vtnet_is_link_up(sc) != 0) { ifmr->ifm_status |= IFM_ACTIVE; ifmr->ifm_active |= VTNET_MEDIATYPE; } else ifmr->ifm_active |= IFM_NONE; VTNET_CORE_UNLOCK(sc); } static void vtnet_set_hwaddr(struct vtnet_softc *sc) { device_t dev; int i; dev = sc->vtnet_dev; if (sc->vtnet_flags & VTNET_FLAG_CTRL_MAC) { if (vtnet_ctrl_mac_cmd(sc, sc->vtnet_hwaddr) != 0) device_printf(dev, "unable to set MAC address\n"); } else if (sc->vtnet_flags & VTNET_FLAG_MAC) { for (i = 0; i < ETHER_ADDR_LEN; i++) { virtio_write_dev_config_1(dev, offsetof(struct virtio_net_config, mac) + i, sc->vtnet_hwaddr[i]); } } } static void vtnet_get_hwaddr(struct vtnet_softc *sc) { device_t dev; int i; dev = sc->vtnet_dev; if ((sc->vtnet_flags & VTNET_FLAG_MAC) == 0) { /* * Generate a random locally administered unicast address. * * It would be nice to generate the same MAC address across * reboots, but it seems all the hosts currently available * support the MAC feature, so this isn't too important. */ sc->vtnet_hwaddr[0] = 0xB2; arc4rand(&sc->vtnet_hwaddr[1], ETHER_ADDR_LEN - 1, 0); vtnet_set_hwaddr(sc); return; } for (i = 0; i < ETHER_ADDR_LEN; i++) { sc->vtnet_hwaddr[i] = virtio_read_dev_config_1(dev, offsetof(struct virtio_net_config, mac) + i); } } static void vtnet_vlan_tag_remove(struct mbuf *m) { struct ether_vlan_header *evh; evh = mtod(m, struct ether_vlan_header *); m->m_pkthdr.ether_vtag = ntohs(evh->evl_tag); m->m_flags |= M_VLANTAG; /* Strip the 802.1Q header. */ bcopy((char *) evh, (char *) evh + ETHER_VLAN_ENCAP_LEN, ETHER_HDR_LEN - ETHER_TYPE_LEN); m_adj(m, ETHER_VLAN_ENCAP_LEN); } static void vtnet_set_rx_process_limit(struct vtnet_softc *sc) { int limit; limit = vtnet_tunable_int(sc, "rx_process_limit", vtnet_rx_process_limit); if (limit < 0) limit = INT_MAX; sc->vtnet_rx_process_limit = limit; } static void vtnet_set_tx_intr_threshold(struct vtnet_softc *sc) { int size, thresh; size = virtqueue_size(sc->vtnet_txqs[0].vtntx_vq); /* * The Tx interrupt is disabled until the queue free count falls * below our threshold. Completed frames are drained from the Tx * virtqueue before transmitting new frames and in the watchdog * callout, so the frequency of Tx interrupts is greatly reduced, * at the cost of not freeing mbufs as quickly as they otherwise * would be. * * N.B. We assume all the Tx queues are the same size. */ thresh = size / 4; /* * Without indirect descriptors, leave enough room for the most * segments we handle. 
*/ if ((sc->vtnet_flags & VTNET_FLAG_INDIRECT) == 0 && thresh < sc->vtnet_tx_nsegs) thresh = sc->vtnet_tx_nsegs; sc->vtnet_tx_intr_thresh = thresh; } static void vtnet_setup_rxq_sysctl(struct sysctl_ctx_list *ctx, struct sysctl_oid_list *child, struct vtnet_rxq *rxq) { struct sysctl_oid *node; struct sysctl_oid_list *list; struct vtnet_rxq_stats *stats; char namebuf[16]; snprintf(namebuf, sizeof(namebuf), "rxq%d", rxq->vtnrx_id); node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, namebuf, CTLFLAG_RD, NULL, "Receive Queue"); list = SYSCTL_CHILDREN(node); stats = &rxq->vtnrx_stats; SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "ipackets", CTLFLAG_RD, &stats->vrxs_ipackets, "Receive packets"); SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "ibytes", CTLFLAG_RD, &stats->vrxs_ibytes, "Receive bytes"); SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "iqdrops", CTLFLAG_RD, &stats->vrxs_iqdrops, "Receive drops"); SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "ierrors", CTLFLAG_RD, &stats->vrxs_ierrors, "Receive errors"); SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "csum", CTLFLAG_RD, &stats->vrxs_csum, "Receive checksum offloaded"); SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "csum_failed", CTLFLAG_RD, &stats->vrxs_csum_failed, "Receive checksum offload failed"); SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "rescheduled", CTLFLAG_RD, &stats->vrxs_rescheduled, "Receive interrupt handler rescheduled"); } static void vtnet_setup_txq_sysctl(struct sysctl_ctx_list *ctx, struct sysctl_oid_list *child, struct vtnet_txq *txq) { struct sysctl_oid *node; struct sysctl_oid_list *list; struct vtnet_txq_stats *stats; char namebuf[16]; snprintf(namebuf, sizeof(namebuf), "txq%d", txq->vtntx_id); node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, namebuf, CTLFLAG_RD, NULL, "Transmit Queue"); list = SYSCTL_CHILDREN(node); stats = &txq->vtntx_stats; SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "opackets", CTLFLAG_RD, &stats->vtxs_opackets, "Transmit packets"); SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "obytes", CTLFLAG_RD, &stats->vtxs_obytes, "Transmit bytes"); SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "omcasts", CTLFLAG_RD, &stats->vtxs_omcasts, "Transmit multicasts"); SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "csum", CTLFLAG_RD, &stats->vtxs_csum, "Transmit checksum offloaded"); SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "tso", CTLFLAG_RD, &stats->vtxs_tso, "Transmit segmentation offloaded"); SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "rescheduled", CTLFLAG_RD, &stats->vtxs_rescheduled, "Transmit interrupt handler rescheduled"); } static void vtnet_setup_queue_sysctl(struct vtnet_softc *sc) { device_t dev; struct sysctl_ctx_list *ctx; struct sysctl_oid *tree; struct sysctl_oid_list *child; int i; dev = sc->vtnet_dev; ctx = device_get_sysctl_ctx(dev); tree = device_get_sysctl_tree(dev); child = SYSCTL_CHILDREN(tree); for (i = 0; i < sc->vtnet_max_vq_pairs; i++) { vtnet_setup_rxq_sysctl(ctx, child, &sc->vtnet_rxqs[i]); vtnet_setup_txq_sysctl(ctx, child, &sc->vtnet_txqs[i]); } } static void vtnet_setup_stat_sysctl(struct sysctl_ctx_list *ctx, struct sysctl_oid_list *child, struct vtnet_softc *sc) { struct vtnet_statistics *stats; struct vtnet_rxq_stats rxaccum; struct vtnet_txq_stats txaccum; vtnet_accum_stats(sc, &rxaccum, &txaccum); stats = &sc->vtnet_stats; stats->rx_csum_offloaded = rxaccum.vrxs_csum; stats->rx_csum_failed = rxaccum.vrxs_csum_failed; stats->rx_task_rescheduled = rxaccum.vrxs_rescheduled; stats->tx_csum_offloaded = txaccum.vtxs_csum; stats->tx_tso_offloaded = txaccum.vtxs_tso; stats->tx_task_rescheduled = txaccum.vtxs_rescheduled; SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, 
"mbuf_alloc_failed", CTLFLAG_RD, &stats->mbuf_alloc_failed, "Mbuf cluster allocation failures"); SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "rx_frame_too_large", CTLFLAG_RD, &stats->rx_frame_too_large, "Received frame larger than the mbuf chain"); SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "rx_enq_replacement_failed", CTLFLAG_RD, &stats->rx_enq_replacement_failed, "Enqueuing the replacement receive mbuf failed"); SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "rx_mergeable_failed", CTLFLAG_RD, &stats->rx_mergeable_failed, "Mergeable buffers receive failures"); SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "rx_csum_bad_ethtype", CTLFLAG_RD, &stats->rx_csum_bad_ethtype, "Received checksum offloaded buffer with unsupported " "Ethernet type"); SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "rx_csum_bad_ipproto", CTLFLAG_RD, &stats->rx_csum_bad_ipproto, "Received checksum offloaded buffer with incorrect IP protocol"); SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "rx_csum_bad_offset", CTLFLAG_RD, &stats->rx_csum_bad_offset, "Received checksum offloaded buffer with incorrect offset"); SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "rx_csum_bad_proto", CTLFLAG_RD, &stats->rx_csum_bad_proto, "Received checksum offloaded buffer with incorrect protocol"); SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "rx_csum_failed", CTLFLAG_RD, &stats->rx_csum_failed, "Received buffer checksum offload failed"); SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "rx_csum_offloaded", CTLFLAG_RD, &stats->rx_csum_offloaded, "Received buffer checksum offload succeeded"); SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "rx_task_rescheduled", CTLFLAG_RD, &stats->rx_task_rescheduled, "Times the receive interrupt task rescheduled itself"); SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "tx_csum_bad_ethtype", CTLFLAG_RD, &stats->tx_csum_bad_ethtype, "Aborted transmit of checksum offloaded buffer with unknown " "Ethernet type"); SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "tx_tso_bad_ethtype", CTLFLAG_RD, &stats->tx_tso_bad_ethtype, "Aborted transmit of TSO buffer with unknown Ethernet type"); SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "tx_tso_not_tcp", CTLFLAG_RD, &stats->tx_tso_not_tcp, "Aborted transmit of TSO buffer with non TCP protocol"); SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "tx_defragged", CTLFLAG_RD, &stats->tx_defragged, "Transmit mbufs defragged"); SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "tx_defrag_failed", CTLFLAG_RD, &stats->tx_defrag_failed, "Aborted transmit of buffer because defrag failed"); SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "tx_csum_offloaded", CTLFLAG_RD, &stats->tx_csum_offloaded, "Offloaded checksum of transmitted buffer"); SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "tx_tso_offloaded", CTLFLAG_RD, &stats->tx_tso_offloaded, "Segmentation offload of transmitted buffer"); SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "tx_task_rescheduled", CTLFLAG_RD, &stats->tx_task_rescheduled, "Times the transmit interrupt task rescheduled itself"); } static void vtnet_setup_sysctl(struct vtnet_softc *sc) { device_t dev; struct sysctl_ctx_list *ctx; struct sysctl_oid *tree; struct sysctl_oid_list *child; dev = sc->vtnet_dev; ctx = device_get_sysctl_ctx(dev); tree = device_get_sysctl_tree(dev); child = SYSCTL_CHILDREN(tree); SYSCTL_ADD_INT(ctx, child, OID_AUTO, "max_vq_pairs", CTLFLAG_RD, &sc->vtnet_max_vq_pairs, 0, "Maximum number of supported virtqueue pairs"); SYSCTL_ADD_INT(ctx, child, OID_AUTO, "requested_vq_pairs", CTLFLAG_RD, &sc->vtnet_requested_vq_pairs, 0, "Requested number of virtqueue pairs"); SYSCTL_ADD_INT(ctx, child, OID_AUTO, "act_vq_pairs", CTLFLAG_RD, &sc->vtnet_act_vq_pairs, 0, "Number of active 
virtqueue pairs"); vtnet_setup_stat_sysctl(ctx, child, sc); } static int vtnet_rxq_enable_intr(struct vtnet_rxq *rxq) { return (virtqueue_enable_intr(rxq->vtnrx_vq)); } static void vtnet_rxq_disable_intr(struct vtnet_rxq *rxq) { virtqueue_disable_intr(rxq->vtnrx_vq); } static int vtnet_txq_enable_intr(struct vtnet_txq *txq) { struct virtqueue *vq; vq = txq->vtntx_vq; if (vtnet_txq_below_threshold(txq) != 0) return (virtqueue_postpone_intr(vq, VQ_POSTPONE_LONG)); /* * The free count is above our threshold. Keep the Tx interrupt * disabled until the queue is fuller. */ return (0); } static void vtnet_txq_disable_intr(struct vtnet_txq *txq) { virtqueue_disable_intr(txq->vtntx_vq); } static void vtnet_enable_rx_interrupts(struct vtnet_softc *sc) { int i; for (i = 0; i < sc->vtnet_act_vq_pairs; i++) vtnet_rxq_enable_intr(&sc->vtnet_rxqs[i]); } static void vtnet_enable_tx_interrupts(struct vtnet_softc *sc) { int i; for (i = 0; i < sc->vtnet_act_vq_pairs; i++) vtnet_txq_enable_intr(&sc->vtnet_txqs[i]); } static void vtnet_enable_interrupts(struct vtnet_softc *sc) { vtnet_enable_rx_interrupts(sc); vtnet_enable_tx_interrupts(sc); } static void vtnet_disable_rx_interrupts(struct vtnet_softc *sc) { int i; for (i = 0; i < sc->vtnet_act_vq_pairs; i++) vtnet_rxq_disable_intr(&sc->vtnet_rxqs[i]); } static void vtnet_disable_tx_interrupts(struct vtnet_softc *sc) { int i; for (i = 0; i < sc->vtnet_act_vq_pairs; i++) vtnet_txq_disable_intr(&sc->vtnet_txqs[i]); } static void vtnet_disable_interrupts(struct vtnet_softc *sc) { vtnet_disable_rx_interrupts(sc); vtnet_disable_tx_interrupts(sc); } static int vtnet_tunable_int(struct vtnet_softc *sc, const char *knob, int def) { char path[64]; snprintf(path, sizeof(path), "hw.vtnet.%d.%s", device_get_unit(sc->vtnet_dev), knob); TUNABLE_INT_FETCH(path, &def); return (def); } Index: head/sys/dev/virtio/pci/virtio_pci.c =================================================================== --- head/sys/dev/virtio/pci/virtio_pci.c (revision 327948) +++ head/sys/dev/virtio/pci/virtio_pci.c (revision 327949) @@ -1,1332 +1,1332 @@ /*- * SPDX-License-Identifier: BSD-2-Clause-FreeBSD * * Copyright (c) 2011, Bryan Venteicher * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice unmodified, this list of conditions, and the following * disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ /* Driver for the VirtIO PCI interface. 
*/ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "virtio_bus_if.h" #include "virtio_if.h" struct vtpci_interrupt { struct resource *vti_irq; int vti_rid; void *vti_handler; }; struct vtpci_virtqueue { struct virtqueue *vtv_vq; int vtv_no_intr; }; struct vtpci_softc { device_t vtpci_dev; struct resource *vtpci_res; struct resource *vtpci_msix_res; uint64_t vtpci_features; uint32_t vtpci_flags; #define VTPCI_FLAG_NO_MSI 0x0001 #define VTPCI_FLAG_NO_MSIX 0x0002 #define VTPCI_FLAG_LEGACY 0x1000 #define VTPCI_FLAG_MSI 0x2000 #define VTPCI_FLAG_MSIX 0x4000 #define VTPCI_FLAG_SHARED_MSIX 0x8000 #define VTPCI_FLAG_ITYPE_MASK 0xF000 /* This "bus" will only ever have one child. */ device_t vtpci_child_dev; struct virtio_feature_desc *vtpci_child_feat_desc; int vtpci_nvqs; struct vtpci_virtqueue *vtpci_vqs; /* * Ideally, each virtqueue that the driver provides a callback for will * receive its own MSIX vector. If there are not sufficient vectors * available, then attempt to have all the VQs share one vector. For * MSIX, the configuration changed notifications must be on their own * vector. * * If MSIX is not available, we will attempt to have the whole device * share one MSI vector, and then, finally, one legacy interrupt. */ struct vtpci_interrupt vtpci_device_interrupt; struct vtpci_interrupt *vtpci_msix_vq_interrupts; int vtpci_nmsix_resources; }; static int vtpci_probe(device_t); static int vtpci_attach(device_t); static int vtpci_detach(device_t); static int vtpci_suspend(device_t); static int vtpci_resume(device_t); static int vtpci_shutdown(device_t); static void vtpci_driver_added(device_t, driver_t *); static void vtpci_child_detached(device_t, device_t); static int vtpci_read_ivar(device_t, device_t, int, uintptr_t *); static int vtpci_write_ivar(device_t, device_t, int, uintptr_t); static uint64_t vtpci_negotiate_features(device_t, uint64_t); static int vtpci_with_feature(device_t, uint64_t); static int vtpci_alloc_virtqueues(device_t, int, int, struct vq_alloc_info *); static int vtpci_setup_intr(device_t, enum intr_type); static void vtpci_stop(device_t); static int vtpci_reinit(device_t, uint64_t); static void vtpci_reinit_complete(device_t); static void vtpci_notify_virtqueue(device_t, uint16_t); static uint8_t vtpci_get_status(device_t); static void vtpci_set_status(device_t, uint8_t); static void vtpci_read_dev_config(device_t, bus_size_t, void *, int); static void vtpci_write_dev_config(device_t, bus_size_t, void *, int); static void vtpci_describe_features(struct vtpci_softc *, const char *, uint64_t); static void vtpci_probe_and_attach_child(struct vtpci_softc *); static int vtpci_alloc_msix(struct vtpci_softc *, int); static int vtpci_alloc_msi(struct vtpci_softc *); static int vtpci_alloc_intr_msix_pervq(struct vtpci_softc *); static int vtpci_alloc_intr_msix_shared(struct vtpci_softc *); static int vtpci_alloc_intr_msi(struct vtpci_softc *); static int vtpci_alloc_intr_legacy(struct vtpci_softc *); static int vtpci_alloc_interrupt(struct vtpci_softc *, int, int, struct vtpci_interrupt *); static int vtpci_alloc_intr_resources(struct vtpci_softc *); static int vtpci_setup_legacy_interrupt(struct vtpci_softc *, enum intr_type); static int vtpci_setup_pervq_msix_interrupts(struct vtpci_softc *, enum intr_type); static int vtpci_setup_msix_interrupts(struct vtpci_softc *, enum intr_type); static int vtpci_setup_interrupts(struct vtpci_softc *, enum 
intr_type); static int vtpci_register_msix_vector(struct vtpci_softc *, int, struct vtpci_interrupt *); static int vtpci_set_host_msix_vectors(struct vtpci_softc *); static int vtpci_reinit_virtqueue(struct vtpci_softc *, int); static void vtpci_free_interrupt(struct vtpci_softc *, struct vtpci_interrupt *); static void vtpci_free_interrupts(struct vtpci_softc *); static void vtpci_free_virtqueues(struct vtpci_softc *); static void vtpci_release_child_resources(struct vtpci_softc *); static void vtpci_cleanup_setup_intr_attempt(struct vtpci_softc *); static void vtpci_reset(struct vtpci_softc *); static void vtpci_select_virtqueue(struct vtpci_softc *, int); static void vtpci_legacy_intr(void *); static int vtpci_vq_shared_intr_filter(void *); static void vtpci_vq_shared_intr(void *); static int vtpci_vq_intr_filter(void *); static void vtpci_vq_intr(void *); static void vtpci_config_intr(void *); #define vtpci_setup_msi_interrupt vtpci_setup_legacy_interrupt #define VIRTIO_PCI_CONFIG(_sc) \ VIRTIO_PCI_CONFIG_OFF((((_sc)->vtpci_flags & VTPCI_FLAG_MSIX)) != 0) /* * I/O port read/write wrappers. */ #define vtpci_read_config_1(sc, o) bus_read_1((sc)->vtpci_res, (o)) #define vtpci_read_config_2(sc, o) bus_read_2((sc)->vtpci_res, (o)) #define vtpci_read_config_4(sc, o) bus_read_4((sc)->vtpci_res, (o)) #define vtpci_write_config_1(sc, o, v) bus_write_1((sc)->vtpci_res, (o), (v)) #define vtpci_write_config_2(sc, o, v) bus_write_2((sc)->vtpci_res, (o), (v)) #define vtpci_write_config_4(sc, o, v) bus_write_4((sc)->vtpci_res, (o), (v)) /* Tunables. */ static int vtpci_disable_msix = 0; TUNABLE_INT("hw.virtio.pci.disable_msix", &vtpci_disable_msix); static device_method_t vtpci_methods[] = { /* Device interface. */ DEVMETHOD(device_probe, vtpci_probe), DEVMETHOD(device_attach, vtpci_attach), DEVMETHOD(device_detach, vtpci_detach), DEVMETHOD(device_suspend, vtpci_suspend), DEVMETHOD(device_resume, vtpci_resume), DEVMETHOD(device_shutdown, vtpci_shutdown), /* Bus interface. */ DEVMETHOD(bus_driver_added, vtpci_driver_added), DEVMETHOD(bus_child_detached, vtpci_child_detached), DEVMETHOD(bus_read_ivar, vtpci_read_ivar), DEVMETHOD(bus_write_ivar, vtpci_write_ivar), /* VirtIO bus interface. 
*/ DEVMETHOD(virtio_bus_negotiate_features, vtpci_negotiate_features), DEVMETHOD(virtio_bus_with_feature, vtpci_with_feature), DEVMETHOD(virtio_bus_alloc_virtqueues, vtpci_alloc_virtqueues), DEVMETHOD(virtio_bus_setup_intr, vtpci_setup_intr), DEVMETHOD(virtio_bus_stop, vtpci_stop), DEVMETHOD(virtio_bus_reinit, vtpci_reinit), DEVMETHOD(virtio_bus_reinit_complete, vtpci_reinit_complete), DEVMETHOD(virtio_bus_notify_vq, vtpci_notify_virtqueue), DEVMETHOD(virtio_bus_read_device_config, vtpci_read_dev_config), DEVMETHOD(virtio_bus_write_device_config, vtpci_write_dev_config), DEVMETHOD_END }; static driver_t vtpci_driver = { "virtio_pci", vtpci_methods, sizeof(struct vtpci_softc) }; devclass_t vtpci_devclass; DRIVER_MODULE(virtio_pci, pci, vtpci_driver, vtpci_devclass, 0, 0); MODULE_VERSION(virtio_pci, 1); MODULE_DEPEND(virtio_pci, pci, 1, 1, 1); MODULE_DEPEND(virtio_pci, virtio, 1, 1, 1); static int vtpci_probe(device_t dev) { char desc[36]; const char *name; if (pci_get_vendor(dev) != VIRTIO_PCI_VENDORID) return (ENXIO); if (pci_get_device(dev) < VIRTIO_PCI_DEVICEID_MIN || pci_get_device(dev) > VIRTIO_PCI_DEVICEID_MAX) return (ENXIO); if (pci_get_revid(dev) != VIRTIO_PCI_ABI_VERSION) return (ENXIO); name = virtio_device_name(pci_get_subdevice(dev)); if (name == NULL) name = "Unknown"; snprintf(desc, sizeof(desc), "VirtIO PCI %s adapter", name); device_set_desc_copy(dev, desc); return (BUS_PROBE_DEFAULT); } static int vtpci_attach(device_t dev) { struct vtpci_softc *sc; device_t child; int rid; sc = device_get_softc(dev); sc->vtpci_dev = dev; pci_enable_busmaster(dev); rid = PCIR_BAR(0); sc->vtpci_res = bus_alloc_resource_any(dev, SYS_RES_IOPORT, &rid, RF_ACTIVE); if (sc->vtpci_res == NULL) { device_printf(dev, "cannot map I/O space\n"); return (ENXIO); } if (pci_find_cap(dev, PCIY_MSI, NULL) != 0) sc->vtpci_flags |= VTPCI_FLAG_NO_MSI; if (pci_find_cap(dev, PCIY_MSIX, NULL) == 0) { rid = PCIR_BAR(1); sc->vtpci_msix_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid, RF_ACTIVE); } if (sc->vtpci_msix_res == NULL) sc->vtpci_flags |= VTPCI_FLAG_NO_MSIX; vtpci_reset(sc); /* Tell the host we've noticed this device. */ vtpci_set_status(dev, VIRTIO_CONFIG_STATUS_ACK); if ((child = device_add_child(dev, NULL, -1)) == NULL) { device_printf(dev, "cannot create child device\n"); vtpci_set_status(dev, VIRTIO_CONFIG_STATUS_FAILED); vtpci_detach(dev); return (ENOMEM); } sc->vtpci_child_dev = child; vtpci_probe_and_attach_child(sc); return (0); } static int vtpci_detach(device_t dev) { struct vtpci_softc *sc; device_t child; int error; sc = device_get_softc(dev); if ((child = sc->vtpci_child_dev) != NULL) { error = device_delete_child(dev, child); if (error) return (error); sc->vtpci_child_dev = NULL; } vtpci_reset(sc); if (sc->vtpci_msix_res != NULL) { bus_release_resource(dev, SYS_RES_MEMORY, PCIR_BAR(1), sc->vtpci_msix_res); sc->vtpci_msix_res = NULL; } if (sc->vtpci_res != NULL) { bus_release_resource(dev, SYS_RES_IOPORT, PCIR_BAR(0), sc->vtpci_res); sc->vtpci_res = NULL; } return (0); } static int vtpci_suspend(device_t dev) { return (bus_generic_suspend(dev)); } static int vtpci_resume(device_t dev) { return (bus_generic_resume(dev)); } static int vtpci_shutdown(device_t dev) { (void) bus_generic_shutdown(dev); /* Forcibly stop the host device. 
*/ vtpci_stop(dev); return (0); } static void vtpci_driver_added(device_t dev, driver_t *driver) { struct vtpci_softc *sc; sc = device_get_softc(dev); vtpci_probe_and_attach_child(sc); } static void vtpci_child_detached(device_t dev, device_t child) { struct vtpci_softc *sc; sc = device_get_softc(dev); vtpci_reset(sc); vtpci_release_child_resources(sc); } static int vtpci_read_ivar(device_t dev, device_t child, int index, uintptr_t *result) { struct vtpci_softc *sc; sc = device_get_softc(dev); if (sc->vtpci_child_dev != child) return (ENOENT); switch (index) { case VIRTIO_IVAR_DEVTYPE: case VIRTIO_IVAR_SUBDEVICE: *result = pci_get_subdevice(dev); break; case VIRTIO_IVAR_VENDOR: *result = pci_get_vendor(dev); break; case VIRTIO_IVAR_DEVICE: *result = pci_get_device(dev); break; case VIRTIO_IVAR_SUBVENDOR: *result = pci_get_subdevice(dev); break; default: return (ENOENT); } return (0); } static int vtpci_write_ivar(device_t dev, device_t child, int index, uintptr_t value) { struct vtpci_softc *sc; sc = device_get_softc(dev); if (sc->vtpci_child_dev != child) return (ENOENT); switch (index) { case VIRTIO_IVAR_FEATURE_DESC: sc->vtpci_child_feat_desc = (void *) value; break; default: return (ENOENT); } return (0); } static uint64_t vtpci_negotiate_features(device_t dev, uint64_t child_features) { struct vtpci_softc *sc; uint64_t host_features, features; sc = device_get_softc(dev); host_features = vtpci_read_config_4(sc, VIRTIO_PCI_HOST_FEATURES); vtpci_describe_features(sc, "host", host_features); /* * Limit negotiated features to what the driver, virtqueue, and * host all support. */ features = host_features & child_features; features = virtqueue_filter_features(features); sc->vtpci_features = features; vtpci_describe_features(sc, "negotiated", features); vtpci_write_config_4(sc, VIRTIO_PCI_GUEST_FEATURES, features); return (features); } static int vtpci_with_feature(device_t dev, uint64_t feature) { struct vtpci_softc *sc; sc = device_get_softc(dev); return ((sc->vtpci_features & feature) != 0); } static int vtpci_alloc_virtqueues(device_t dev, int flags, int nvqs, struct vq_alloc_info *vq_info) { struct vtpci_softc *sc; struct virtqueue *vq; struct vtpci_virtqueue *vqx; struct vq_alloc_info *info; int idx, error; uint16_t size; sc = device_get_softc(dev); if (sc->vtpci_nvqs != 0) return (EALREADY); if (nvqs <= 0) return (EINVAL); - sc->vtpci_vqs = malloc(nvqs * sizeof(struct vtpci_virtqueue), + sc->vtpci_vqs = mallocarray(nvqs, sizeof(struct vtpci_virtqueue), M_DEVBUF, M_NOWAIT | M_ZERO); if (sc->vtpci_vqs == NULL) return (ENOMEM); for (idx = 0; idx < nvqs; idx++) { vqx = &sc->vtpci_vqs[idx]; info = &vq_info[idx]; vtpci_select_virtqueue(sc, idx); size = vtpci_read_config_2(sc, VIRTIO_PCI_QUEUE_NUM); error = virtqueue_alloc(dev, idx, size, VIRTIO_PCI_VRING_ALIGN, 0xFFFFFFFFUL, info, &vq); if (error) { device_printf(dev, "cannot allocate virtqueue %d: %d\n", idx, error); break; } vtpci_write_config_4(sc, VIRTIO_PCI_QUEUE_PFN, virtqueue_paddr(vq) >> VIRTIO_PCI_QUEUE_ADDR_SHIFT); vqx->vtv_vq = *info->vqai_vq = vq; vqx->vtv_no_intr = info->vqai_intr == NULL; sc->vtpci_nvqs++; } if (error) vtpci_free_virtqueues(sc); return (error); } static int vtpci_setup_intr(device_t dev, enum intr_type type) { struct vtpci_softc *sc; int attempt, error; sc = device_get_softc(dev); for (attempt = 0; attempt < 5; attempt++) { /* * Start with the most desirable interrupt configuration and * fallback towards less desirable ones. 
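
The one functional change in the hunk above is the switch from malloc(nvqs * sizeof(...)) to mallocarray(nvqs, sizeof(...)), which fails the allocation instead of silently wrapping when the multiplication overflows. A userspace sketch of the overflow-checked pattern; the kernel helper behaves analogously but also takes the malloc type and flags:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/*
 * Illustrative overflow-checked array allocation: refuse the request if
 * nmemb * size would not fit in a size_t, the condition the kernel's
 * mallocarray() guards against.
 */
static void *
demo_mallocarray(size_t nmemb, size_t size)
{
	if (size != 0 && nmemb > SIZE_MAX / size)
		return (NULL);
	return (malloc(nmemb * size));
}

int
main(void)
{
	void *p;

	p = demo_mallocarray(16, 64);		/* fits, allocates normally */
	printf("small: %s\n", p != NULL ? "allocated" : "failed");
	free(p);

	p = demo_mallocarray(SIZE_MAX / 2, 4);	/* product would overflow */
	printf("huge: %s\n", p == NULL ? "rejected" : "allocated");
	return (0);
}
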
*/ switch (attempt) { case 0: error = vtpci_alloc_intr_msix_pervq(sc); break; case 1: error = vtpci_alloc_intr_msix_shared(sc); break; case 2: error = vtpci_alloc_intr_msi(sc); break; case 3: error = vtpci_alloc_intr_legacy(sc); break; default: device_printf(dev, "exhausted all interrupt allocation attempts\n"); return (ENXIO); } if (error == 0 && vtpci_setup_interrupts(sc, type) == 0) break; vtpci_cleanup_setup_intr_attempt(sc); } if (bootverbose) { if (sc->vtpci_flags & VTPCI_FLAG_LEGACY) device_printf(dev, "using legacy interrupt\n"); else if (sc->vtpci_flags & VTPCI_FLAG_MSI) device_printf(dev, "using MSI interrupt\n"); else if (sc->vtpci_flags & VTPCI_FLAG_SHARED_MSIX) device_printf(dev, "using shared MSIX interrupts\n"); else device_printf(dev, "using per VQ MSIX interrupts\n"); } return (0); } static void vtpci_stop(device_t dev) { vtpci_reset(device_get_softc(dev)); } static int vtpci_reinit(device_t dev, uint64_t features) { struct vtpci_softc *sc; int idx, error; sc = device_get_softc(dev); /* * Redrive the device initialization. This is a bit of an abuse of * the specification, but VirtualBox, QEMU/KVM, and BHyVe seem to * play nice. * * We do not allow the host device to change from what was originally * negotiated beyond what the guest driver changed. MSIX state should * not change, number of virtqueues and their size remain the same, etc. * This will need to be rethought when we want to support migration. */ if (vtpci_get_status(dev) != VIRTIO_CONFIG_STATUS_RESET) vtpci_stop(dev); /* * Quickly drive the status through ACK and DRIVER. The device * does not become usable again until vtpci_reinit_complete(). */ vtpci_set_status(dev, VIRTIO_CONFIG_STATUS_ACK); vtpci_set_status(dev, VIRTIO_CONFIG_STATUS_DRIVER); vtpci_negotiate_features(dev, features); for (idx = 0; idx < sc->vtpci_nvqs; idx++) { error = vtpci_reinit_virtqueue(sc, idx); if (error) return (error); } if (sc->vtpci_flags & VTPCI_FLAG_MSIX) { error = vtpci_set_host_msix_vectors(sc); if (error) return (error); } return (0); } static void vtpci_reinit_complete(device_t dev) { vtpci_set_status(dev, VIRTIO_CONFIG_STATUS_DRIVER_OK); } static void vtpci_notify_virtqueue(device_t dev, uint16_t queue) { struct vtpci_softc *sc; sc = device_get_softc(dev); vtpci_write_config_2(sc, VIRTIO_PCI_QUEUE_NOTIFY, queue); } static uint8_t vtpci_get_status(device_t dev) { struct vtpci_softc *sc; sc = device_get_softc(dev); return (vtpci_read_config_1(sc, VIRTIO_PCI_STATUS)); } static void vtpci_set_status(device_t dev, uint8_t status) { struct vtpci_softc *sc; sc = device_get_softc(dev); if (status != VIRTIO_CONFIG_STATUS_RESET) status |= vtpci_get_status(dev); vtpci_write_config_1(sc, VIRTIO_PCI_STATUS, status); } static void vtpci_read_dev_config(device_t dev, bus_size_t offset, void *dst, int length) { struct vtpci_softc *sc; bus_size_t off; uint8_t *d; int size; sc = device_get_softc(dev); off = VIRTIO_PCI_CONFIG(sc) + offset; for (d = dst; length > 0; d += size, off += size, length -= size) { if (length >= 4) { size = 4; *(uint32_t *)d = vtpci_read_config_4(sc, off); } else if (length >= 2) { size = 2; *(uint16_t *)d = vtpci_read_config_2(sc, off); } else { size = 1; *d = vtpci_read_config_1(sc, off); } } } static void vtpci_write_dev_config(device_t dev, bus_size_t offset, void *src, int length) { struct vtpci_softc *sc; bus_size_t off; uint8_t *s; int size; sc = device_get_softc(dev); off = VIRTIO_PCI_CONFIG(sc) + offset; for (s = src; length > 0; s += size, off += size, length -= size) { if (length >= 4) { size = 4; 
vtpci_write_config_4(sc, off, *(uint32_t *)s); } else if (length >= 2) { size = 2; vtpci_write_config_2(sc, off, *(uint16_t *)s); } else { size = 1; vtpci_write_config_1(sc, off, *s); } } } static void vtpci_describe_features(struct vtpci_softc *sc, const char *msg, uint64_t features) { device_t dev, child; dev = sc->vtpci_dev; child = sc->vtpci_child_dev; if (device_is_attached(child) || bootverbose == 0) return; virtio_describe(dev, msg, features, sc->vtpci_child_feat_desc); } static void vtpci_probe_and_attach_child(struct vtpci_softc *sc) { device_t dev, child; dev = sc->vtpci_dev; child = sc->vtpci_child_dev; if (child == NULL) return; if (device_get_state(child) != DS_NOTPRESENT) return; if (device_probe(child) != 0) return; vtpci_set_status(dev, VIRTIO_CONFIG_STATUS_DRIVER); if (device_attach(child) != 0) { vtpci_set_status(dev, VIRTIO_CONFIG_STATUS_FAILED); vtpci_reset(sc); vtpci_release_child_resources(sc); /* Reset status for future attempt. */ vtpci_set_status(dev, VIRTIO_CONFIG_STATUS_ACK); } else { vtpci_set_status(dev, VIRTIO_CONFIG_STATUS_DRIVER_OK); VIRTIO_ATTACH_COMPLETED(child); } } static int vtpci_alloc_msix(struct vtpci_softc *sc, int nvectors) { device_t dev; int nmsix, cnt, required; dev = sc->vtpci_dev; /* Allocate an additional vector for the config changes. */ required = nvectors + 1; nmsix = pci_msix_count(dev); if (nmsix < required) return (1); cnt = required; if (pci_alloc_msix(dev, &cnt) == 0 && cnt >= required) { sc->vtpci_nmsix_resources = required; return (0); } pci_release_msi(dev); return (1); } static int vtpci_alloc_msi(struct vtpci_softc *sc) { device_t dev; int nmsi, cnt, required; dev = sc->vtpci_dev; required = 1; nmsi = pci_msi_count(dev); if (nmsi < required) return (1); cnt = required; if (pci_alloc_msi(dev, &cnt) == 0 && cnt >= required) return (0); pci_release_msi(dev); return (1); } static int vtpci_alloc_intr_msix_pervq(struct vtpci_softc *sc) { int i, nvectors, error; if (vtpci_disable_msix != 0 || sc->vtpci_flags & VTPCI_FLAG_NO_MSIX) return (ENOTSUP); for (nvectors = 0, i = 0; i < sc->vtpci_nvqs; i++) { if (sc->vtpci_vqs[i].vtv_no_intr == 0) nvectors++; } error = vtpci_alloc_msix(sc, nvectors); if (error) return (error); sc->vtpci_flags |= VTPCI_FLAG_MSIX; return (0); } static int vtpci_alloc_intr_msix_shared(struct vtpci_softc *sc) { int error; if (vtpci_disable_msix != 0 || sc->vtpci_flags & VTPCI_FLAG_NO_MSIX) return (ENOTSUP); error = vtpci_alloc_msix(sc, 1); if (error) return (error); sc->vtpci_flags |= VTPCI_FLAG_MSIX | VTPCI_FLAG_SHARED_MSIX; return (0); } static int vtpci_alloc_intr_msi(struct vtpci_softc *sc) { int error; /* Only BHyVe supports MSI. */ if (sc->vtpci_flags & VTPCI_FLAG_NO_MSI) return (ENOTSUP); error = vtpci_alloc_msi(sc); if (error) return (error); sc->vtpci_flags |= VTPCI_FLAG_MSI; return (0); } static int vtpci_alloc_intr_legacy(struct vtpci_softc *sc) { sc->vtpci_flags |= VTPCI_FLAG_LEGACY; return (0); } static int vtpci_alloc_interrupt(struct vtpci_softc *sc, int rid, int flags, struct vtpci_interrupt *intr) { struct resource *irq; irq = bus_alloc_resource_any(sc->vtpci_dev, SYS_RES_IRQ, &rid, flags); if (irq == NULL) return (ENXIO); intr->vti_irq = irq; intr->vti_rid = rid; return (0); } static int vtpci_alloc_intr_resources(struct vtpci_softc *sc) { struct vtpci_interrupt *intr; int i, rid, flags, nvq_intrs, error; rid = 0; flags = RF_ACTIVE; if (sc->vtpci_flags & VTPCI_FLAG_LEGACY) flags |= RF_SHAREABLE; else rid = 1; /* * For legacy and MSI interrupts, this single resource handles all * interrupts. 
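
vtpci_read_dev_config()/vtpci_write_dev_config() above copy the device config space using the widest access that still fits the remaining length (4, then 2, then 1 bytes). A small sketch of that chunking loop over a plain byte array standing in for the I/O window:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/*
 * Copy 'length' bytes out of a fake config window in 4/2/1-byte chunks,
 * the same walk the PCI transport performs with its register accessors.
 */
static void
demo_read_config(const uint8_t *win, void *dst, int length)
{
	uint8_t *d;
	int size;

	for (d = dst; length > 0; d += size, win += size, length -= size) {
		if (length >= 4) {
			size = 4;
			memcpy(d, win, 4);	/* stands in for a 32-bit read */
		} else if (length >= 2) {
			size = 2;
			memcpy(d, win, 2);	/* 16-bit read */
		} else {
			size = 1;
			*d = *win;		/* 8-bit read */
		}
	}
}

int
main(void)
{
	uint8_t win[7] = { 1, 2, 3, 4, 5, 6, 7 }, out[7];

	demo_read_config(win, out, sizeof(out));
	printf("%u ... %u\n", out[0], out[6]);
	return (0);
}
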
For MSIX, this resource is used for the configuration * changed interrupt. */ intr = &sc->vtpci_device_interrupt; error = vtpci_alloc_interrupt(sc, rid, flags, intr); if (error || sc->vtpci_flags & (VTPCI_FLAG_LEGACY | VTPCI_FLAG_MSI)) return (error); /* Subtract one for the configuration changed interrupt. */ nvq_intrs = sc->vtpci_nmsix_resources - 1; - intr = sc->vtpci_msix_vq_interrupts = malloc(nvq_intrs * + intr = sc->vtpci_msix_vq_interrupts = mallocarray(nvq_intrs, sizeof(struct vtpci_interrupt), M_DEVBUF, M_NOWAIT | M_ZERO); if (sc->vtpci_msix_vq_interrupts == NULL) return (ENOMEM); for (i = 0, rid++; i < nvq_intrs; i++, rid++, intr++) { error = vtpci_alloc_interrupt(sc, rid, flags, intr); if (error) return (error); } return (0); } static int vtpci_setup_legacy_interrupt(struct vtpci_softc *sc, enum intr_type type) { struct vtpci_interrupt *intr; int error; intr = &sc->vtpci_device_interrupt; error = bus_setup_intr(sc->vtpci_dev, intr->vti_irq, type, NULL, vtpci_legacy_intr, sc, &intr->vti_handler); return (error); } static int vtpci_setup_pervq_msix_interrupts(struct vtpci_softc *sc, enum intr_type type) { struct vtpci_virtqueue *vqx; struct vtpci_interrupt *intr; int i, error; intr = sc->vtpci_msix_vq_interrupts; for (i = 0; i < sc->vtpci_nvqs; i++) { vqx = &sc->vtpci_vqs[i]; if (vqx->vtv_no_intr) continue; error = bus_setup_intr(sc->vtpci_dev, intr->vti_irq, type, vtpci_vq_intr_filter, vtpci_vq_intr, vqx->vtv_vq, &intr->vti_handler); if (error) return (error); intr++; } return (0); } static int vtpci_setup_msix_interrupts(struct vtpci_softc *sc, enum intr_type type) { device_t dev; struct vtpci_interrupt *intr; int error; dev = sc->vtpci_dev; intr = &sc->vtpci_device_interrupt; error = bus_setup_intr(dev, intr->vti_irq, type, NULL, vtpci_config_intr, sc, &intr->vti_handler); if (error) return (error); if (sc->vtpci_flags & VTPCI_FLAG_SHARED_MSIX) { intr = sc->vtpci_msix_vq_interrupts; error = bus_setup_intr(dev, intr->vti_irq, type, vtpci_vq_shared_intr_filter, vtpci_vq_shared_intr, sc, &intr->vti_handler); } else error = vtpci_setup_pervq_msix_interrupts(sc, type); return (error ? error : vtpci_set_host_msix_vectors(sc)); } static int vtpci_setup_interrupts(struct vtpci_softc *sc, enum intr_type type) { int error; type |= INTR_MPSAFE; KASSERT(sc->vtpci_flags & VTPCI_FLAG_ITYPE_MASK, ("%s: no interrupt type selected %#x", __func__, sc->vtpci_flags)); error = vtpci_alloc_intr_resources(sc); if (error) return (error); if (sc->vtpci_flags & VTPCI_FLAG_LEGACY) error = vtpci_setup_legacy_interrupt(sc, type); else if (sc->vtpci_flags & VTPCI_FLAG_MSI) error = vtpci_setup_msi_interrupt(sc, type); else error = vtpci_setup_msix_interrupts(sc, type); return (error); } static int vtpci_register_msix_vector(struct vtpci_softc *sc, int offset, struct vtpci_interrupt *intr) { device_t dev; uint16_t vector; dev = sc->vtpci_dev; if (intr != NULL) { /* Map from guest rid to host vector. */ vector = intr->vti_rid - 1; } else vector = VIRTIO_MSI_NO_VECTOR; vtpci_write_config_2(sc, offset, vector); /* Read vector to determine if the host had sufficient resources. 
*/ if (vtpci_read_config_2(sc, offset) != vector) { device_printf(dev, "insufficient host resources for MSIX interrupts\n"); return (ENODEV); } return (0); } static int vtpci_set_host_msix_vectors(struct vtpci_softc *sc) { struct vtpci_interrupt *intr, *tintr; int idx, offset, error; intr = &sc->vtpci_device_interrupt; offset = VIRTIO_MSI_CONFIG_VECTOR; error = vtpci_register_msix_vector(sc, offset, intr); if (error) return (error); intr = sc->vtpci_msix_vq_interrupts; offset = VIRTIO_MSI_QUEUE_VECTOR; for (idx = 0; idx < sc->vtpci_nvqs; idx++) { vtpci_select_virtqueue(sc, idx); if (sc->vtpci_vqs[idx].vtv_no_intr) tintr = NULL; else tintr = intr; error = vtpci_register_msix_vector(sc, offset, tintr); if (error) break; /* * For shared MSIX, all the virtqueues share the first * interrupt. */ if (!sc->vtpci_vqs[idx].vtv_no_intr && (sc->vtpci_flags & VTPCI_FLAG_SHARED_MSIX) == 0) intr++; } return (error); } static int vtpci_reinit_virtqueue(struct vtpci_softc *sc, int idx) { struct vtpci_virtqueue *vqx; struct virtqueue *vq; int error; uint16_t size; vqx = &sc->vtpci_vqs[idx]; vq = vqx->vtv_vq; KASSERT(vq != NULL, ("%s: vq %d not allocated", __func__, idx)); vtpci_select_virtqueue(sc, idx); size = vtpci_read_config_2(sc, VIRTIO_PCI_QUEUE_NUM); error = virtqueue_reinit(vq, size); if (error) return (error); vtpci_write_config_4(sc, VIRTIO_PCI_QUEUE_PFN, virtqueue_paddr(vq) >> VIRTIO_PCI_QUEUE_ADDR_SHIFT); return (0); } static void vtpci_free_interrupt(struct vtpci_softc *sc, struct vtpci_interrupt *intr) { device_t dev; dev = sc->vtpci_dev; if (intr->vti_handler != NULL) { bus_teardown_intr(dev, intr->vti_irq, intr->vti_handler); intr->vti_handler = NULL; } if (intr->vti_irq != NULL) { bus_release_resource(dev, SYS_RES_IRQ, intr->vti_rid, intr->vti_irq); intr->vti_irq = NULL; intr->vti_rid = -1; } } static void vtpci_free_interrupts(struct vtpci_softc *sc) { struct vtpci_interrupt *intr; int i, nvq_intrs; vtpci_free_interrupt(sc, &sc->vtpci_device_interrupt); if (sc->vtpci_nmsix_resources != 0) { nvq_intrs = sc->vtpci_nmsix_resources - 1; sc->vtpci_nmsix_resources = 0; intr = sc->vtpci_msix_vq_interrupts; if (intr != NULL) { for (i = 0; i < nvq_intrs; i++, intr++) vtpci_free_interrupt(sc, intr); free(sc->vtpci_msix_vq_interrupts, M_DEVBUF); sc->vtpci_msix_vq_interrupts = NULL; } } if (sc->vtpci_flags & (VTPCI_FLAG_MSI | VTPCI_FLAG_MSIX)) pci_release_msi(sc->vtpci_dev); sc->vtpci_flags &= ~VTPCI_FLAG_ITYPE_MASK; } static void vtpci_free_virtqueues(struct vtpci_softc *sc) { struct vtpci_virtqueue *vqx; int idx; for (idx = 0; idx < sc->vtpci_nvqs; idx++) { vqx = &sc->vtpci_vqs[idx]; vtpci_select_virtqueue(sc, idx); vtpci_write_config_4(sc, VIRTIO_PCI_QUEUE_PFN, 0); virtqueue_free(vqx->vtv_vq); vqx->vtv_vq = NULL; } free(sc->vtpci_vqs, M_DEVBUF); sc->vtpci_vqs = NULL; sc->vtpci_nvqs = 0; } static void vtpci_release_child_resources(struct vtpci_softc *sc) { vtpci_free_interrupts(sc); vtpci_free_virtqueues(sc); } static void vtpci_cleanup_setup_intr_attempt(struct vtpci_softc *sc) { int idx; if (sc->vtpci_flags & VTPCI_FLAG_MSIX) { vtpci_write_config_2(sc, VIRTIO_MSI_CONFIG_VECTOR, VIRTIO_MSI_NO_VECTOR); for (idx = 0; idx < sc->vtpci_nvqs; idx++) { vtpci_select_virtqueue(sc, idx); vtpci_write_config_2(sc, VIRTIO_MSI_QUEUE_VECTOR, VIRTIO_MSI_NO_VECTOR); } } vtpci_free_interrupts(sc); } static void vtpci_reset(struct vtpci_softc *sc) { /* * Setting the status to RESET sets the host device to * the original, uninitialized state. 
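
vtpci_register_msix_vector() above depends on the device echoing the vector back: the driver writes the guest vector number, reads the same register again, and treats a mismatch as the host having run out of MSIX resources. A minimal sketch of that write-and-verify handshake against a stubbed register; the limit argument is an invented knob that models a host with only a few backing vectors:

#include <errno.h>
#include <stdint.h>
#include <stdio.h>

#define DEMO_NO_VECTOR	0xffff	/* sentinel the stub answers with on failure */

static uint16_t demo_reg;

/* Stub register: pretend the host can only back vectors below 'limit'. */
static void
demo_write_vector(uint16_t v, uint16_t limit)
{
	demo_reg = (v == DEMO_NO_VECTOR || v < limit) ? v : DEMO_NO_VECTOR;
}

static int
demo_register_vector(uint16_t v, uint16_t limit)
{
	demo_write_vector(v, limit);
	if (demo_reg != v)		/* readback mismatch: no host resources */
		return (ENODEV);
	return (0);
}

int
main(void)
{
	printf("vector 1: %d\n", demo_register_vector(1, 4));	/* succeeds */
	printf("vector 9: %d\n", demo_register_vector(9, 4));	/* ENODEV */
	return (0);
}
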
*/ vtpci_set_status(sc->vtpci_dev, VIRTIO_CONFIG_STATUS_RESET); } static void vtpci_select_virtqueue(struct vtpci_softc *sc, int idx) { vtpci_write_config_2(sc, VIRTIO_PCI_QUEUE_SEL, idx); } static void vtpci_legacy_intr(void *xsc) { struct vtpci_softc *sc; struct vtpci_virtqueue *vqx; int i; uint8_t isr; sc = xsc; vqx = &sc->vtpci_vqs[0]; /* Reading the ISR also clears it. */ isr = vtpci_read_config_1(sc, VIRTIO_PCI_ISR); if (isr & VIRTIO_PCI_ISR_CONFIG) vtpci_config_intr(sc); if (isr & VIRTIO_PCI_ISR_INTR) { for (i = 0; i < sc->vtpci_nvqs; i++, vqx++) { if (vqx->vtv_no_intr == 0) virtqueue_intr(vqx->vtv_vq); } } } static int vtpci_vq_shared_intr_filter(void *xsc) { struct vtpci_softc *sc; struct vtpci_virtqueue *vqx; int i, rc; rc = 0; sc = xsc; vqx = &sc->vtpci_vqs[0]; for (i = 0; i < sc->vtpci_nvqs; i++, vqx++) { if (vqx->vtv_no_intr == 0) rc |= virtqueue_intr_filter(vqx->vtv_vq); } return (rc ? FILTER_SCHEDULE_THREAD : FILTER_STRAY); } static void vtpci_vq_shared_intr(void *xsc) { struct vtpci_softc *sc; struct vtpci_virtqueue *vqx; int i; sc = xsc; vqx = &sc->vtpci_vqs[0]; for (i = 0; i < sc->vtpci_nvqs; i++, vqx++) { if (vqx->vtv_no_intr == 0) virtqueue_intr(vqx->vtv_vq); } } static int vtpci_vq_intr_filter(void *xvq) { struct virtqueue *vq; int rc; vq = xvq; rc = virtqueue_intr_filter(vq); return (rc ? FILTER_SCHEDULE_THREAD : FILTER_STRAY); } static void vtpci_vq_intr(void *xvq) { struct virtqueue *vq; vq = xvq; virtqueue_intr(vq); } static void vtpci_config_intr(void *xsc) { struct vtpci_softc *sc; device_t child; sc = xsc; child = sc->vtpci_child_dev; if (child != NULL) VIRTIO_CONFIG_CHANGE(child); } Index: head/sys/dev/vmware/vmxnet3/if_vmx.c =================================================================== --- head/sys/dev/vmware/vmxnet3/if_vmx.c (revision 327948) +++ head/sys/dev/vmware/vmxnet3/if_vmx.c (revision 327949) @@ -1,3950 +1,3950 @@ /*- * Copyright (c) 2013 Tsubai Masanari * Copyright (c) 2013 Bryan Venteicher * * Permission to use, copy, modify, and distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. * * $OpenBSD: src/sys/dev/pci/if_vmx.c,v 1.11 2013/06/22 00:28:10 uebayasi Exp $ */ /* Driver for VMware vmxnet3 virtual ethernet devices. 
*/ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "if_vmxreg.h" #include "if_vmxvar.h" #include "opt_inet.h" #include "opt_inet6.h" #ifdef VMXNET3_FAILPOINTS #include static SYSCTL_NODE(DEBUG_FP, OID_AUTO, vmxnet3, CTLFLAG_RW, 0, "vmxnet3 fail points"); #define VMXNET3_FP _debug_fail_point_vmxnet3 #endif static int vmxnet3_probe(device_t); static int vmxnet3_attach(device_t); static int vmxnet3_detach(device_t); static int vmxnet3_shutdown(device_t); static int vmxnet3_alloc_resources(struct vmxnet3_softc *); static void vmxnet3_free_resources(struct vmxnet3_softc *); static int vmxnet3_check_version(struct vmxnet3_softc *); static void vmxnet3_initial_config(struct vmxnet3_softc *); static void vmxnet3_check_multiqueue(struct vmxnet3_softc *); static int vmxnet3_alloc_msix_interrupts(struct vmxnet3_softc *); static int vmxnet3_alloc_msi_interrupts(struct vmxnet3_softc *); static int vmxnet3_alloc_legacy_interrupts(struct vmxnet3_softc *); static int vmxnet3_alloc_interrupt(struct vmxnet3_softc *, int, int, struct vmxnet3_interrupt *); static int vmxnet3_alloc_intr_resources(struct vmxnet3_softc *); static int vmxnet3_setup_msix_interrupts(struct vmxnet3_softc *); static int vmxnet3_setup_legacy_interrupt(struct vmxnet3_softc *); static int vmxnet3_setup_interrupts(struct vmxnet3_softc *); static int vmxnet3_alloc_interrupts(struct vmxnet3_softc *); static void vmxnet3_free_interrupt(struct vmxnet3_softc *, struct vmxnet3_interrupt *); static void vmxnet3_free_interrupts(struct vmxnet3_softc *); #ifndef VMXNET3_LEGACY_TX static int vmxnet3_alloc_taskqueue(struct vmxnet3_softc *); static void vmxnet3_start_taskqueue(struct vmxnet3_softc *); static void vmxnet3_drain_taskqueue(struct vmxnet3_softc *); static void vmxnet3_free_taskqueue(struct vmxnet3_softc *); #endif static int vmxnet3_init_rxq(struct vmxnet3_softc *, int); static int vmxnet3_init_txq(struct vmxnet3_softc *, int); static int vmxnet3_alloc_rxtx_queues(struct vmxnet3_softc *); static void vmxnet3_destroy_rxq(struct vmxnet3_rxqueue *); static void vmxnet3_destroy_txq(struct vmxnet3_txqueue *); static void vmxnet3_free_rxtx_queues(struct vmxnet3_softc *); static int vmxnet3_alloc_shared_data(struct vmxnet3_softc *); static void vmxnet3_free_shared_data(struct vmxnet3_softc *); static int vmxnet3_alloc_txq_data(struct vmxnet3_softc *); static void vmxnet3_free_txq_data(struct vmxnet3_softc *); static int vmxnet3_alloc_rxq_data(struct vmxnet3_softc *); static void vmxnet3_free_rxq_data(struct vmxnet3_softc *); static int vmxnet3_alloc_queue_data(struct vmxnet3_softc *); static void vmxnet3_free_queue_data(struct vmxnet3_softc *); static int vmxnet3_alloc_mcast_table(struct vmxnet3_softc *); static void vmxnet3_init_shared_data(struct vmxnet3_softc *); static void vmxnet3_init_hwassist(struct vmxnet3_softc *); static void vmxnet3_reinit_interface(struct vmxnet3_softc *); static void vmxnet3_reinit_rss_shared_data(struct vmxnet3_softc *); static void vmxnet3_reinit_shared_data(struct vmxnet3_softc *); static int vmxnet3_alloc_data(struct vmxnet3_softc *); static void vmxnet3_free_data(struct vmxnet3_softc *); static int vmxnet3_setup_interface(struct vmxnet3_softc *); static void vmxnet3_evintr(struct 
vmxnet3_softc *); static void vmxnet3_txq_eof(struct vmxnet3_txqueue *); static void vmxnet3_rx_csum(struct vmxnet3_rxcompdesc *, struct mbuf *); static int vmxnet3_newbuf(struct vmxnet3_softc *, struct vmxnet3_rxring *); static void vmxnet3_rxq_eof_discard(struct vmxnet3_rxqueue *, struct vmxnet3_rxring *, int); static void vmxnet3_rxq_eof(struct vmxnet3_rxqueue *); static void vmxnet3_legacy_intr(void *); static void vmxnet3_txq_intr(void *); static void vmxnet3_rxq_intr(void *); static void vmxnet3_event_intr(void *); static void vmxnet3_txstop(struct vmxnet3_softc *, struct vmxnet3_txqueue *); static void vmxnet3_rxstop(struct vmxnet3_softc *, struct vmxnet3_rxqueue *); static void vmxnet3_stop(struct vmxnet3_softc *); static void vmxnet3_txinit(struct vmxnet3_softc *, struct vmxnet3_txqueue *); static int vmxnet3_rxinit(struct vmxnet3_softc *, struct vmxnet3_rxqueue *); static int vmxnet3_reinit_queues(struct vmxnet3_softc *); static int vmxnet3_enable_device(struct vmxnet3_softc *); static void vmxnet3_reinit_rxfilters(struct vmxnet3_softc *); static int vmxnet3_reinit(struct vmxnet3_softc *); static void vmxnet3_init_locked(struct vmxnet3_softc *); static void vmxnet3_init(void *); static int vmxnet3_txq_offload_ctx(struct vmxnet3_txqueue *,struct mbuf *, int *, int *, int *); static int vmxnet3_txq_load_mbuf(struct vmxnet3_txqueue *, struct mbuf **, bus_dmamap_t, bus_dma_segment_t [], int *); static void vmxnet3_txq_unload_mbuf(struct vmxnet3_txqueue *, bus_dmamap_t); static int vmxnet3_txq_encap(struct vmxnet3_txqueue *, struct mbuf **); #ifdef VMXNET3_LEGACY_TX static void vmxnet3_start_locked(struct ifnet *); static void vmxnet3_start(struct ifnet *); #else static int vmxnet3_txq_mq_start_locked(struct vmxnet3_txqueue *, struct mbuf *); static int vmxnet3_txq_mq_start(struct ifnet *, struct mbuf *); static void vmxnet3_txq_tq_deferred(void *, int); #endif static void vmxnet3_txq_start(struct vmxnet3_txqueue *); static void vmxnet3_tx_start_all(struct vmxnet3_softc *); static void vmxnet3_update_vlan_filter(struct vmxnet3_softc *, int, uint16_t); static void vmxnet3_register_vlan(void *, struct ifnet *, uint16_t); static void vmxnet3_unregister_vlan(void *, struct ifnet *, uint16_t); static void vmxnet3_set_rxfilter(struct vmxnet3_softc *); static int vmxnet3_change_mtu(struct vmxnet3_softc *, int); static int vmxnet3_ioctl(struct ifnet *, u_long, caddr_t); static uint64_t vmxnet3_get_counter(struct ifnet *, ift_counter); #ifndef VMXNET3_LEGACY_TX static void vmxnet3_qflush(struct ifnet *); #endif static int vmxnet3_watchdog(struct vmxnet3_txqueue *); static void vmxnet3_refresh_host_stats(struct vmxnet3_softc *); static void vmxnet3_tick(void *); static void vmxnet3_link_status(struct vmxnet3_softc *); static void vmxnet3_media_status(struct ifnet *, struct ifmediareq *); static int vmxnet3_media_change(struct ifnet *); static void vmxnet3_set_lladdr(struct vmxnet3_softc *); static void vmxnet3_get_lladdr(struct vmxnet3_softc *); static void vmxnet3_setup_txq_sysctl(struct vmxnet3_txqueue *, struct sysctl_ctx_list *, struct sysctl_oid_list *); static void vmxnet3_setup_rxq_sysctl(struct vmxnet3_rxqueue *, struct sysctl_ctx_list *, struct sysctl_oid_list *); static void vmxnet3_setup_queue_sysctl(struct vmxnet3_softc *, struct sysctl_ctx_list *, struct sysctl_oid_list *); static void vmxnet3_setup_sysctl(struct vmxnet3_softc *); static void vmxnet3_write_bar0(struct vmxnet3_softc *, bus_size_t, uint32_t); static uint32_t vmxnet3_read_bar1(struct vmxnet3_softc *, bus_size_t); 
static void vmxnet3_write_bar1(struct vmxnet3_softc *, bus_size_t, uint32_t); static void vmxnet3_write_cmd(struct vmxnet3_softc *, uint32_t); static uint32_t vmxnet3_read_cmd(struct vmxnet3_softc *, uint32_t); static void vmxnet3_enable_intr(struct vmxnet3_softc *, int); static void vmxnet3_disable_intr(struct vmxnet3_softc *, int); static void vmxnet3_enable_all_intrs(struct vmxnet3_softc *); static void vmxnet3_disable_all_intrs(struct vmxnet3_softc *); static int vmxnet3_dma_malloc(struct vmxnet3_softc *, bus_size_t, bus_size_t, struct vmxnet3_dma_alloc *); static void vmxnet3_dma_free(struct vmxnet3_softc *, struct vmxnet3_dma_alloc *); static int vmxnet3_tunable_int(struct vmxnet3_softc *, const char *, int); typedef enum { VMXNET3_BARRIER_RD, VMXNET3_BARRIER_WR, VMXNET3_BARRIER_RDWR, } vmxnet3_barrier_t; static void vmxnet3_barrier(struct vmxnet3_softc *, vmxnet3_barrier_t); /* Tunables. */ static int vmxnet3_mq_disable = 0; TUNABLE_INT("hw.vmx.mq_disable", &vmxnet3_mq_disable); static int vmxnet3_default_txnqueue = VMXNET3_DEF_TX_QUEUES; TUNABLE_INT("hw.vmx.txnqueue", &vmxnet3_default_txnqueue); static int vmxnet3_default_rxnqueue = VMXNET3_DEF_RX_QUEUES; TUNABLE_INT("hw.vmx.rxnqueue", &vmxnet3_default_rxnqueue); static int vmxnet3_default_txndesc = VMXNET3_DEF_TX_NDESC; TUNABLE_INT("hw.vmx.txndesc", &vmxnet3_default_txndesc); static int vmxnet3_default_rxndesc = VMXNET3_DEF_RX_NDESC; TUNABLE_INT("hw.vmx.rxndesc", &vmxnet3_default_rxndesc); static device_method_t vmxnet3_methods[] = { /* Device interface. */ DEVMETHOD(device_probe, vmxnet3_probe), DEVMETHOD(device_attach, vmxnet3_attach), DEVMETHOD(device_detach, vmxnet3_detach), DEVMETHOD(device_shutdown, vmxnet3_shutdown), DEVMETHOD_END }; static driver_t vmxnet3_driver = { "vmx", vmxnet3_methods, sizeof(struct vmxnet3_softc) }; static devclass_t vmxnet3_devclass; DRIVER_MODULE(vmx, pci, vmxnet3_driver, vmxnet3_devclass, 0, 0); MODULE_DEPEND(vmx, pci, 1, 1, 1); MODULE_DEPEND(vmx, ether, 1, 1, 1); #define VMXNET3_VMWARE_VENDOR_ID 0x15AD #define VMXNET3_VMWARE_DEVICE_ID 0x07B0 static int vmxnet3_probe(device_t dev) { if (pci_get_vendor(dev) == VMXNET3_VMWARE_VENDOR_ID && pci_get_device(dev) == VMXNET3_VMWARE_DEVICE_ID) { device_set_desc(dev, "VMware VMXNET3 Ethernet Adapter"); return (BUS_PROBE_DEFAULT); } return (ENXIO); } static int vmxnet3_attach(device_t dev) { struct vmxnet3_softc *sc; int error; sc = device_get_softc(dev); sc->vmx_dev = dev; pci_enable_busmaster(dev); VMXNET3_CORE_LOCK_INIT(sc, device_get_nameunit(dev)); callout_init_mtx(&sc->vmx_tick, &sc->vmx_mtx, 0); vmxnet3_initial_config(sc); error = vmxnet3_alloc_resources(sc); if (error) goto fail; error = vmxnet3_check_version(sc); if (error) goto fail; error = vmxnet3_alloc_rxtx_queues(sc); if (error) goto fail; #ifndef VMXNET3_LEGACY_TX error = vmxnet3_alloc_taskqueue(sc); if (error) goto fail; #endif error = vmxnet3_alloc_interrupts(sc); if (error) goto fail; vmxnet3_check_multiqueue(sc); error = vmxnet3_alloc_data(sc); if (error) goto fail; error = vmxnet3_setup_interface(sc); if (error) goto fail; error = vmxnet3_setup_interrupts(sc); if (error) { ether_ifdetach(sc->vmx_ifp); device_printf(dev, "could not set up interrupt\n"); goto fail; } vmxnet3_setup_sysctl(sc); #ifndef VMXNET3_LEGACY_TX vmxnet3_start_taskqueue(sc); #endif fail: if (error) vmxnet3_detach(dev); return (error); } static int vmxnet3_detach(device_t dev) { struct vmxnet3_softc *sc; struct ifnet *ifp; sc = device_get_softc(dev); ifp = sc->vmx_ifp; if (device_is_attached(dev)) { 
VMXNET3_CORE_LOCK(sc); vmxnet3_stop(sc); VMXNET3_CORE_UNLOCK(sc); callout_drain(&sc->vmx_tick); #ifndef VMXNET3_LEGACY_TX vmxnet3_drain_taskqueue(sc); #endif ether_ifdetach(ifp); } if (sc->vmx_vlan_attach != NULL) { EVENTHANDLER_DEREGISTER(vlan_config, sc->vmx_vlan_attach); sc->vmx_vlan_attach = NULL; } if (sc->vmx_vlan_detach != NULL) { EVENTHANDLER_DEREGISTER(vlan_config, sc->vmx_vlan_detach); sc->vmx_vlan_detach = NULL; } #ifndef VMXNET3_LEGACY_TX vmxnet3_free_taskqueue(sc); #endif vmxnet3_free_interrupts(sc); if (ifp != NULL) { if_free(ifp); sc->vmx_ifp = NULL; } ifmedia_removeall(&sc->vmx_media); vmxnet3_free_data(sc); vmxnet3_free_resources(sc); vmxnet3_free_rxtx_queues(sc); VMXNET3_CORE_LOCK_DESTROY(sc); return (0); } static int vmxnet3_shutdown(device_t dev) { return (0); } static int vmxnet3_alloc_resources(struct vmxnet3_softc *sc) { device_t dev; int rid; dev = sc->vmx_dev; rid = PCIR_BAR(0); sc->vmx_res0 = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid, RF_ACTIVE); if (sc->vmx_res0 == NULL) { device_printf(dev, "could not map BAR0 memory\n"); return (ENXIO); } sc->vmx_iot0 = rman_get_bustag(sc->vmx_res0); sc->vmx_ioh0 = rman_get_bushandle(sc->vmx_res0); rid = PCIR_BAR(1); sc->vmx_res1 = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid, RF_ACTIVE); if (sc->vmx_res1 == NULL) { device_printf(dev, "could not map BAR1 memory\n"); return (ENXIO); } sc->vmx_iot1 = rman_get_bustag(sc->vmx_res1); sc->vmx_ioh1 = rman_get_bushandle(sc->vmx_res1); if (pci_find_cap(dev, PCIY_MSIX, NULL) == 0) { rid = PCIR_BAR(2); sc->vmx_msix_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid, RF_ACTIVE); } if (sc->vmx_msix_res == NULL) sc->vmx_flags |= VMXNET3_FLAG_NO_MSIX; return (0); } static void vmxnet3_free_resources(struct vmxnet3_softc *sc) { device_t dev; int rid; dev = sc->vmx_dev; if (sc->vmx_res0 != NULL) { rid = PCIR_BAR(0); bus_release_resource(dev, SYS_RES_MEMORY, rid, sc->vmx_res0); sc->vmx_res0 = NULL; } if (sc->vmx_res1 != NULL) { rid = PCIR_BAR(1); bus_release_resource(dev, SYS_RES_MEMORY, rid, sc->vmx_res1); sc->vmx_res1 = NULL; } if (sc->vmx_msix_res != NULL) { rid = PCIR_BAR(2); bus_release_resource(dev, SYS_RES_MEMORY, rid, sc->vmx_msix_res); sc->vmx_msix_res = NULL; } } static int vmxnet3_check_version(struct vmxnet3_softc *sc) { device_t dev; uint32_t version; dev = sc->vmx_dev; version = vmxnet3_read_bar1(sc, VMXNET3_BAR1_VRRS); if ((version & 0x01) == 0) { device_printf(dev, "unsupported hardware version %#x\n", version); return (ENOTSUP); } vmxnet3_write_bar1(sc, VMXNET3_BAR1_VRRS, 1); version = vmxnet3_read_bar1(sc, VMXNET3_BAR1_UVRS); if ((version & 0x01) == 0) { device_printf(dev, "unsupported UPT version %#x\n", version); return (ENOTSUP); } vmxnet3_write_bar1(sc, VMXNET3_BAR1_UVRS, 1); return (0); } static int trunc_powerof2(int val) { return (1U << (fls(val) - 1)); } static void vmxnet3_initial_config(struct vmxnet3_softc *sc) { int nqueue, ndesc; nqueue = vmxnet3_tunable_int(sc, "txnqueue", vmxnet3_default_txnqueue); if (nqueue > VMXNET3_MAX_TX_QUEUES || nqueue < 1) nqueue = VMXNET3_DEF_TX_QUEUES; if (nqueue > mp_ncpus) nqueue = mp_ncpus; sc->vmx_max_ntxqueues = trunc_powerof2(nqueue); nqueue = vmxnet3_tunable_int(sc, "rxnqueue", vmxnet3_default_rxnqueue); if (nqueue > VMXNET3_MAX_RX_QUEUES || nqueue < 1) nqueue = VMXNET3_DEF_RX_QUEUES; if (nqueue > mp_ncpus) nqueue = mp_ncpus; sc->vmx_max_nrxqueues = trunc_powerof2(nqueue); if (vmxnet3_tunable_int(sc, "mq_disable", vmxnet3_mq_disable)) { sc->vmx_max_nrxqueues = 1; sc->vmx_max_ntxqueues = 1; } ndesc = 
vmxnet3_tunable_int(sc, "txd", vmxnet3_default_txndesc); if (ndesc > VMXNET3_MAX_TX_NDESC || ndesc < VMXNET3_MIN_TX_NDESC) ndesc = VMXNET3_DEF_TX_NDESC; if (ndesc & VMXNET3_MASK_TX_NDESC) ndesc &= ~VMXNET3_MASK_TX_NDESC; sc->vmx_ntxdescs = ndesc; ndesc = vmxnet3_tunable_int(sc, "rxd", vmxnet3_default_rxndesc); if (ndesc > VMXNET3_MAX_RX_NDESC || ndesc < VMXNET3_MIN_RX_NDESC) ndesc = VMXNET3_DEF_RX_NDESC; if (ndesc & VMXNET3_MASK_RX_NDESC) ndesc &= ~VMXNET3_MASK_RX_NDESC; sc->vmx_nrxdescs = ndesc; sc->vmx_max_rxsegs = VMXNET3_MAX_RX_SEGS; } static void vmxnet3_check_multiqueue(struct vmxnet3_softc *sc) { if (sc->vmx_intr_type != VMXNET3_IT_MSIX) goto out; /* BMV: Just use the maximum configured for now. */ sc->vmx_nrxqueues = sc->vmx_max_nrxqueues; sc->vmx_ntxqueues = sc->vmx_max_ntxqueues; if (sc->vmx_nrxqueues > 1) sc->vmx_flags |= VMXNET3_FLAG_RSS; return; out: sc->vmx_ntxqueues = 1; sc->vmx_nrxqueues = 1; } static int vmxnet3_alloc_msix_interrupts(struct vmxnet3_softc *sc) { device_t dev; int nmsix, cnt, required; dev = sc->vmx_dev; if (sc->vmx_flags & VMXNET3_FLAG_NO_MSIX) return (1); /* Allocate an additional vector for the events interrupt. */ required = sc->vmx_max_nrxqueues + sc->vmx_max_ntxqueues + 1; nmsix = pci_msix_count(dev); if (nmsix < required) return (1); cnt = required; if (pci_alloc_msix(dev, &cnt) == 0 && cnt >= required) { sc->vmx_nintrs = required; return (0); } else pci_release_msi(dev); /* BMV TODO Fallback to sharing MSIX vectors if possible. */ return (1); } static int vmxnet3_alloc_msi_interrupts(struct vmxnet3_softc *sc) { device_t dev; int nmsi, cnt, required; dev = sc->vmx_dev; required = 1; nmsi = pci_msi_count(dev); if (nmsi < required) return (1); cnt = required; if (pci_alloc_msi(dev, &cnt) == 0 && cnt >= required) { sc->vmx_nintrs = 1; return (0); } else pci_release_msi(dev); return (1); } static int vmxnet3_alloc_legacy_interrupts(struct vmxnet3_softc *sc) { sc->vmx_nintrs = 1; return (0); } static int vmxnet3_alloc_interrupt(struct vmxnet3_softc *sc, int rid, int flags, struct vmxnet3_interrupt *intr) { struct resource *irq; irq = bus_alloc_resource_any(sc->vmx_dev, SYS_RES_IRQ, &rid, flags); if (irq == NULL) return (ENXIO); intr->vmxi_irq = irq; intr->vmxi_rid = rid; return (0); } static int vmxnet3_alloc_intr_resources(struct vmxnet3_softc *sc) { int i, rid, flags, error; rid = 0; flags = RF_ACTIVE; if (sc->vmx_intr_type == VMXNET3_IT_LEGACY) flags |= RF_SHAREABLE; else rid = 1; for (i = 0; i < sc->vmx_nintrs; i++, rid++) { error = vmxnet3_alloc_interrupt(sc, rid, flags, &sc->vmx_intrs[i]); if (error) return (error); } return (0); } static int vmxnet3_setup_msix_interrupts(struct vmxnet3_softc *sc) { device_t dev; struct vmxnet3_txqueue *txq; struct vmxnet3_rxqueue *rxq; struct vmxnet3_interrupt *intr; enum intr_type type; int i, error; dev = sc->vmx_dev; intr = &sc->vmx_intrs[0]; type = INTR_TYPE_NET | INTR_MPSAFE; for (i = 0; i < sc->vmx_ntxqueues; i++, intr++) { txq = &sc->vmx_txq[i]; error = bus_setup_intr(dev, intr->vmxi_irq, type, NULL, vmxnet3_txq_intr, txq, &intr->vmxi_handler); if (error) return (error); bus_describe_intr(dev, intr->vmxi_irq, intr->vmxi_handler, "tq%d", i); txq->vxtxq_intr_idx = intr->vmxi_rid - 1; } for (i = 0; i < sc->vmx_nrxqueues; i++, intr++) { rxq = &sc->vmx_rxq[i]; error = bus_setup_intr(dev, intr->vmxi_irq, type, NULL, vmxnet3_rxq_intr, rxq, &intr->vmxi_handler); if (error) return (error); bus_describe_intr(dev, intr->vmxi_irq, intr->vmxi_handler, "rq%d", i); rxq->vxrxq_intr_idx = intr->vmxi_rid - 1; } error = 
bus_setup_intr(dev, intr->vmxi_irq, type, NULL, vmxnet3_event_intr, sc, &intr->vmxi_handler); if (error) return (error); bus_describe_intr(dev, intr->vmxi_irq, intr->vmxi_handler, "event"); sc->vmx_event_intr_idx = intr->vmxi_rid - 1; return (0); } static int vmxnet3_setup_legacy_interrupt(struct vmxnet3_softc *sc) { struct vmxnet3_interrupt *intr; int i, error; intr = &sc->vmx_intrs[0]; error = bus_setup_intr(sc->vmx_dev, intr->vmxi_irq, INTR_TYPE_NET | INTR_MPSAFE, NULL, vmxnet3_legacy_intr, sc, &intr->vmxi_handler); for (i = 0; i < sc->vmx_ntxqueues; i++) sc->vmx_txq[i].vxtxq_intr_idx = 0; for (i = 0; i < sc->vmx_nrxqueues; i++) sc->vmx_rxq[i].vxrxq_intr_idx = 0; sc->vmx_event_intr_idx = 0; return (error); } static void vmxnet3_set_interrupt_idx(struct vmxnet3_softc *sc) { struct vmxnet3_txqueue *txq; struct vmxnet3_txq_shared *txs; struct vmxnet3_rxqueue *rxq; struct vmxnet3_rxq_shared *rxs; int i; sc->vmx_ds->evintr = sc->vmx_event_intr_idx; for (i = 0; i < sc->vmx_ntxqueues; i++) { txq = &sc->vmx_txq[i]; txs = txq->vxtxq_ts; txs->intr_idx = txq->vxtxq_intr_idx; } for (i = 0; i < sc->vmx_nrxqueues; i++) { rxq = &sc->vmx_rxq[i]; rxs = rxq->vxrxq_rs; rxs->intr_idx = rxq->vxrxq_intr_idx; } } static int vmxnet3_setup_interrupts(struct vmxnet3_softc *sc) { int error; error = vmxnet3_alloc_intr_resources(sc); if (error) return (error); switch (sc->vmx_intr_type) { case VMXNET3_IT_MSIX: error = vmxnet3_setup_msix_interrupts(sc); break; case VMXNET3_IT_MSI: case VMXNET3_IT_LEGACY: error = vmxnet3_setup_legacy_interrupt(sc); break; default: panic("%s: invalid interrupt type %d", __func__, sc->vmx_intr_type); } if (error == 0) vmxnet3_set_interrupt_idx(sc); return (error); } static int vmxnet3_alloc_interrupts(struct vmxnet3_softc *sc) { device_t dev; uint32_t config; int error; dev = sc->vmx_dev; config = vmxnet3_read_cmd(sc, VMXNET3_CMD_GET_INTRCFG); sc->vmx_intr_type = config & 0x03; sc->vmx_intr_mask_mode = (config >> 2) & 0x03; switch (sc->vmx_intr_type) { case VMXNET3_IT_AUTO: sc->vmx_intr_type = VMXNET3_IT_MSIX; /* FALLTHROUGH */ case VMXNET3_IT_MSIX: error = vmxnet3_alloc_msix_interrupts(sc); if (error == 0) break; sc->vmx_intr_type = VMXNET3_IT_MSI; /* FALLTHROUGH */ case VMXNET3_IT_MSI: error = vmxnet3_alloc_msi_interrupts(sc); if (error == 0) break; sc->vmx_intr_type = VMXNET3_IT_LEGACY; /* FALLTHROUGH */ case VMXNET3_IT_LEGACY: error = vmxnet3_alloc_legacy_interrupts(sc); if (error == 0) break; /* FALLTHROUGH */ default: sc->vmx_intr_type = -1; device_printf(dev, "cannot allocate any interrupt resources\n"); return (ENXIO); } return (error); } static void vmxnet3_free_interrupt(struct vmxnet3_softc *sc, struct vmxnet3_interrupt *intr) { device_t dev; dev = sc->vmx_dev; if (intr->vmxi_handler != NULL) { bus_teardown_intr(dev, intr->vmxi_irq, intr->vmxi_handler); intr->vmxi_handler = NULL; } if (intr->vmxi_irq != NULL) { bus_release_resource(dev, SYS_RES_IRQ, intr->vmxi_rid, intr->vmxi_irq); intr->vmxi_irq = NULL; intr->vmxi_rid = -1; } } static void vmxnet3_free_interrupts(struct vmxnet3_softc *sc) { int i; for (i = 0; i < sc->vmx_nintrs; i++) vmxnet3_free_interrupt(sc, &sc->vmx_intrs[i]); if (sc->vmx_intr_type == VMXNET3_IT_MSI || sc->vmx_intr_type == VMXNET3_IT_MSIX) pci_release_msi(sc->vmx_dev); } #ifndef VMXNET3_LEGACY_TX static int vmxnet3_alloc_taskqueue(struct vmxnet3_softc *sc) { device_t dev; dev = sc->vmx_dev; sc->vmx_tq = taskqueue_create(device_get_nameunit(dev), M_NOWAIT, taskqueue_thread_enqueue, &sc->vmx_tq); if (sc->vmx_tq == NULL) return (ENOMEM); return (0); } 
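vmxnet3_initial_config() above clamps the requested queue counts to mp_ncpus and then rounds them down to a power of two with trunc_powerof2(), which relies on fls() returning the index of the most significant set bit. A small standalone check of that rounding, assuming FreeBSD's fls(3) from <strings.h>:

#include <stdio.h>
#include <strings.h>	/* fls(3) */

/* Same body as trunc_powerof2() in the driver. */
static int
trunc_powerof2(int val)
{
	return (1U << (fls(val) - 1));
}

int
main(void)
{
	int v;

	/* e.g. 6 requested queues -> 4, 8 -> 8, 1 -> 1. */
	for (v = 1; v <= 8; v++)
		printf("%d -> %d\n", v, trunc_powerof2(v));
	return (0);
}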
static void vmxnet3_start_taskqueue(struct vmxnet3_softc *sc) { device_t dev; int nthreads, error; dev = sc->vmx_dev; /* * The taskqueue is typically not frequently used, so a dedicated * thread for each queue is unnecessary. */ nthreads = MAX(1, sc->vmx_ntxqueues / 2); /* * Most drivers just ignore the return value - it only fails * with ENOMEM so an error is not likely. It is hard for us * to recover from an error here. */ error = taskqueue_start_threads(&sc->vmx_tq, nthreads, PI_NET, "%s taskq", device_get_nameunit(dev)); if (error) device_printf(dev, "failed to start taskqueue: %d", error); } static void vmxnet3_drain_taskqueue(struct vmxnet3_softc *sc) { struct vmxnet3_txqueue *txq; int i; if (sc->vmx_tq != NULL) { for (i = 0; i < sc->vmx_max_ntxqueues; i++) { txq = &sc->vmx_txq[i]; taskqueue_drain(sc->vmx_tq, &txq->vxtxq_defrtask); } } } static void vmxnet3_free_taskqueue(struct vmxnet3_softc *sc) { if (sc->vmx_tq != NULL) { taskqueue_free(sc->vmx_tq); sc->vmx_tq = NULL; } } #endif static int vmxnet3_init_rxq(struct vmxnet3_softc *sc, int q) { struct vmxnet3_rxqueue *rxq; struct vmxnet3_rxring *rxr; int i; rxq = &sc->vmx_rxq[q]; snprintf(rxq->vxrxq_name, sizeof(rxq->vxrxq_name), "%s-rx%d", device_get_nameunit(sc->vmx_dev), q); mtx_init(&rxq->vxrxq_mtx, rxq->vxrxq_name, NULL, MTX_DEF); rxq->vxrxq_sc = sc; rxq->vxrxq_id = q; for (i = 0; i < VMXNET3_RXRINGS_PERQ; i++) { rxr = &rxq->vxrxq_cmd_ring[i]; rxr->vxrxr_rid = i; rxr->vxrxr_ndesc = sc->vmx_nrxdescs; - rxr->vxrxr_rxbuf = malloc(rxr->vxrxr_ndesc * + rxr->vxrxr_rxbuf = mallocarray(rxr->vxrxr_ndesc, sizeof(struct vmxnet3_rxbuf), M_DEVBUF, M_NOWAIT | M_ZERO); if (rxr->vxrxr_rxbuf == NULL) return (ENOMEM); rxq->vxrxq_comp_ring.vxcr_ndesc += sc->vmx_nrxdescs; } return (0); } static int vmxnet3_init_txq(struct vmxnet3_softc *sc, int q) { struct vmxnet3_txqueue *txq; struct vmxnet3_txring *txr; txq = &sc->vmx_txq[q]; txr = &txq->vxtxq_cmd_ring; snprintf(txq->vxtxq_name, sizeof(txq->vxtxq_name), "%s-tx%d", device_get_nameunit(sc->vmx_dev), q); mtx_init(&txq->vxtxq_mtx, txq->vxtxq_name, NULL, MTX_DEF); txq->vxtxq_sc = sc; txq->vxtxq_id = q; txr->vxtxr_ndesc = sc->vmx_ntxdescs; - txr->vxtxr_txbuf = malloc(txr->vxtxr_ndesc * + txr->vxtxr_txbuf = mallocarray(txr->vxtxr_ndesc, sizeof(struct vmxnet3_txbuf), M_DEVBUF, M_NOWAIT | M_ZERO); if (txr->vxtxr_txbuf == NULL) return (ENOMEM); txq->vxtxq_comp_ring.vxcr_ndesc = sc->vmx_ntxdescs; #ifndef VMXNET3_LEGACY_TX TASK_INIT(&txq->vxtxq_defrtask, 0, vmxnet3_txq_tq_deferred, txq); txq->vxtxq_br = buf_ring_alloc(VMXNET3_DEF_BUFRING_SIZE, M_DEVBUF, M_NOWAIT, &txq->vxtxq_mtx); if (txq->vxtxq_br == NULL) return (ENOMEM); #endif return (0); } static int vmxnet3_alloc_rxtx_queues(struct vmxnet3_softc *sc) { int i, error; /* * Only attempt to create multiple queues if MSIX is available. MSIX is * disabled by default because its apparently broken for devices passed * through by at least ESXi 5.1. The hw.pci.honor_msi_blacklist tunable * must be set to zero for MSIX. This check prevents us from allocating * queue structures that we will not use. 
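Per the comment above, MSI-X (and therefore multiqueue) is only used when the hw.pci.honor_msi_blacklist tunable is cleared; together with the hw.vmx.* tunables declared earlier in this file, that is loader-time configuration. An illustrative /boot/loader.conf fragment (the values are examples, not recommendations or driver defaults):

# Allow MSI-X on blacklisted pass-through devices so multiqueue can be used.
hw.pci.honor_msi_blacklist="0"
# Requested queue and descriptor counts; the driver clamps and rounds these.
hw.vmx.txnqueue="4"
hw.vmx.rxnqueue="4"
hw.vmx.txndesc="512"
hw.vmx.rxndesc="256"
hw.vmx.mq_disable="0"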
*/ if (sc->vmx_flags & VMXNET3_FLAG_NO_MSIX) { sc->vmx_max_nrxqueues = 1; sc->vmx_max_ntxqueues = 1; } - sc->vmx_rxq = malloc(sizeof(struct vmxnet3_rxqueue) * - sc->vmx_max_nrxqueues, M_DEVBUF, M_NOWAIT | M_ZERO); - sc->vmx_txq = malloc(sizeof(struct vmxnet3_txqueue) * - sc->vmx_max_ntxqueues, M_DEVBUF, M_NOWAIT | M_ZERO); + sc->vmx_rxq = mallocarray(sc->vmx_max_nrxqueues, + sizeof(struct vmxnet3_rxqueue), M_DEVBUF, M_NOWAIT | M_ZERO); + sc->vmx_txq = mallocarray(sc->vmx_max_ntxqueues, + sizeof(struct vmxnet3_txqueue), M_DEVBUF, M_NOWAIT | M_ZERO); if (sc->vmx_rxq == NULL || sc->vmx_txq == NULL) return (ENOMEM); for (i = 0; i < sc->vmx_max_nrxqueues; i++) { error = vmxnet3_init_rxq(sc, i); if (error) return (error); } for (i = 0; i < sc->vmx_max_ntxqueues; i++) { error = vmxnet3_init_txq(sc, i); if (error) return (error); } return (0); } static void vmxnet3_destroy_rxq(struct vmxnet3_rxqueue *rxq) { struct vmxnet3_rxring *rxr; int i; rxq->vxrxq_sc = NULL; rxq->vxrxq_id = -1; for (i = 0; i < VMXNET3_RXRINGS_PERQ; i++) { rxr = &rxq->vxrxq_cmd_ring[i]; if (rxr->vxrxr_rxbuf != NULL) { free(rxr->vxrxr_rxbuf, M_DEVBUF); rxr->vxrxr_rxbuf = NULL; } } if (mtx_initialized(&rxq->vxrxq_mtx) != 0) mtx_destroy(&rxq->vxrxq_mtx); } static void vmxnet3_destroy_txq(struct vmxnet3_txqueue *txq) { struct vmxnet3_txring *txr; txr = &txq->vxtxq_cmd_ring; txq->vxtxq_sc = NULL; txq->vxtxq_id = -1; #ifndef VMXNET3_LEGACY_TX if (txq->vxtxq_br != NULL) { buf_ring_free(txq->vxtxq_br, M_DEVBUF); txq->vxtxq_br = NULL; } #endif if (txr->vxtxr_txbuf != NULL) { free(txr->vxtxr_txbuf, M_DEVBUF); txr->vxtxr_txbuf = NULL; } if (mtx_initialized(&txq->vxtxq_mtx) != 0) mtx_destroy(&txq->vxtxq_mtx); } static void vmxnet3_free_rxtx_queues(struct vmxnet3_softc *sc) { int i; if (sc->vmx_rxq != NULL) { for (i = 0; i < sc->vmx_max_nrxqueues; i++) vmxnet3_destroy_rxq(&sc->vmx_rxq[i]); free(sc->vmx_rxq, M_DEVBUF); sc->vmx_rxq = NULL; } if (sc->vmx_txq != NULL) { for (i = 0; i < sc->vmx_max_ntxqueues; i++) vmxnet3_destroy_txq(&sc->vmx_txq[i]); free(sc->vmx_txq, M_DEVBUF); sc->vmx_txq = NULL; } } static int vmxnet3_alloc_shared_data(struct vmxnet3_softc *sc) { device_t dev; uint8_t *kva; size_t size; int i, error; dev = sc->vmx_dev; size = sizeof(struct vmxnet3_driver_shared); error = vmxnet3_dma_malloc(sc, size, 1, &sc->vmx_ds_dma); if (error) { device_printf(dev, "cannot alloc shared memory\n"); return (error); } sc->vmx_ds = (struct vmxnet3_driver_shared *) sc->vmx_ds_dma.dma_vaddr; size = sc->vmx_ntxqueues * sizeof(struct vmxnet3_txq_shared) + sc->vmx_nrxqueues * sizeof(struct vmxnet3_rxq_shared); error = vmxnet3_dma_malloc(sc, size, 128, &sc->vmx_qs_dma); if (error) { device_printf(dev, "cannot alloc queue shared memory\n"); return (error); } sc->vmx_qs = (void *) sc->vmx_qs_dma.dma_vaddr; kva = sc->vmx_qs; for (i = 0; i < sc->vmx_ntxqueues; i++) { sc->vmx_txq[i].vxtxq_ts = (struct vmxnet3_txq_shared *) kva; kva += sizeof(struct vmxnet3_txq_shared); } for (i = 0; i < sc->vmx_nrxqueues; i++) { sc->vmx_rxq[i].vxrxq_rs = (struct vmxnet3_rxq_shared *) kva; kva += sizeof(struct vmxnet3_rxq_shared); } if (sc->vmx_flags & VMXNET3_FLAG_RSS) { size = sizeof(struct vmxnet3_rss_shared); error = vmxnet3_dma_malloc(sc, size, 128, &sc->vmx_rss_dma); if (error) { device_printf(dev, "cannot alloc rss shared memory\n"); return (error); } sc->vmx_rss = (struct vmxnet3_rss_shared *) sc->vmx_rss_dma.dma_vaddr; } return (0); } static void vmxnet3_free_shared_data(struct vmxnet3_softc *sc) { if (sc->vmx_rss != NULL) { vmxnet3_dma_free(sc, 
&sc->vmx_rss_dma); sc->vmx_rss = NULL; } if (sc->vmx_qs != NULL) { vmxnet3_dma_free(sc, &sc->vmx_qs_dma); sc->vmx_qs = NULL; } if (sc->vmx_ds != NULL) { vmxnet3_dma_free(sc, &sc->vmx_ds_dma); sc->vmx_ds = NULL; } } static int vmxnet3_alloc_txq_data(struct vmxnet3_softc *sc) { device_t dev; struct vmxnet3_txqueue *txq; struct vmxnet3_txring *txr; struct vmxnet3_comp_ring *txc; size_t descsz, compsz; int i, q, error; dev = sc->vmx_dev; for (q = 0; q < sc->vmx_ntxqueues; q++) { txq = &sc->vmx_txq[q]; txr = &txq->vxtxq_cmd_ring; txc = &txq->vxtxq_comp_ring; descsz = txr->vxtxr_ndesc * sizeof(struct vmxnet3_txdesc); compsz = txr->vxtxr_ndesc * sizeof(struct vmxnet3_txcompdesc); error = bus_dma_tag_create(bus_get_dma_tag(dev), 1, 0, /* alignment, boundary */ BUS_SPACE_MAXADDR, /* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filter, filterarg */ VMXNET3_TX_MAXSIZE, /* maxsize */ VMXNET3_TX_MAXSEGS, /* nsegments */ VMXNET3_TX_MAXSEGSIZE, /* maxsegsize */ 0, /* flags */ NULL, NULL, /* lockfunc, lockarg */ &txr->vxtxr_txtag); if (error) { device_printf(dev, "unable to create Tx buffer tag for queue %d\n", q); return (error); } error = vmxnet3_dma_malloc(sc, descsz, 512, &txr->vxtxr_dma); if (error) { device_printf(dev, "cannot alloc Tx descriptors for " "queue %d error %d\n", q, error); return (error); } txr->vxtxr_txd = (struct vmxnet3_txdesc *) txr->vxtxr_dma.dma_vaddr; error = vmxnet3_dma_malloc(sc, compsz, 512, &txc->vxcr_dma); if (error) { device_printf(dev, "cannot alloc Tx comp descriptors " "for queue %d error %d\n", q, error); return (error); } txc->vxcr_u.txcd = (struct vmxnet3_txcompdesc *) txc->vxcr_dma.dma_vaddr; for (i = 0; i < txr->vxtxr_ndesc; i++) { error = bus_dmamap_create(txr->vxtxr_txtag, 0, &txr->vxtxr_txbuf[i].vtxb_dmamap); if (error) { device_printf(dev, "unable to create Tx buf " "dmamap for queue %d idx %d\n", q, i); return (error); } } } return (0); } static void vmxnet3_free_txq_data(struct vmxnet3_softc *sc) { device_t dev; struct vmxnet3_txqueue *txq; struct vmxnet3_txring *txr; struct vmxnet3_comp_ring *txc; struct vmxnet3_txbuf *txb; int i, q; dev = sc->vmx_dev; for (q = 0; q < sc->vmx_ntxqueues; q++) { txq = &sc->vmx_txq[q]; txr = &txq->vxtxq_cmd_ring; txc = &txq->vxtxq_comp_ring; for (i = 0; i < txr->vxtxr_ndesc; i++) { txb = &txr->vxtxr_txbuf[i]; if (txb->vtxb_dmamap != NULL) { bus_dmamap_destroy(txr->vxtxr_txtag, txb->vtxb_dmamap); txb->vtxb_dmamap = NULL; } } if (txc->vxcr_u.txcd != NULL) { vmxnet3_dma_free(sc, &txc->vxcr_dma); txc->vxcr_u.txcd = NULL; } if (txr->vxtxr_txd != NULL) { vmxnet3_dma_free(sc, &txr->vxtxr_dma); txr->vxtxr_txd = NULL; } if (txr->vxtxr_txtag != NULL) { bus_dma_tag_destroy(txr->vxtxr_txtag); txr->vxtxr_txtag = NULL; } } } static int vmxnet3_alloc_rxq_data(struct vmxnet3_softc *sc) { device_t dev; struct vmxnet3_rxqueue *rxq; struct vmxnet3_rxring *rxr; struct vmxnet3_comp_ring *rxc; int descsz, compsz; int i, j, q, error; dev = sc->vmx_dev; for (q = 0; q < sc->vmx_nrxqueues; q++) { rxq = &sc->vmx_rxq[q]; rxc = &rxq->vxrxq_comp_ring; compsz = 0; for (i = 0; i < VMXNET3_RXRINGS_PERQ; i++) { rxr = &rxq->vxrxq_cmd_ring[i]; descsz = rxr->vxrxr_ndesc * sizeof(struct vmxnet3_rxdesc); compsz += rxr->vxrxr_ndesc * sizeof(struct vmxnet3_rxcompdesc); error = bus_dma_tag_create(bus_get_dma_tag(dev), 1, 0, /* alignment, boundary */ BUS_SPACE_MAXADDR, /* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filter, filterarg */ MJUMPAGESIZE, /* maxsize */ 1, /* nsegments */ MJUMPAGESIZE, /* maxsegsize */ 0, /* flags */ NULL, NULL, 
/* lockfunc, lockarg */ &rxr->vxrxr_rxtag); if (error) { device_printf(dev, "unable to create Rx buffer tag for " "queue %d\n", q); return (error); } error = vmxnet3_dma_malloc(sc, descsz, 512, &rxr->vxrxr_dma); if (error) { device_printf(dev, "cannot allocate Rx " "descriptors for queue %d/%d error %d\n", i, q, error); return (error); } rxr->vxrxr_rxd = (struct vmxnet3_rxdesc *) rxr->vxrxr_dma.dma_vaddr; } error = vmxnet3_dma_malloc(sc, compsz, 512, &rxc->vxcr_dma); if (error) { device_printf(dev, "cannot alloc Rx comp descriptors " "for queue %d error %d\n", q, error); return (error); } rxc->vxcr_u.rxcd = (struct vmxnet3_rxcompdesc *) rxc->vxcr_dma.dma_vaddr; for (i = 0; i < VMXNET3_RXRINGS_PERQ; i++) { rxr = &rxq->vxrxq_cmd_ring[i]; error = bus_dmamap_create(rxr->vxrxr_rxtag, 0, &rxr->vxrxr_spare_dmap); if (error) { device_printf(dev, "unable to create spare " "dmamap for queue %d/%d error %d\n", q, i, error); return (error); } for (j = 0; j < rxr->vxrxr_ndesc; j++) { error = bus_dmamap_create(rxr->vxrxr_rxtag, 0, &rxr->vxrxr_rxbuf[j].vrxb_dmamap); if (error) { device_printf(dev, "unable to create " "dmamap for queue %d/%d slot %d " "error %d\n", q, i, j, error); return (error); } } } } return (0); } static void vmxnet3_free_rxq_data(struct vmxnet3_softc *sc) { device_t dev; struct vmxnet3_rxqueue *rxq; struct vmxnet3_rxring *rxr; struct vmxnet3_comp_ring *rxc; struct vmxnet3_rxbuf *rxb; int i, j, q; dev = sc->vmx_dev; for (q = 0; q < sc->vmx_nrxqueues; q++) { rxq = &sc->vmx_rxq[q]; rxc = &rxq->vxrxq_comp_ring; for (i = 0; i < VMXNET3_RXRINGS_PERQ; i++) { rxr = &rxq->vxrxq_cmd_ring[i]; if (rxr->vxrxr_spare_dmap != NULL) { bus_dmamap_destroy(rxr->vxrxr_rxtag, rxr->vxrxr_spare_dmap); rxr->vxrxr_spare_dmap = NULL; } for (j = 0; j < rxr->vxrxr_ndesc; j++) { rxb = &rxr->vxrxr_rxbuf[j]; if (rxb->vrxb_dmamap != NULL) { bus_dmamap_destroy(rxr->vxrxr_rxtag, rxb->vrxb_dmamap); rxb->vrxb_dmamap = NULL; } } } if (rxc->vxcr_u.rxcd != NULL) { vmxnet3_dma_free(sc, &rxc->vxcr_dma); rxc->vxcr_u.rxcd = NULL; } for (i = 0; i < VMXNET3_RXRINGS_PERQ; i++) { rxr = &rxq->vxrxq_cmd_ring[i]; if (rxr->vxrxr_rxd != NULL) { vmxnet3_dma_free(sc, &rxr->vxrxr_dma); rxr->vxrxr_rxd = NULL; } if (rxr->vxrxr_rxtag != NULL) { bus_dma_tag_destroy(rxr->vxrxr_rxtag); rxr->vxrxr_rxtag = NULL; } } } } static int vmxnet3_alloc_queue_data(struct vmxnet3_softc *sc) { int error; error = vmxnet3_alloc_txq_data(sc); if (error) return (error); error = vmxnet3_alloc_rxq_data(sc); if (error) return (error); return (0); } static void vmxnet3_free_queue_data(struct vmxnet3_softc *sc) { if (sc->vmx_rxq != NULL) vmxnet3_free_rxq_data(sc); if (sc->vmx_txq != NULL) vmxnet3_free_txq_data(sc); } static int vmxnet3_alloc_mcast_table(struct vmxnet3_softc *sc) { int error; error = vmxnet3_dma_malloc(sc, VMXNET3_MULTICAST_MAX * ETHER_ADDR_LEN, 32, &sc->vmx_mcast_dma); if (error) device_printf(sc->vmx_dev, "unable to alloc multicast table\n"); else sc->vmx_mcast = sc->vmx_mcast_dma.dma_vaddr; return (error); } static void vmxnet3_free_mcast_table(struct vmxnet3_softc *sc) { if (sc->vmx_mcast != NULL) { vmxnet3_dma_free(sc, &sc->vmx_mcast_dma); sc->vmx_mcast = NULL; } } static void vmxnet3_init_shared_data(struct vmxnet3_softc *sc) { struct vmxnet3_driver_shared *ds; struct vmxnet3_txqueue *txq; struct vmxnet3_txq_shared *txs; struct vmxnet3_rxqueue *rxq; struct vmxnet3_rxq_shared *rxs; int i; ds = sc->vmx_ds; /* * Initialize fields of the shared data that remains the same across * reinits. Note the shared data is zero'd when allocated. 
*/ ds->magic = VMXNET3_REV1_MAGIC; /* DriverInfo */ ds->version = VMXNET3_DRIVER_VERSION; ds->guest = VMXNET3_GOS_FREEBSD | #ifdef __LP64__ VMXNET3_GOS_64BIT; #else VMXNET3_GOS_32BIT; #endif ds->vmxnet3_revision = 1; ds->upt_version = 1; /* Misc. conf */ ds->driver_data = vtophys(sc); ds->driver_data_len = sizeof(struct vmxnet3_softc); ds->queue_shared = sc->vmx_qs_dma.dma_paddr; ds->queue_shared_len = sc->vmx_qs_dma.dma_size; ds->nrxsg_max = sc->vmx_max_rxsegs; /* RSS conf */ if (sc->vmx_flags & VMXNET3_FLAG_RSS) { ds->rss.version = 1; ds->rss.paddr = sc->vmx_rss_dma.dma_paddr; ds->rss.len = sc->vmx_rss_dma.dma_size; } /* Interrupt control. */ ds->automask = sc->vmx_intr_mask_mode == VMXNET3_IMM_AUTO; ds->nintr = sc->vmx_nintrs; ds->evintr = sc->vmx_event_intr_idx; ds->ictrl = VMXNET3_ICTRL_DISABLE_ALL; for (i = 0; i < sc->vmx_nintrs; i++) ds->modlevel[i] = UPT1_IMOD_ADAPTIVE; /* Receive filter. */ ds->mcast_table = sc->vmx_mcast_dma.dma_paddr; ds->mcast_tablelen = sc->vmx_mcast_dma.dma_size; /* Tx queues */ for (i = 0; i < sc->vmx_ntxqueues; i++) { txq = &sc->vmx_txq[i]; txs = txq->vxtxq_ts; txs->cmd_ring = txq->vxtxq_cmd_ring.vxtxr_dma.dma_paddr; txs->cmd_ring_len = txq->vxtxq_cmd_ring.vxtxr_ndesc; txs->comp_ring = txq->vxtxq_comp_ring.vxcr_dma.dma_paddr; txs->comp_ring_len = txq->vxtxq_comp_ring.vxcr_ndesc; txs->driver_data = vtophys(txq); txs->driver_data_len = sizeof(struct vmxnet3_txqueue); } /* Rx queues */ for (i = 0; i < sc->vmx_nrxqueues; i++) { rxq = &sc->vmx_rxq[i]; rxs = rxq->vxrxq_rs; rxs->cmd_ring[0] = rxq->vxrxq_cmd_ring[0].vxrxr_dma.dma_paddr; rxs->cmd_ring_len[0] = rxq->vxrxq_cmd_ring[0].vxrxr_ndesc; rxs->cmd_ring[1] = rxq->vxrxq_cmd_ring[1].vxrxr_dma.dma_paddr; rxs->cmd_ring_len[1] = rxq->vxrxq_cmd_ring[1].vxrxr_ndesc; rxs->comp_ring = rxq->vxrxq_comp_ring.vxcr_dma.dma_paddr; rxs->comp_ring_len = rxq->vxrxq_comp_ring.vxcr_ndesc; rxs->driver_data = vtophys(rxq); rxs->driver_data_len = sizeof(struct vmxnet3_rxqueue); } } static void vmxnet3_init_hwassist(struct vmxnet3_softc *sc) { struct ifnet *ifp = sc->vmx_ifp; uint64_t hwassist; hwassist = 0; if (ifp->if_capenable & IFCAP_TXCSUM) hwassist |= VMXNET3_CSUM_OFFLOAD; if (ifp->if_capenable & IFCAP_TXCSUM_IPV6) hwassist |= VMXNET3_CSUM_OFFLOAD_IPV6; if (ifp->if_capenable & IFCAP_TSO4) hwassist |= CSUM_IP_TSO; if (ifp->if_capenable & IFCAP_TSO6) hwassist |= CSUM_IP6_TSO; ifp->if_hwassist = hwassist; } static void vmxnet3_reinit_interface(struct vmxnet3_softc *sc) { struct ifnet *ifp; ifp = sc->vmx_ifp; /* Use the current MAC address. */ bcopy(IF_LLADDR(sc->vmx_ifp), sc->vmx_lladdr, ETHER_ADDR_LEN); vmxnet3_set_lladdr(sc); vmxnet3_init_hwassist(sc); } static void vmxnet3_reinit_rss_shared_data(struct vmxnet3_softc *sc) { /* * Use the same key as the Linux driver until FreeBSD can do * RSS (presumably Toeplitz) in software. 
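vmxnet3_reinit_rss_shared_data() further below spreads the RSS indirection table round-robin across the Rx queues (ind_table[i] = i % vmx_nrxqueues). For a hypothetical 4-queue configuration the table simply cycles 0, 1, 2, 3, 0, 1, ...; a trivial check of that fill:

#include <stdio.h>

/*
 * Round-robin fill as in vmxnet3_reinit_rss_shared_data(); the queue
 * count and table size here are made-up example values.
 */
int
main(void)
{
	int i, nrxqueues = 4, ind_table_size = 32;

	for (i = 0; i < ind_table_size; i++)
		printf("ind_table[%2d] = %d\n", i, i % nrxqueues);
	return (0);
}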
*/ static const uint8_t rss_key[UPT1_RSS_MAX_KEY_SIZE] = { 0x3b, 0x56, 0xd1, 0x56, 0x13, 0x4a, 0xe7, 0xac, 0xe8, 0x79, 0x09, 0x75, 0xe8, 0x65, 0x79, 0x28, 0x35, 0x12, 0xb9, 0x56, 0x7c, 0x76, 0x4b, 0x70, 0xd8, 0x56, 0xa3, 0x18, 0x9b, 0x0a, 0xee, 0xf3, 0x96, 0xa6, 0x9f, 0x8f, 0x9e, 0x8c, 0x90, 0xc9, }; struct vmxnet3_driver_shared *ds; struct vmxnet3_rss_shared *rss; int i; ds = sc->vmx_ds; rss = sc->vmx_rss; rss->hash_type = UPT1_RSS_HASH_TYPE_IPV4 | UPT1_RSS_HASH_TYPE_TCP_IPV4 | UPT1_RSS_HASH_TYPE_IPV6 | UPT1_RSS_HASH_TYPE_TCP_IPV6; rss->hash_func = UPT1_RSS_HASH_FUNC_TOEPLITZ; rss->hash_key_size = UPT1_RSS_MAX_KEY_SIZE; rss->ind_table_size = UPT1_RSS_MAX_IND_TABLE_SIZE; memcpy(rss->hash_key, rss_key, UPT1_RSS_MAX_KEY_SIZE); for (i = 0; i < UPT1_RSS_MAX_IND_TABLE_SIZE; i++) rss->ind_table[i] = i % sc->vmx_nrxqueues; } static void vmxnet3_reinit_shared_data(struct vmxnet3_softc *sc) { struct ifnet *ifp; struct vmxnet3_driver_shared *ds; ifp = sc->vmx_ifp; ds = sc->vmx_ds; ds->mtu = ifp->if_mtu; ds->ntxqueue = sc->vmx_ntxqueues; ds->nrxqueue = sc->vmx_nrxqueues; ds->upt_features = 0; if (ifp->if_capenable & (IFCAP_RXCSUM | IFCAP_RXCSUM_IPV6)) ds->upt_features |= UPT1_F_CSUM; if (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) ds->upt_features |= UPT1_F_VLAN; if (ifp->if_capenable & IFCAP_LRO) ds->upt_features |= UPT1_F_LRO; if (sc->vmx_flags & VMXNET3_FLAG_RSS) { ds->upt_features |= UPT1_F_RSS; vmxnet3_reinit_rss_shared_data(sc); } vmxnet3_write_bar1(sc, VMXNET3_BAR1_DSL, sc->vmx_ds_dma.dma_paddr); vmxnet3_write_bar1(sc, VMXNET3_BAR1_DSH, (uint64_t) sc->vmx_ds_dma.dma_paddr >> 32); } static int vmxnet3_alloc_data(struct vmxnet3_softc *sc) { int error; error = vmxnet3_alloc_shared_data(sc); if (error) return (error); error = vmxnet3_alloc_queue_data(sc); if (error) return (error); error = vmxnet3_alloc_mcast_table(sc); if (error) return (error); vmxnet3_init_shared_data(sc); return (0); } static void vmxnet3_free_data(struct vmxnet3_softc *sc) { vmxnet3_free_mcast_table(sc); vmxnet3_free_queue_data(sc); vmxnet3_free_shared_data(sc); } static int vmxnet3_setup_interface(struct vmxnet3_softc *sc) { device_t dev; struct ifnet *ifp; dev = sc->vmx_dev; ifp = sc->vmx_ifp = if_alloc(IFT_ETHER); if (ifp == NULL) { device_printf(dev, "cannot allocate ifnet structure\n"); return (ENOSPC); } if_initname(ifp, device_get_name(dev), device_get_unit(dev)); #if __FreeBSD_version < 1000025 ifp->if_baudrate = 1000000000; #elif __FreeBSD_version < 1100011 if_initbaudrate(ifp, IF_Gbps(10)); #else ifp->if_baudrate = IF_Gbps(10); #endif ifp->if_softc = sc; ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; ifp->if_init = vmxnet3_init; ifp->if_ioctl = vmxnet3_ioctl; ifp->if_get_counter = vmxnet3_get_counter; ifp->if_hw_tsomax = 65536 - (ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN); ifp->if_hw_tsomaxsegcount = VMXNET3_TX_MAXSEGS; ifp->if_hw_tsomaxsegsize = VMXNET3_TX_MAXSEGSIZE; #ifdef VMXNET3_LEGACY_TX ifp->if_start = vmxnet3_start; ifp->if_snd.ifq_drv_maxlen = sc->vmx_ntxdescs - 1; IFQ_SET_MAXLEN(&ifp->if_snd, sc->vmx_ntxdescs - 1); IFQ_SET_READY(&ifp->if_snd); #else ifp->if_transmit = vmxnet3_txq_mq_start; ifp->if_qflush = vmxnet3_qflush; #endif vmxnet3_get_lladdr(sc); ether_ifattach(ifp, sc->vmx_lladdr); ifp->if_capabilities |= IFCAP_RXCSUM | IFCAP_TXCSUM; ifp->if_capabilities |= IFCAP_RXCSUM_IPV6 | IFCAP_TXCSUM_IPV6; ifp->if_capabilities |= IFCAP_TSO4 | IFCAP_TSO6; ifp->if_capabilities |= IFCAP_VLAN_MTU | IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_HWCSUM; ifp->if_capenable = ifp->if_capabilities; /* These capabilities are 
not enabled by default. */ ifp->if_capabilities |= IFCAP_LRO | IFCAP_VLAN_HWFILTER; sc->vmx_vlan_attach = EVENTHANDLER_REGISTER(vlan_config, vmxnet3_register_vlan, sc, EVENTHANDLER_PRI_FIRST); sc->vmx_vlan_detach = EVENTHANDLER_REGISTER(vlan_config, vmxnet3_unregister_vlan, sc, EVENTHANDLER_PRI_FIRST); ifmedia_init(&sc->vmx_media, 0, vmxnet3_media_change, vmxnet3_media_status); ifmedia_add(&sc->vmx_media, IFM_ETHER | IFM_AUTO, 0, NULL); ifmedia_set(&sc->vmx_media, IFM_ETHER | IFM_AUTO); return (0); } static void vmxnet3_evintr(struct vmxnet3_softc *sc) { device_t dev; struct ifnet *ifp; struct vmxnet3_txq_shared *ts; struct vmxnet3_rxq_shared *rs; uint32_t event; int reset; dev = sc->vmx_dev; ifp = sc->vmx_ifp; reset = 0; VMXNET3_CORE_LOCK(sc); /* Clear events. */ event = sc->vmx_ds->event; vmxnet3_write_bar1(sc, VMXNET3_BAR1_EVENT, event); if (event & VMXNET3_EVENT_LINK) { vmxnet3_link_status(sc); if (sc->vmx_link_active != 0) vmxnet3_tx_start_all(sc); } if (event & (VMXNET3_EVENT_TQERROR | VMXNET3_EVENT_RQERROR)) { reset = 1; vmxnet3_read_cmd(sc, VMXNET3_CMD_GET_STATUS); ts = sc->vmx_txq[0].vxtxq_ts; if (ts->stopped != 0) device_printf(dev, "Tx queue error %#x\n", ts->error); rs = sc->vmx_rxq[0].vxrxq_rs; if (rs->stopped != 0) device_printf(dev, "Rx queue error %#x\n", rs->error); device_printf(dev, "Rx/Tx queue error event ... resetting\n"); } if (event & VMXNET3_EVENT_DIC) device_printf(dev, "device implementation change event\n"); if (event & VMXNET3_EVENT_DEBUG) device_printf(dev, "debug event\n"); if (reset != 0) { ifp->if_drv_flags &= ~IFF_DRV_RUNNING; vmxnet3_init_locked(sc); } VMXNET3_CORE_UNLOCK(sc); } static void vmxnet3_txq_eof(struct vmxnet3_txqueue *txq) { struct vmxnet3_softc *sc; struct ifnet *ifp; struct vmxnet3_txring *txr; struct vmxnet3_comp_ring *txc; struct vmxnet3_txcompdesc *txcd; struct vmxnet3_txbuf *txb; struct mbuf *m; u_int sop; sc = txq->vxtxq_sc; ifp = sc->vmx_ifp; txr = &txq->vxtxq_cmd_ring; txc = &txq->vxtxq_comp_ring; VMXNET3_TXQ_LOCK_ASSERT(txq); for (;;) { txcd = &txc->vxcr_u.txcd[txc->vxcr_next]; if (txcd->gen != txc->vxcr_gen) break; vmxnet3_barrier(sc, VMXNET3_BARRIER_RD); if (++txc->vxcr_next == txc->vxcr_ndesc) { txc->vxcr_next = 0; txc->vxcr_gen ^= 1; } sop = txr->vxtxr_next; txb = &txr->vxtxr_txbuf[sop]; if ((m = txb->vtxb_m) != NULL) { bus_dmamap_sync(txr->vxtxr_txtag, txb->vtxb_dmamap, BUS_DMASYNC_POSTWRITE); bus_dmamap_unload(txr->vxtxr_txtag, txb->vtxb_dmamap); txq->vxtxq_stats.vmtxs_opackets++; txq->vxtxq_stats.vmtxs_obytes += m->m_pkthdr.len; if (m->m_flags & M_MCAST) txq->vxtxq_stats.vmtxs_omcasts++; m_freem(m); txb->vtxb_m = NULL; } txr->vxtxr_next = (txcd->eop_idx + 1) % txr->vxtxr_ndesc; } if (txr->vxtxr_head == txr->vxtxr_next) txq->vxtxq_watchdog = 0; } static int vmxnet3_newbuf(struct vmxnet3_softc *sc, struct vmxnet3_rxring *rxr) { struct ifnet *ifp; struct mbuf *m; struct vmxnet3_rxdesc *rxd; struct vmxnet3_rxbuf *rxb; bus_dma_tag_t tag; bus_dmamap_t dmap; bus_dma_segment_t segs[1]; int idx, clsize, btype, flags, nsegs, error; ifp = sc->vmx_ifp; tag = rxr->vxrxr_rxtag; dmap = rxr->vxrxr_spare_dmap; idx = rxr->vxrxr_fill; rxd = &rxr->vxrxr_rxd[idx]; rxb = &rxr->vxrxr_rxbuf[idx]; #ifdef VMXNET3_FAILPOINTS KFAIL_POINT_CODE(VMXNET3_FP, newbuf, return ENOBUFS); if (rxr->vxrxr_rid != 0) KFAIL_POINT_CODE(VMXNET3_FP, newbuf_body_only, return ENOBUFS); #endif if (rxr->vxrxr_rid == 0 && (idx % sc->vmx_rx_max_chain) == 0) { flags = M_PKTHDR; clsize = MCLBYTES; btype = VMXNET3_BTYPE_HEAD; } else { #if __FreeBSD_version < 902001 /* * These 
mbufs will never be used for the start of a frame. * Roughly prior to branching releng/9.2, the load_mbuf_sg() * required the mbuf to always be a packet header. Avoid * unnecessary mbuf initialization in newer versions where * that is not the case. */ flags = M_PKTHDR; #else flags = 0; #endif clsize = MJUMPAGESIZE; btype = VMXNET3_BTYPE_BODY; } m = m_getjcl(M_NOWAIT, MT_DATA, flags, clsize); if (m == NULL) { sc->vmx_stats.vmst_mgetcl_failed++; return (ENOBUFS); } if (btype == VMXNET3_BTYPE_HEAD) { m->m_len = m->m_pkthdr.len = clsize; m_adj(m, ETHER_ALIGN); } else m->m_len = clsize; error = bus_dmamap_load_mbuf_sg(tag, dmap, m, &segs[0], &nsegs, BUS_DMA_NOWAIT); if (error) { m_freem(m); sc->vmx_stats.vmst_mbuf_load_failed++; return (error); } KASSERT(nsegs == 1, ("%s: mbuf %p with too many segments %d", __func__, m, nsegs)); #if __FreeBSD_version < 902001 if (btype == VMXNET3_BTYPE_BODY) m->m_flags &= ~M_PKTHDR; #endif if (rxb->vrxb_m != NULL) { bus_dmamap_sync(tag, rxb->vrxb_dmamap, BUS_DMASYNC_POSTREAD); bus_dmamap_unload(tag, rxb->vrxb_dmamap); } rxr->vxrxr_spare_dmap = rxb->vrxb_dmamap; rxb->vrxb_dmamap = dmap; rxb->vrxb_m = m; rxd->addr = segs[0].ds_addr; rxd->len = segs[0].ds_len; rxd->btype = btype; rxd->gen = rxr->vxrxr_gen; vmxnet3_rxr_increment_fill(rxr); return (0); } static void vmxnet3_rxq_eof_discard(struct vmxnet3_rxqueue *rxq, struct vmxnet3_rxring *rxr, int idx) { struct vmxnet3_rxdesc *rxd; rxd = &rxr->vxrxr_rxd[idx]; rxd->gen = rxr->vxrxr_gen; vmxnet3_rxr_increment_fill(rxr); } static void vmxnet3_rxq_discard_chain(struct vmxnet3_rxqueue *rxq) { struct vmxnet3_softc *sc; struct vmxnet3_rxring *rxr; struct vmxnet3_comp_ring *rxc; struct vmxnet3_rxcompdesc *rxcd; int idx, eof; sc = rxq->vxrxq_sc; rxc = &rxq->vxrxq_comp_ring; do { rxcd = &rxc->vxcr_u.rxcd[rxc->vxcr_next]; if (rxcd->gen != rxc->vxcr_gen) break; /* Not expected. 
*/ vmxnet3_barrier(sc, VMXNET3_BARRIER_RD); if (++rxc->vxcr_next == rxc->vxcr_ndesc) { rxc->vxcr_next = 0; rxc->vxcr_gen ^= 1; } idx = rxcd->rxd_idx; eof = rxcd->eop; if (rxcd->qid < sc->vmx_nrxqueues) rxr = &rxq->vxrxq_cmd_ring[0]; else rxr = &rxq->vxrxq_cmd_ring[1]; vmxnet3_rxq_eof_discard(rxq, rxr, idx); } while (!eof); } static void vmxnet3_rx_csum(struct vmxnet3_rxcompdesc *rxcd, struct mbuf *m) { if (rxcd->ipv4) { m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED; if (rxcd->ipcsum_ok) m->m_pkthdr.csum_flags |= CSUM_IP_VALID; } if (!rxcd->fragment) { if (rxcd->csum_ok && (rxcd->tcp || rxcd->udp)) { m->m_pkthdr.csum_flags |= CSUM_DATA_VALID | CSUM_PSEUDO_HDR; m->m_pkthdr.csum_data = 0xFFFF; } } } static void vmxnet3_rxq_input(struct vmxnet3_rxqueue *rxq, struct vmxnet3_rxcompdesc *rxcd, struct mbuf *m) { struct vmxnet3_softc *sc; struct ifnet *ifp; sc = rxq->vxrxq_sc; ifp = sc->vmx_ifp; if (rxcd->error) { rxq->vxrxq_stats.vmrxs_ierrors++; m_freem(m); return; } #ifdef notyet switch (rxcd->rss_type) { case VMXNET3_RCD_RSS_TYPE_IPV4: m->m_pkthdr.flowid = rxcd->rss_hash; M_HASHTYPE_SET(m, M_HASHTYPE_RSS_IPV4); break; case VMXNET3_RCD_RSS_TYPE_TCPIPV4: m->m_pkthdr.flowid = rxcd->rss_hash; M_HASHTYPE_SET(m, M_HASHTYPE_RSS_TCP_IPV4); break; case VMXNET3_RCD_RSS_TYPE_IPV6: m->m_pkthdr.flowid = rxcd->rss_hash; M_HASHTYPE_SET(m, M_HASHTYPE_RSS_IPV6); break; case VMXNET3_RCD_RSS_TYPE_TCPIPV6: m->m_pkthdr.flowid = rxcd->rss_hash; M_HASHTYPE_SET(m, M_HASHTYPE_RSS_TCP_IPV6); break; default: /* VMXNET3_RCD_RSS_TYPE_NONE */ m->m_pkthdr.flowid = rxq->vxrxq_id; M_HASHTYPE_SET(m, M_HASHTYPE_OPAQUE); break; } #else m->m_pkthdr.flowid = rxq->vxrxq_id; M_HASHTYPE_SET(m, M_HASHTYPE_OPAQUE); #endif if (!rxcd->no_csum) vmxnet3_rx_csum(rxcd, m); if (rxcd->vlan) { m->m_flags |= M_VLANTAG; m->m_pkthdr.ether_vtag = rxcd->vtag; } rxq->vxrxq_stats.vmrxs_ipackets++; rxq->vxrxq_stats.vmrxs_ibytes += m->m_pkthdr.len; VMXNET3_RXQ_UNLOCK(rxq); (*ifp->if_input)(ifp, m); VMXNET3_RXQ_LOCK(rxq); } static void vmxnet3_rxq_eof(struct vmxnet3_rxqueue *rxq) { struct vmxnet3_softc *sc; struct ifnet *ifp; struct vmxnet3_rxring *rxr; struct vmxnet3_comp_ring *rxc; struct vmxnet3_rxdesc *rxd; struct vmxnet3_rxcompdesc *rxcd; struct mbuf *m, *m_head, *m_tail; int idx, length; sc = rxq->vxrxq_sc; ifp = sc->vmx_ifp; rxc = &rxq->vxrxq_comp_ring; VMXNET3_RXQ_LOCK_ASSERT(rxq); if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) return; m_head = rxq->vxrxq_mhead; rxq->vxrxq_mhead = NULL; m_tail = rxq->vxrxq_mtail; rxq->vxrxq_mtail = NULL; MPASS(m_head == NULL || m_tail != NULL); for (;;) { rxcd = &rxc->vxcr_u.rxcd[rxc->vxcr_next]; if (rxcd->gen != rxc->vxcr_gen) { rxq->vxrxq_mhead = m_head; rxq->vxrxq_mtail = m_tail; break; } vmxnet3_barrier(sc, VMXNET3_BARRIER_RD); if (++rxc->vxcr_next == rxc->vxcr_ndesc) { rxc->vxcr_next = 0; rxc->vxcr_gen ^= 1; } idx = rxcd->rxd_idx; length = rxcd->len; if (rxcd->qid < sc->vmx_nrxqueues) rxr = &rxq->vxrxq_cmd_ring[0]; else rxr = &rxq->vxrxq_cmd_ring[1]; rxd = &rxr->vxrxr_rxd[idx]; m = rxr->vxrxr_rxbuf[idx].vrxb_m; KASSERT(m != NULL, ("%s: queue %d idx %d without mbuf", __func__, rxcd->qid, idx)); /* * The host may skip descriptors. We detect this when this * descriptor does not match the previous fill index. Catch * up with the host now. 
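The completion loops above (vmxnet3_txq_eof(), vmxnet3_rxq_discard_chain() and vmxnet3_rxq_eof()) share one ownership protocol: an entry belongs to the driver only while its generation bit matches the generation the consumer expects, and the expected generation flips each time the consumer wraps the ring. A stripped-down model of that consumer, with hypothetical types and names rather than the driver's:

#include <stdint.h>
#include <stdio.h>

struct comp_desc {
	uint32_t payload;
	uint32_t gen;		/* written by the producer (the device) */
};

#define	RING_NDESC	4

static struct comp_desc ring[RING_NDESC];
static int ring_next;			/* next entry to examine */
static uint32_t ring_gen = 1;		/* generation the consumer expects */

static void
ring_drain(void)
{
	struct comp_desc *d;

	for (;;) {
		d = &ring[ring_next];
		if (d->gen != ring_gen)
			break;		/* still owned by the device */
		printf("consumed %u\n", d->payload);
		if (++ring_next == RING_NDESC) {
			ring_next = 0;
			ring_gen ^= 1;	/* wrapped: expect the other generation */
		}
	}
}

int
main(void)
{
	int i;

	/* Pretend the device produced three entries in generation 1. */
	for (i = 0; i < 3; i++) {
		ring[i].payload = i;
		ring[i].gen = 1;
	}
	ring_drain();			/* consumes 0, 1, 2 and stops at entry 3 */
	return (0);
}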
*/ if (__predict_false(rxr->vxrxr_fill != idx)) { while (rxr->vxrxr_fill != idx) { rxr->vxrxr_rxd[rxr->vxrxr_fill].gen = rxr->vxrxr_gen; vmxnet3_rxr_increment_fill(rxr); } } if (rxcd->sop) { KASSERT(rxd->btype == VMXNET3_BTYPE_HEAD, ("%s: start of frame w/o head buffer", __func__)); KASSERT(rxr == &rxq->vxrxq_cmd_ring[0], ("%s: start of frame not in ring 0", __func__)); KASSERT((idx % sc->vmx_rx_max_chain) == 0, ("%s: start of frame at unexcepted index %d (%d)", __func__, idx, sc->vmx_rx_max_chain)); KASSERT(m_head == NULL, ("%s: duplicate start of frame?", __func__)); if (length == 0) { /* Just ignore this descriptor. */ vmxnet3_rxq_eof_discard(rxq, rxr, idx); goto nextp; } if (vmxnet3_newbuf(sc, rxr) != 0) { rxq->vxrxq_stats.vmrxs_iqdrops++; vmxnet3_rxq_eof_discard(rxq, rxr, idx); if (!rxcd->eop) vmxnet3_rxq_discard_chain(rxq); goto nextp; } m->m_pkthdr.rcvif = ifp; m->m_pkthdr.len = m->m_len = length; m->m_pkthdr.csum_flags = 0; m_head = m_tail = m; } else { KASSERT(rxd->btype == VMXNET3_BTYPE_BODY, ("%s: non start of frame w/o body buffer", __func__)); if (m_head == NULL && m_tail == NULL) { /* * This is a continuation of a packet that we * started to drop, but could not drop entirely * because this segment was still owned by the * host. So, drop the remainder now. */ vmxnet3_rxq_eof_discard(rxq, rxr, idx); if (!rxcd->eop) vmxnet3_rxq_discard_chain(rxq); goto nextp; } KASSERT(m_head != NULL, ("%s: frame not started?", __func__)); if (vmxnet3_newbuf(sc, rxr) != 0) { rxq->vxrxq_stats.vmrxs_iqdrops++; vmxnet3_rxq_eof_discard(rxq, rxr, idx); if (!rxcd->eop) vmxnet3_rxq_discard_chain(rxq); m_freem(m_head); m_head = m_tail = NULL; goto nextp; } m->m_len = length; m_head->m_pkthdr.len += length; m_tail->m_next = m; m_tail = m; } if (rxcd->eop) { vmxnet3_rxq_input(rxq, rxcd, m_head); m_head = m_tail = NULL; /* Must recheck after dropping the Rx lock. 
*/ if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) break; } nextp: if (__predict_false(rxq->vxrxq_rs->update_rxhead)) { int qid = rxcd->qid; bus_size_t r; idx = (idx + 1) % rxr->vxrxr_ndesc; if (qid >= sc->vmx_nrxqueues) { qid -= sc->vmx_nrxqueues; r = VMXNET3_BAR0_RXH2(qid); } else r = VMXNET3_BAR0_RXH1(qid); vmxnet3_write_bar0(sc, r, idx); } } } static void vmxnet3_legacy_intr(void *xsc) { struct vmxnet3_softc *sc; struct vmxnet3_rxqueue *rxq; struct vmxnet3_txqueue *txq; sc = xsc; rxq = &sc->vmx_rxq[0]; txq = &sc->vmx_txq[0]; if (sc->vmx_intr_type == VMXNET3_IT_LEGACY) { if (vmxnet3_read_bar1(sc, VMXNET3_BAR1_INTR) == 0) return; } if (sc->vmx_intr_mask_mode == VMXNET3_IMM_ACTIVE) vmxnet3_disable_all_intrs(sc); if (sc->vmx_ds->event != 0) vmxnet3_evintr(sc); VMXNET3_RXQ_LOCK(rxq); vmxnet3_rxq_eof(rxq); VMXNET3_RXQ_UNLOCK(rxq); VMXNET3_TXQ_LOCK(txq); vmxnet3_txq_eof(txq); vmxnet3_txq_start(txq); VMXNET3_TXQ_UNLOCK(txq); vmxnet3_enable_all_intrs(sc); } static void vmxnet3_txq_intr(void *xtxq) { struct vmxnet3_softc *sc; struct vmxnet3_txqueue *txq; txq = xtxq; sc = txq->vxtxq_sc; if (sc->vmx_intr_mask_mode == VMXNET3_IMM_ACTIVE) vmxnet3_disable_intr(sc, txq->vxtxq_intr_idx); VMXNET3_TXQ_LOCK(txq); vmxnet3_txq_eof(txq); vmxnet3_txq_start(txq); VMXNET3_TXQ_UNLOCK(txq); vmxnet3_enable_intr(sc, txq->vxtxq_intr_idx); } static void vmxnet3_rxq_intr(void *xrxq) { struct vmxnet3_softc *sc; struct vmxnet3_rxqueue *rxq; rxq = xrxq; sc = rxq->vxrxq_sc; if (sc->vmx_intr_mask_mode == VMXNET3_IMM_ACTIVE) vmxnet3_disable_intr(sc, rxq->vxrxq_intr_idx); VMXNET3_RXQ_LOCK(rxq); vmxnet3_rxq_eof(rxq); VMXNET3_RXQ_UNLOCK(rxq); vmxnet3_enable_intr(sc, rxq->vxrxq_intr_idx); } static void vmxnet3_event_intr(void *xsc) { struct vmxnet3_softc *sc; sc = xsc; if (sc->vmx_intr_mask_mode == VMXNET3_IMM_ACTIVE) vmxnet3_disable_intr(sc, sc->vmx_event_intr_idx); if (sc->vmx_ds->event != 0) vmxnet3_evintr(sc); vmxnet3_enable_intr(sc, sc->vmx_event_intr_idx); } static void vmxnet3_txstop(struct vmxnet3_softc *sc, struct vmxnet3_txqueue *txq) { struct vmxnet3_txring *txr; struct vmxnet3_txbuf *txb; int i; txr = &txq->vxtxq_cmd_ring; for (i = 0; i < txr->vxtxr_ndesc; i++) { txb = &txr->vxtxr_txbuf[i]; if (txb->vtxb_m == NULL) continue; bus_dmamap_sync(txr->vxtxr_txtag, txb->vtxb_dmamap, BUS_DMASYNC_POSTWRITE); bus_dmamap_unload(txr->vxtxr_txtag, txb->vtxb_dmamap); m_freem(txb->vtxb_m); txb->vtxb_m = NULL; } } static void vmxnet3_rxstop(struct vmxnet3_softc *sc, struct vmxnet3_rxqueue *rxq) { struct vmxnet3_rxring *rxr; struct vmxnet3_rxbuf *rxb; int i, j; if (rxq->vxrxq_mhead != NULL) { m_freem(rxq->vxrxq_mhead); rxq->vxrxq_mhead = NULL; rxq->vxrxq_mtail = NULL; } for (i = 0; i < VMXNET3_RXRINGS_PERQ; i++) { rxr = &rxq->vxrxq_cmd_ring[i]; for (j = 0; j < rxr->vxrxr_ndesc; j++) { rxb = &rxr->vxrxr_rxbuf[j]; if (rxb->vrxb_m == NULL) continue; bus_dmamap_sync(rxr->vxrxr_rxtag, rxb->vrxb_dmamap, BUS_DMASYNC_POSTREAD); bus_dmamap_unload(rxr->vxrxr_rxtag, rxb->vrxb_dmamap); m_freem(rxb->vrxb_m); rxb->vrxb_m = NULL; } } } static void vmxnet3_stop_rendezvous(struct vmxnet3_softc *sc) { struct vmxnet3_rxqueue *rxq; struct vmxnet3_txqueue *txq; int i; for (i = 0; i < sc->vmx_nrxqueues; i++) { rxq = &sc->vmx_rxq[i]; VMXNET3_RXQ_LOCK(rxq); VMXNET3_RXQ_UNLOCK(rxq); } for (i = 0; i < sc->vmx_ntxqueues; i++) { txq = &sc->vmx_txq[i]; VMXNET3_TXQ_LOCK(txq); VMXNET3_TXQ_UNLOCK(txq); } } static void vmxnet3_stop(struct vmxnet3_softc *sc) { struct ifnet *ifp; int q; ifp = sc->vmx_ifp; VMXNET3_CORE_LOCK_ASSERT(sc); ifp->if_drv_flags &= 
~IFF_DRV_RUNNING; sc->vmx_link_active = 0; callout_stop(&sc->vmx_tick); /* Disable interrupts. */ vmxnet3_disable_all_intrs(sc); vmxnet3_write_cmd(sc, VMXNET3_CMD_DISABLE); vmxnet3_stop_rendezvous(sc); for (q = 0; q < sc->vmx_ntxqueues; q++) vmxnet3_txstop(sc, &sc->vmx_txq[q]); for (q = 0; q < sc->vmx_nrxqueues; q++) vmxnet3_rxstop(sc, &sc->vmx_rxq[q]); vmxnet3_write_cmd(sc, VMXNET3_CMD_RESET); } static void vmxnet3_txinit(struct vmxnet3_softc *sc, struct vmxnet3_txqueue *txq) { struct vmxnet3_txring *txr; struct vmxnet3_comp_ring *txc; txr = &txq->vxtxq_cmd_ring; txr->vxtxr_head = 0; txr->vxtxr_next = 0; txr->vxtxr_gen = VMXNET3_INIT_GEN; bzero(txr->vxtxr_txd, txr->vxtxr_ndesc * sizeof(struct vmxnet3_txdesc)); txc = &txq->vxtxq_comp_ring; txc->vxcr_next = 0; txc->vxcr_gen = VMXNET3_INIT_GEN; bzero(txc->vxcr_u.txcd, txc->vxcr_ndesc * sizeof(struct vmxnet3_txcompdesc)); } static int vmxnet3_rxinit(struct vmxnet3_softc *sc, struct vmxnet3_rxqueue *rxq) { struct ifnet *ifp; struct vmxnet3_rxring *rxr; struct vmxnet3_comp_ring *rxc; int i, populate, idx, frame_size, error; ifp = sc->vmx_ifp; frame_size = ETHER_ALIGN + sizeof(struct ether_vlan_header) + ifp->if_mtu; /* * If the MTU causes us to exceed what a regular sized cluster can * handle, we allocate a second MJUMPAGESIZE cluster after it in * ring 0. If in use, ring 1 always contains MJUMPAGESIZE clusters. * * Keep rx_max_chain a divisor of the maximum Rx ring size to make * our life easier. We do not support changing the ring size after * the attach. */ if (frame_size <= MCLBYTES) sc->vmx_rx_max_chain = 1; else sc->vmx_rx_max_chain = 2; /* * Only populate ring 1 if the configuration will take advantage * of it. That is either when LRO is enabled or the frame size * exceeds what ring 0 can contain. */ if ((ifp->if_capenable & IFCAP_LRO) == 0 && frame_size <= MCLBYTES + MJUMPAGESIZE) populate = 1; else populate = VMXNET3_RXRINGS_PERQ; for (i = 0; i < populate; i++) { rxr = &rxq->vxrxq_cmd_ring[i]; rxr->vxrxr_fill = 0; rxr->vxrxr_gen = VMXNET3_INIT_GEN; bzero(rxr->vxrxr_rxd, rxr->vxrxr_ndesc * sizeof(struct vmxnet3_rxdesc)); for (idx = 0; idx < rxr->vxrxr_ndesc; idx++) { error = vmxnet3_newbuf(sc, rxr); if (error) return (error); } } for (/**/; i < VMXNET3_RXRINGS_PERQ; i++) { rxr = &rxq->vxrxq_cmd_ring[i]; rxr->vxrxr_fill = 0; rxr->vxrxr_gen = 0; bzero(rxr->vxrxr_rxd, rxr->vxrxr_ndesc * sizeof(struct vmxnet3_rxdesc)); } rxc = &rxq->vxrxq_comp_ring; rxc->vxcr_next = 0; rxc->vxcr_gen = VMXNET3_INIT_GEN; bzero(rxc->vxcr_u.rxcd, rxc->vxcr_ndesc * sizeof(struct vmxnet3_rxcompdesc)); return (0); } static int vmxnet3_reinit_queues(struct vmxnet3_softc *sc) { device_t dev; int q, error; dev = sc->vmx_dev; for (q = 0; q < sc->vmx_ntxqueues; q++) vmxnet3_txinit(sc, &sc->vmx_txq[q]); for (q = 0; q < sc->vmx_nrxqueues; q++) { error = vmxnet3_rxinit(sc, &sc->vmx_rxq[q]); if (error) { device_printf(dev, "cannot populate Rx queue %d\n", q); return (error); } } return (0); } static int vmxnet3_enable_device(struct vmxnet3_softc *sc) { int q; if (vmxnet3_read_cmd(sc, VMXNET3_CMD_ENABLE) != 0) { device_printf(sc->vmx_dev, "device enable command failed!\n"); return (1); } /* Reset the Rx queue heads. 
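 * Each Rx queue exposes two head registers in BAR0, one per command
 * ring: VMXNET3_BAR0_RXH1(q) for ring 0 and VMXNET3_BAR0_RXH2(q) for
 * ring 1. Both are pointed back at descriptor 0 here to match the
 * freshly initialized command rings.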
*/ for (q = 0; q < sc->vmx_nrxqueues; q++) { vmxnet3_write_bar0(sc, VMXNET3_BAR0_RXH1(q), 0); vmxnet3_write_bar0(sc, VMXNET3_BAR0_RXH2(q), 0); } return (0); } static void vmxnet3_reinit_rxfilters(struct vmxnet3_softc *sc) { struct ifnet *ifp; ifp = sc->vmx_ifp; vmxnet3_set_rxfilter(sc); if (ifp->if_capenable & IFCAP_VLAN_HWFILTER) bcopy(sc->vmx_vlan_filter, sc->vmx_ds->vlan_filter, sizeof(sc->vmx_ds->vlan_filter)); else bzero(sc->vmx_ds->vlan_filter, sizeof(sc->vmx_ds->vlan_filter)); vmxnet3_write_cmd(sc, VMXNET3_CMD_VLAN_FILTER); } static int vmxnet3_reinit(struct vmxnet3_softc *sc) { vmxnet3_reinit_interface(sc); vmxnet3_reinit_shared_data(sc); if (vmxnet3_reinit_queues(sc) != 0) return (ENXIO); if (vmxnet3_enable_device(sc) != 0) return (ENXIO); vmxnet3_reinit_rxfilters(sc); return (0); } static void vmxnet3_init_locked(struct vmxnet3_softc *sc) { struct ifnet *ifp; ifp = sc->vmx_ifp; if (ifp->if_drv_flags & IFF_DRV_RUNNING) return; vmxnet3_stop(sc); if (vmxnet3_reinit(sc) != 0) { vmxnet3_stop(sc); return; } ifp->if_drv_flags |= IFF_DRV_RUNNING; vmxnet3_link_status(sc); vmxnet3_enable_all_intrs(sc); callout_reset(&sc->vmx_tick, hz, vmxnet3_tick, sc); } static void vmxnet3_init(void *xsc) { struct vmxnet3_softc *sc; sc = xsc; VMXNET3_CORE_LOCK(sc); vmxnet3_init_locked(sc); VMXNET3_CORE_UNLOCK(sc); } /* * BMV: Much of this can go away once we finally have offsets in * the mbuf packet header. Bug andre@. */ static int vmxnet3_txq_offload_ctx(struct vmxnet3_txqueue *txq, struct mbuf *m, int *etype, int *proto, int *start) { struct ether_vlan_header *evh; int offset; #if defined(INET) struct ip *ip = NULL; struct ip iphdr; #endif #if defined(INET6) struct ip6_hdr *ip6 = NULL; struct ip6_hdr ip6hdr; #endif evh = mtod(m, struct ether_vlan_header *); if (evh->evl_encap_proto == htons(ETHERTYPE_VLAN)) { /* BMV: We should handle nested VLAN tags too. */ *etype = ntohs(evh->evl_proto); offset = sizeof(struct ether_vlan_header); } else { *etype = ntohs(evh->evl_encap_proto); offset = sizeof(struct ether_header); } switch (*etype) { #if defined(INET) case ETHERTYPE_IP: if (__predict_false(m->m_len < offset + sizeof(struct ip))) { m_copydata(m, offset, sizeof(struct ip), (caddr_t) &iphdr); ip = &iphdr; } else ip = mtodo(m, offset); *proto = ip->ip_p; *start = offset + (ip->ip_hl << 2); break; #endif #if defined(INET6) case ETHERTYPE_IPV6: if (__predict_false(m->m_len < offset + sizeof(struct ip6_hdr))) { m_copydata(m, offset, sizeof(struct ip6_hdr), (caddr_t) &ip6hdr); ip6 = &ip6hdr; } else ip6 = mtodo(m, offset); *proto = -1; *start = ip6_lasthdr(m, offset, IPPROTO_IPV6, proto); /* Assert the network stack sent us a valid packet. */ KASSERT(*start > offset, ("%s: mbuf %p start %d offset %d proto %d", __func__, m, *start, offset, *proto)); break; #endif default: return (EINVAL); } if (m->m_pkthdr.csum_flags & CSUM_TSO) { struct tcphdr *tcp, tcphdr; uint16_t sum; if (__predict_false(*proto != IPPROTO_TCP)) { /* Likely failed to correctly parse the mbuf. 
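 * Seeing CSUM_TSO with anything other than IPPROTO_TCP means the header
 * walk above went wrong (TSO is only defined for TCP), so refuse the
 * packet instead of programming a bogus offload descriptor.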
*/ return (EINVAL); } txq->vxtxq_stats.vmtxs_tso++; switch (*etype) { #if defined(INET) case ETHERTYPE_IP: sum = in_pseudo(ip->ip_src.s_addr, ip->ip_dst.s_addr, htons(IPPROTO_TCP)); break; #endif #if defined(INET6) case ETHERTYPE_IPV6: sum = in6_cksum_pseudo(ip6, 0, IPPROTO_TCP, 0); break; #endif default: sum = 0; break; } if (m->m_len < *start + sizeof(struct tcphdr)) { m_copyback(m, *start + offsetof(struct tcphdr, th_sum), sizeof(uint16_t), (caddr_t) &sum); m_copydata(m, *start, sizeof(struct tcphdr), (caddr_t) &tcphdr); tcp = &tcphdr; } else { tcp = mtodo(m, *start); tcp->th_sum = sum; } /* * For TSO, the size of the protocol header is also * included in the descriptor header size. */ *start += (tcp->th_off << 2); } else txq->vxtxq_stats.vmtxs_csum++; return (0); } static int vmxnet3_txq_load_mbuf(struct vmxnet3_txqueue *txq, struct mbuf **m0, bus_dmamap_t dmap, bus_dma_segment_t segs[], int *nsegs) { struct vmxnet3_txring *txr; struct mbuf *m; bus_dma_tag_t tag; int error; txr = &txq->vxtxq_cmd_ring; m = *m0; tag = txr->vxtxr_txtag; error = bus_dmamap_load_mbuf_sg(tag, dmap, m, segs, nsegs, 0); if (error == 0 || error != EFBIG) return (error); m = m_defrag(m, M_NOWAIT); if (m != NULL) { *m0 = m; error = bus_dmamap_load_mbuf_sg(tag, dmap, m, segs, nsegs, 0); } else error = ENOBUFS; if (error) { m_freem(*m0); *m0 = NULL; txq->vxtxq_sc->vmx_stats.vmst_defrag_failed++; } else txq->vxtxq_sc->vmx_stats.vmst_defragged++; return (error); } static void vmxnet3_txq_unload_mbuf(struct vmxnet3_txqueue *txq, bus_dmamap_t dmap) { struct vmxnet3_txring *txr; txr = &txq->vxtxq_cmd_ring; bus_dmamap_unload(txr->vxtxr_txtag, dmap); } static int vmxnet3_txq_encap(struct vmxnet3_txqueue *txq, struct mbuf **m0) { struct vmxnet3_softc *sc; struct vmxnet3_txring *txr; struct vmxnet3_txdesc *txd, *sop; struct mbuf *m; bus_dmamap_t dmap; bus_dma_segment_t segs[VMXNET3_TX_MAXSEGS]; int i, gen, nsegs, etype, proto, start, error; sc = txq->vxtxq_sc; start = 0; txd = NULL; txr = &txq->vxtxq_cmd_ring; dmap = txr->vxtxr_txbuf[txr->vxtxr_head].vtxb_dmamap; error = vmxnet3_txq_load_mbuf(txq, m0, dmap, segs, &nsegs); if (error) return (error); m = *m0; M_ASSERTPKTHDR(m); KASSERT(nsegs <= VMXNET3_TX_MAXSEGS, ("%s: mbuf %p with too many segments %d", __func__, m, nsegs)); if (VMXNET3_TXRING_AVAIL(txr) < nsegs) { txq->vxtxq_stats.vmtxs_full++; vmxnet3_txq_unload_mbuf(txq, dmap); return (ENOSPC); } else if (m->m_pkthdr.csum_flags & VMXNET3_CSUM_ALL_OFFLOAD) { error = vmxnet3_txq_offload_ctx(txq, m, &etype, &proto, &start); if (error) { txq->vxtxq_stats.vmtxs_offload_failed++; vmxnet3_txq_unload_mbuf(txq, dmap); m_freem(m); *m0 = NULL; return (error); } } txr->vxtxr_txbuf[txr->vxtxr_head].vtxb_m = m; sop = &txr->vxtxr_txd[txr->vxtxr_head]; gen = txr->vxtxr_gen ^ 1; /* Owned by cpu (yet) */ for (i = 0; i < nsegs; i++) { txd = &txr->vxtxr_txd[txr->vxtxr_head]; txd->addr = segs[i].ds_addr; txd->len = segs[i].ds_len; txd->gen = gen; txd->dtype = 0; txd->offload_mode = VMXNET3_OM_NONE; txd->offload_pos = 0; txd->hlen = 0; txd->eop = 0; txd->compreq = 0; txd->vtag_mode = 0; txd->vtag = 0; if (++txr->vxtxr_head == txr->vxtxr_ndesc) { txr->vxtxr_head = 0; txr->vxtxr_gen ^= 1; } gen = txr->vxtxr_gen; } txd->eop = 1; txd->compreq = 1; if (m->m_flags & M_VLANTAG) { sop->vtag_mode = 1; sop->vtag = m->m_pkthdr.ether_vtag; } if (m->m_pkthdr.csum_flags & CSUM_TSO) { sop->offload_mode = VMXNET3_OM_TSO; sop->hlen = start; sop->offload_pos = m->m_pkthdr.tso_segsz; } else if (m->m_pkthdr.csum_flags & (VMXNET3_CSUM_OFFLOAD | 
VMXNET3_CSUM_OFFLOAD_IPV6)) { sop->offload_mode = VMXNET3_OM_CSUM; sop->hlen = start; sop->offload_pos = start + m->m_pkthdr.csum_data; } /* Finally, change the ownership. */ vmxnet3_barrier(sc, VMXNET3_BARRIER_WR); sop->gen ^= 1; txq->vxtxq_ts->npending += nsegs; if (txq->vxtxq_ts->npending >= txq->vxtxq_ts->intr_threshold) { txq->vxtxq_ts->npending = 0; vmxnet3_write_bar0(sc, VMXNET3_BAR0_TXH(txq->vxtxq_id), txr->vxtxr_head); } return (0); } #ifdef VMXNET3_LEGACY_TX static void vmxnet3_start_locked(struct ifnet *ifp) { struct vmxnet3_softc *sc; struct vmxnet3_txqueue *txq; struct vmxnet3_txring *txr; struct mbuf *m_head; int tx, avail; sc = ifp->if_softc; txq = &sc->vmx_txq[0]; txr = &txq->vxtxq_cmd_ring; tx = 0; VMXNET3_TXQ_LOCK_ASSERT(txq); if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0 || sc->vmx_link_active == 0) return; while (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) { if ((avail = VMXNET3_TXRING_AVAIL(txr)) < 2) break; IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head); if (m_head == NULL) break; /* Assume worse case if this mbuf is the head of a chain. */ if (m_head->m_next != NULL && avail < VMXNET3_TX_MAXSEGS) { IFQ_DRV_PREPEND(&ifp->if_snd, m_head); break; } if (vmxnet3_txq_encap(txq, &m_head) != 0) { if (m_head != NULL) IFQ_DRV_PREPEND(&ifp->if_snd, m_head); break; } tx++; ETHER_BPF_MTAP(ifp, m_head); } if (tx > 0) txq->vxtxq_watchdog = VMXNET3_WATCHDOG_TIMEOUT; } static void vmxnet3_start(struct ifnet *ifp) { struct vmxnet3_softc *sc; struct vmxnet3_txqueue *txq; sc = ifp->if_softc; txq = &sc->vmx_txq[0]; VMXNET3_TXQ_LOCK(txq); vmxnet3_start_locked(ifp); VMXNET3_TXQ_UNLOCK(txq); } #else /* !VMXNET3_LEGACY_TX */ static int vmxnet3_txq_mq_start_locked(struct vmxnet3_txqueue *txq, struct mbuf *m) { struct vmxnet3_softc *sc; struct vmxnet3_txring *txr; struct buf_ring *br; struct ifnet *ifp; int tx, avail, error; sc = txq->vxtxq_sc; br = txq->vxtxq_br; ifp = sc->vmx_ifp; txr = &txq->vxtxq_cmd_ring; tx = 0; error = 0; VMXNET3_TXQ_LOCK_ASSERT(txq); if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0 || sc->vmx_link_active == 0) { if (m != NULL) error = drbr_enqueue(ifp, br, m); return (error); } if (m != NULL) { error = drbr_enqueue(ifp, br, m); if (error) return (error); } while ((avail = VMXNET3_TXRING_AVAIL(txr)) >= 2) { m = drbr_peek(ifp, br); if (m == NULL) break; /* Assume worse case if this mbuf is the head of a chain. 
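 * A chained mbuf may map to as many as VMXNET3_TX_MAXSEGS descriptors
 * once it is DMA loaded, and the real segment count is not known until
 * vmxnet3_txq_encap() runs. Requiring that many free slots up front
 * avoids dequeuing a packet that would immediately be put back.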
*/ if (m->m_next != NULL && avail < VMXNET3_TX_MAXSEGS) { drbr_putback(ifp, br, m); break; } if (vmxnet3_txq_encap(txq, &m) != 0) { if (m != NULL) drbr_putback(ifp, br, m); else drbr_advance(ifp, br); break; } drbr_advance(ifp, br); tx++; ETHER_BPF_MTAP(ifp, m); } if (tx > 0) txq->vxtxq_watchdog = VMXNET3_WATCHDOG_TIMEOUT; return (0); } static int vmxnet3_txq_mq_start(struct ifnet *ifp, struct mbuf *m) { struct vmxnet3_softc *sc; struct vmxnet3_txqueue *txq; int i, ntxq, error; sc = ifp->if_softc; ntxq = sc->vmx_ntxqueues; /* check if flowid is set */ if (M_HASHTYPE_GET(m) != M_HASHTYPE_NONE) i = m->m_pkthdr.flowid % ntxq; else i = curcpu % ntxq; txq = &sc->vmx_txq[i]; if (VMXNET3_TXQ_TRYLOCK(txq) != 0) { error = vmxnet3_txq_mq_start_locked(txq, m); VMXNET3_TXQ_UNLOCK(txq); } else { error = drbr_enqueue(ifp, txq->vxtxq_br, m); taskqueue_enqueue(sc->vmx_tq, &txq->vxtxq_defrtask); } return (error); } static void vmxnet3_txq_tq_deferred(void *xtxq, int pending) { struct vmxnet3_softc *sc; struct vmxnet3_txqueue *txq; txq = xtxq; sc = txq->vxtxq_sc; VMXNET3_TXQ_LOCK(txq); if (!drbr_empty(sc->vmx_ifp, txq->vxtxq_br)) vmxnet3_txq_mq_start_locked(txq, NULL); VMXNET3_TXQ_UNLOCK(txq); } #endif /* VMXNET3_LEGACY_TX */ static void vmxnet3_txq_start(struct vmxnet3_txqueue *txq) { struct vmxnet3_softc *sc; struct ifnet *ifp; sc = txq->vxtxq_sc; ifp = sc->vmx_ifp; #ifdef VMXNET3_LEGACY_TX if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) vmxnet3_start_locked(ifp); #else if (!drbr_empty(ifp, txq->vxtxq_br)) vmxnet3_txq_mq_start_locked(txq, NULL); #endif } static void vmxnet3_tx_start_all(struct vmxnet3_softc *sc) { struct vmxnet3_txqueue *txq; int i; VMXNET3_CORE_LOCK_ASSERT(sc); for (i = 0; i < sc->vmx_ntxqueues; i++) { txq = &sc->vmx_txq[i]; VMXNET3_TXQ_LOCK(txq); vmxnet3_txq_start(txq); VMXNET3_TXQ_UNLOCK(txq); } } static void vmxnet3_update_vlan_filter(struct vmxnet3_softc *sc, int add, uint16_t tag) { struct ifnet *ifp; int idx, bit; ifp = sc->vmx_ifp; idx = (tag >> 5) & 0x7F; bit = tag & 0x1F; if (tag == 0 || tag > 4095) return; VMXNET3_CORE_LOCK(sc); /* Update our private VLAN bitvector. 
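 * Each 32-bit word of the bitvector tracks 32 consecutive tags: idx
 * picks the word (tag >> 5) and bit the position inside it (tag & 0x1f),
 * so e.g. VLAN tag 100 lands in word 3, bit 4.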
*/ if (add) sc->vmx_vlan_filter[idx] |= (1 << bit); else sc->vmx_vlan_filter[idx] &= ~(1 << bit); if (ifp->if_capenable & IFCAP_VLAN_HWFILTER) { if (add) sc->vmx_ds->vlan_filter[idx] |= (1 << bit); else sc->vmx_ds->vlan_filter[idx] &= ~(1 << bit); vmxnet3_write_cmd(sc, VMXNET3_CMD_VLAN_FILTER); } VMXNET3_CORE_UNLOCK(sc); } static void vmxnet3_register_vlan(void *arg, struct ifnet *ifp, uint16_t tag) { if (ifp->if_softc == arg) vmxnet3_update_vlan_filter(arg, 1, tag); } static void vmxnet3_unregister_vlan(void *arg, struct ifnet *ifp, uint16_t tag) { if (ifp->if_softc == arg) vmxnet3_update_vlan_filter(arg, 0, tag); } static void vmxnet3_set_rxfilter(struct vmxnet3_softc *sc) { struct ifnet *ifp; struct vmxnet3_driver_shared *ds; struct ifmultiaddr *ifma; u_int mode; ifp = sc->vmx_ifp; ds = sc->vmx_ds; mode = VMXNET3_RXMODE_UCAST | VMXNET3_RXMODE_BCAST; if (ifp->if_flags & IFF_PROMISC) mode |= VMXNET3_RXMODE_PROMISC; if (ifp->if_flags & IFF_ALLMULTI) mode |= VMXNET3_RXMODE_ALLMULTI; else { int cnt = 0, overflow = 0; if_maddr_rlock(ifp); TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) { if (ifma->ifma_addr->sa_family != AF_LINK) continue; else if (cnt == VMXNET3_MULTICAST_MAX) { overflow = 1; break; } bcopy(LLADDR((struct sockaddr_dl *)ifma->ifma_addr), &sc->vmx_mcast[cnt*ETHER_ADDR_LEN], ETHER_ADDR_LEN); cnt++; } if_maddr_runlock(ifp); if (overflow != 0) { cnt = 0; mode |= VMXNET3_RXMODE_ALLMULTI; } else if (cnt > 0) mode |= VMXNET3_RXMODE_MCAST; ds->mcast_tablelen = cnt * ETHER_ADDR_LEN; } ds->rxmode = mode; vmxnet3_write_cmd(sc, VMXNET3_CMD_SET_FILTER); vmxnet3_write_cmd(sc, VMXNET3_CMD_SET_RXMODE); } static int vmxnet3_change_mtu(struct vmxnet3_softc *sc, int mtu) { struct ifnet *ifp; ifp = sc->vmx_ifp; if (mtu < VMXNET3_MIN_MTU || mtu > VMXNET3_MAX_MTU) return (EINVAL); ifp->if_mtu = mtu; if (ifp->if_drv_flags & IFF_DRV_RUNNING) { ifp->if_drv_flags &= ~IFF_DRV_RUNNING; vmxnet3_init_locked(sc); } return (0); } static int vmxnet3_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data) { struct vmxnet3_softc *sc; struct ifreq *ifr; int reinit, mask, error; sc = ifp->if_softc; ifr = (struct ifreq *) data; error = 0; switch (cmd) { case SIOCSIFMTU: if (ifp->if_mtu != ifr->ifr_mtu) { VMXNET3_CORE_LOCK(sc); error = vmxnet3_change_mtu(sc, ifr->ifr_mtu); VMXNET3_CORE_UNLOCK(sc); } break; case SIOCSIFFLAGS: VMXNET3_CORE_LOCK(sc); if (ifp->if_flags & IFF_UP) { if ((ifp->if_drv_flags & IFF_DRV_RUNNING)) { if ((ifp->if_flags ^ sc->vmx_if_flags) & (IFF_PROMISC | IFF_ALLMULTI)) { vmxnet3_set_rxfilter(sc); } } else vmxnet3_init_locked(sc); } else { if (ifp->if_drv_flags & IFF_DRV_RUNNING) vmxnet3_stop(sc); } sc->vmx_if_flags = ifp->if_flags; VMXNET3_CORE_UNLOCK(sc); break; case SIOCADDMULTI: case SIOCDELMULTI: VMXNET3_CORE_LOCK(sc); if (ifp->if_drv_flags & IFF_DRV_RUNNING) vmxnet3_set_rxfilter(sc); VMXNET3_CORE_UNLOCK(sc); break; case SIOCSIFMEDIA: case SIOCGIFMEDIA: error = ifmedia_ioctl(ifp, ifr, &sc->vmx_media, cmd); break; case SIOCSIFCAP: VMXNET3_CORE_LOCK(sc); mask = ifr->ifr_reqcap ^ ifp->if_capenable; if (mask & IFCAP_TXCSUM) ifp->if_capenable ^= IFCAP_TXCSUM; if (mask & IFCAP_TXCSUM_IPV6) ifp->if_capenable ^= IFCAP_TXCSUM_IPV6; if (mask & IFCAP_TSO4) ifp->if_capenable ^= IFCAP_TSO4; if (mask & IFCAP_TSO6) ifp->if_capenable ^= IFCAP_TSO6; if (mask & (IFCAP_RXCSUM | IFCAP_RXCSUM_IPV6 | IFCAP_LRO | IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_HWFILTER)) { /* Changing these features requires us to reinit. 
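 * The Rx checksum, LRO and VLAN settings only take effect when the
 * shared data and Rx rings are rebuilt, so the toggle is applied through
 * the stop/vmxnet3_init_locked() sequence below; the Tx-only offload
 * flags are handled by vmxnet3_init_hwassist() without a full reinit.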
*/ reinit = 1; if (mask & IFCAP_RXCSUM) ifp->if_capenable ^= IFCAP_RXCSUM; if (mask & IFCAP_RXCSUM_IPV6) ifp->if_capenable ^= IFCAP_RXCSUM_IPV6; if (mask & IFCAP_LRO) ifp->if_capenable ^= IFCAP_LRO; if (mask & IFCAP_VLAN_HWTAGGING) ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING; if (mask & IFCAP_VLAN_HWFILTER) ifp->if_capenable ^= IFCAP_VLAN_HWFILTER; } else reinit = 0; if (mask & IFCAP_VLAN_HWTSO) ifp->if_capenable ^= IFCAP_VLAN_HWTSO; if (reinit && (ifp->if_drv_flags & IFF_DRV_RUNNING)) { ifp->if_drv_flags &= ~IFF_DRV_RUNNING; vmxnet3_init_locked(sc); } else { vmxnet3_init_hwassist(sc); } VMXNET3_CORE_UNLOCK(sc); VLAN_CAPABILITIES(ifp); break; default: error = ether_ioctl(ifp, cmd, data); break; } VMXNET3_CORE_LOCK_ASSERT_NOTOWNED(sc); return (error); } #ifndef VMXNET3_LEGACY_TX static void vmxnet3_qflush(struct ifnet *ifp) { struct vmxnet3_softc *sc; struct vmxnet3_txqueue *txq; struct mbuf *m; int i; sc = ifp->if_softc; for (i = 0; i < sc->vmx_ntxqueues; i++) { txq = &sc->vmx_txq[i]; VMXNET3_TXQ_LOCK(txq); while ((m = buf_ring_dequeue_sc(txq->vxtxq_br)) != NULL) m_freem(m); VMXNET3_TXQ_UNLOCK(txq); } if_qflush(ifp); } #endif static int vmxnet3_watchdog(struct vmxnet3_txqueue *txq) { struct vmxnet3_softc *sc; sc = txq->vxtxq_sc; VMXNET3_TXQ_LOCK(txq); if (txq->vxtxq_watchdog == 0 || --txq->vxtxq_watchdog) { VMXNET3_TXQ_UNLOCK(txq); return (0); } VMXNET3_TXQ_UNLOCK(txq); if_printf(sc->vmx_ifp, "watchdog timeout on queue %d\n", txq->vxtxq_id); return (1); } static void vmxnet3_refresh_host_stats(struct vmxnet3_softc *sc) { vmxnet3_write_cmd(sc, VMXNET3_CMD_GET_STATS); } static uint64_t vmxnet3_get_counter(struct ifnet *ifp, ift_counter cnt) { struct vmxnet3_softc *sc; uint64_t rv; sc = if_getsoftc(ifp); rv = 0; /* * With the exception of if_ierrors, these ifnet statistics are * only updated in the driver, so just set them to our accumulated * values. if_ierrors is updated in ether_input() for malformed * frames that we should have already discarded. */ switch (cnt) { case IFCOUNTER_IPACKETS: for (int i = 0; i < sc->vmx_nrxqueues; i++) rv += sc->vmx_rxq[i].vxrxq_stats.vmrxs_ipackets; return (rv); case IFCOUNTER_IQDROPS: for (int i = 0; i < sc->vmx_nrxqueues; i++) rv += sc->vmx_rxq[i].vxrxq_stats.vmrxs_iqdrops; return (rv); case IFCOUNTER_IERRORS: for (int i = 0; i < sc->vmx_nrxqueues; i++) rv += sc->vmx_rxq[i].vxrxq_stats.vmrxs_ierrors; return (rv); case IFCOUNTER_OPACKETS: for (int i = 0; i < sc->vmx_ntxqueues; i++) rv += sc->vmx_txq[i].vxtxq_stats.vmtxs_opackets; return (rv); #ifndef VMXNET3_LEGACY_TX case IFCOUNTER_OBYTES: for (int i = 0; i < sc->vmx_ntxqueues; i++) rv += sc->vmx_txq[i].vxtxq_stats.vmtxs_obytes; return (rv); case IFCOUNTER_OMCASTS: for (int i = 0; i < sc->vmx_ntxqueues; i++) rv += sc->vmx_txq[i].vxtxq_stats.vmtxs_omcasts; return (rv); #endif default: return (if_get_counter_default(ifp, cnt)); } } static void vmxnet3_tick(void *xsc) { struct vmxnet3_softc *sc; struct ifnet *ifp; int i, timedout; sc = xsc; ifp = sc->vmx_ifp; timedout = 0; VMXNET3_CORE_LOCK_ASSERT(sc); vmxnet3_refresh_host_stats(sc); for (i = 0; i < sc->vmx_ntxqueues; i++) timedout |= vmxnet3_watchdog(&sc->vmx_txq[i]); if (timedout != 0) { ifp->if_drv_flags &= ~IFF_DRV_RUNNING; vmxnet3_init_locked(sc); } else callout_reset(&sc->vmx_tick, hz, vmxnet3_tick, sc); } static int vmxnet3_link_is_up(struct vmxnet3_softc *sc) { uint32_t status; /* Also update the link speed while here. 
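 * The VMXNET3_CMD_GET_LINK response packs the link state into bit 0 and
 * the reported speed into the upper 16 bits; e.g. a status value of
 * 0x27100001 decodes to link up with a reported speed of 10000.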
*/ status = vmxnet3_read_cmd(sc, VMXNET3_CMD_GET_LINK); sc->vmx_link_speed = status >> 16; return !!(status & 0x1); } static void vmxnet3_link_status(struct vmxnet3_softc *sc) { struct ifnet *ifp; int link; ifp = sc->vmx_ifp; link = vmxnet3_link_is_up(sc); if (link != 0 && sc->vmx_link_active == 0) { sc->vmx_link_active = 1; if_link_state_change(ifp, LINK_STATE_UP); } else if (link == 0 && sc->vmx_link_active != 0) { sc->vmx_link_active = 0; if_link_state_change(ifp, LINK_STATE_DOWN); } } static void vmxnet3_media_status(struct ifnet *ifp, struct ifmediareq *ifmr) { struct vmxnet3_softc *sc; sc = ifp->if_softc; ifmr->ifm_status = IFM_AVALID; ifmr->ifm_active = IFM_ETHER; VMXNET3_CORE_LOCK(sc); if (vmxnet3_link_is_up(sc) != 0) { ifmr->ifm_status |= IFM_ACTIVE; ifmr->ifm_active |= IFM_AUTO; } else ifmr->ifm_active |= IFM_NONE; VMXNET3_CORE_UNLOCK(sc); } static int vmxnet3_media_change(struct ifnet *ifp) { /* Ignore. */ return (0); } static void vmxnet3_set_lladdr(struct vmxnet3_softc *sc) { uint32_t ml, mh; ml = sc->vmx_lladdr[0]; ml |= sc->vmx_lladdr[1] << 8; ml |= sc->vmx_lladdr[2] << 16; ml |= sc->vmx_lladdr[3] << 24; vmxnet3_write_bar1(sc, VMXNET3_BAR1_MACL, ml); mh = sc->vmx_lladdr[4]; mh |= sc->vmx_lladdr[5] << 8; vmxnet3_write_bar1(sc, VMXNET3_BAR1_MACH, mh); } static void vmxnet3_get_lladdr(struct vmxnet3_softc *sc) { uint32_t ml, mh; ml = vmxnet3_read_cmd(sc, VMXNET3_CMD_GET_MACL); mh = vmxnet3_read_cmd(sc, VMXNET3_CMD_GET_MACH); sc->vmx_lladdr[0] = ml; sc->vmx_lladdr[1] = ml >> 8; sc->vmx_lladdr[2] = ml >> 16; sc->vmx_lladdr[3] = ml >> 24; sc->vmx_lladdr[4] = mh; sc->vmx_lladdr[5] = mh >> 8; } static void vmxnet3_setup_txq_sysctl(struct vmxnet3_txqueue *txq, struct sysctl_ctx_list *ctx, struct sysctl_oid_list *child) { struct sysctl_oid *node, *txsnode; struct sysctl_oid_list *list, *txslist; struct vmxnet3_txq_stats *stats; struct UPT1_TxStats *txstats; char namebuf[16]; stats = &txq->vxtxq_stats; txstats = &txq->vxtxq_ts->stats; snprintf(namebuf, sizeof(namebuf), "txq%d", txq->vxtxq_id); node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, namebuf, CTLFLAG_RD, NULL, "Transmit Queue"); txq->vxtxq_sysctl = list = SYSCTL_CHILDREN(node); SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "opackets", CTLFLAG_RD, &stats->vmtxs_opackets, "Transmit packets"); SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "obytes", CTLFLAG_RD, &stats->vmtxs_obytes, "Transmit bytes"); SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "omcasts", CTLFLAG_RD, &stats->vmtxs_omcasts, "Transmit multicasts"); SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "csum", CTLFLAG_RD, &stats->vmtxs_csum, "Transmit checksum offloaded"); SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "tso", CTLFLAG_RD, &stats->vmtxs_tso, "Transmit TCP segmentation offloaded"); SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "ringfull", CTLFLAG_RD, &stats->vmtxs_full, "Transmit ring full"); SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "offload_failed", CTLFLAG_RD, &stats->vmtxs_offload_failed, "Transmit checksum offload failed"); /* * Add statistics reported by the host. These are updated once * per second. 
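 * They are refreshed by vmxnet3_refresh_host_stats(), which issues
 * VMXNET3_CMD_GET_STATS from the vmxnet3_tick() callout once per second
 * while the interface is running.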
*/ txsnode = SYSCTL_ADD_NODE(ctx, list, OID_AUTO, "hstats", CTLFLAG_RD, NULL, "Host Statistics"); txslist = SYSCTL_CHILDREN(txsnode); SYSCTL_ADD_UQUAD(ctx, txslist, OID_AUTO, "tso_packets", CTLFLAG_RD, &txstats->TSO_packets, "TSO packets"); SYSCTL_ADD_UQUAD(ctx, txslist, OID_AUTO, "tso_bytes", CTLFLAG_RD, &txstats->TSO_bytes, "TSO bytes"); SYSCTL_ADD_UQUAD(ctx, txslist, OID_AUTO, "ucast_packets", CTLFLAG_RD, &txstats->ucast_packets, "Unicast packets"); SYSCTL_ADD_UQUAD(ctx, txslist, OID_AUTO, "unicast_bytes", CTLFLAG_RD, &txstats->ucast_bytes, "Unicast bytes"); SYSCTL_ADD_UQUAD(ctx, txslist, OID_AUTO, "mcast_packets", CTLFLAG_RD, &txstats->mcast_packets, "Multicast packets"); SYSCTL_ADD_UQUAD(ctx, txslist, OID_AUTO, "mcast_bytes", CTLFLAG_RD, &txstats->mcast_bytes, "Multicast bytes"); SYSCTL_ADD_UQUAD(ctx, txslist, OID_AUTO, "error", CTLFLAG_RD, &txstats->error, "Errors"); SYSCTL_ADD_UQUAD(ctx, txslist, OID_AUTO, "discard", CTLFLAG_RD, &txstats->discard, "Discards"); } static void vmxnet3_setup_rxq_sysctl(struct vmxnet3_rxqueue *rxq, struct sysctl_ctx_list *ctx, struct sysctl_oid_list *child) { struct sysctl_oid *node, *rxsnode; struct sysctl_oid_list *list, *rxslist; struct vmxnet3_rxq_stats *stats; struct UPT1_RxStats *rxstats; char namebuf[16]; stats = &rxq->vxrxq_stats; rxstats = &rxq->vxrxq_rs->stats; snprintf(namebuf, sizeof(namebuf), "rxq%d", rxq->vxrxq_id); node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, namebuf, CTLFLAG_RD, NULL, "Receive Queue"); rxq->vxrxq_sysctl = list = SYSCTL_CHILDREN(node); SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "ipackets", CTLFLAG_RD, &stats->vmrxs_ipackets, "Receive packets"); SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "ibytes", CTLFLAG_RD, &stats->vmrxs_ibytes, "Receive bytes"); SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "iqdrops", CTLFLAG_RD, &stats->vmrxs_iqdrops, "Receive drops"); SYSCTL_ADD_UQUAD(ctx, list, OID_AUTO, "ierrors", CTLFLAG_RD, &stats->vmrxs_ierrors, "Receive errors"); /* * Add statistics reported by the host. These are updated once * per second. 
*/ rxsnode = SYSCTL_ADD_NODE(ctx, list, OID_AUTO, "hstats", CTLFLAG_RD, NULL, "Host Statistics"); rxslist = SYSCTL_CHILDREN(rxsnode); SYSCTL_ADD_UQUAD(ctx, rxslist, OID_AUTO, "lro_packets", CTLFLAG_RD, &rxstats->LRO_packets, "LRO packets"); SYSCTL_ADD_UQUAD(ctx, rxslist, OID_AUTO, "lro_bytes", CTLFLAG_RD, &rxstats->LRO_bytes, "LRO bytes"); SYSCTL_ADD_UQUAD(ctx, rxslist, OID_AUTO, "ucast_packets", CTLFLAG_RD, &rxstats->ucast_packets, "Unicast packets"); SYSCTL_ADD_UQUAD(ctx, rxslist, OID_AUTO, "unicast_bytes", CTLFLAG_RD, &rxstats->ucast_bytes, "Unicast bytes"); SYSCTL_ADD_UQUAD(ctx, rxslist, OID_AUTO, "mcast_packets", CTLFLAG_RD, &rxstats->mcast_packets, "Multicast packets"); SYSCTL_ADD_UQUAD(ctx, rxslist, OID_AUTO, "mcast_bytes", CTLFLAG_RD, &rxstats->mcast_bytes, "Multicast bytes"); SYSCTL_ADD_UQUAD(ctx, rxslist, OID_AUTO, "bcast_packets", CTLFLAG_RD, &rxstats->bcast_packets, "Broadcast packets"); SYSCTL_ADD_UQUAD(ctx, rxslist, OID_AUTO, "bcast_bytes", CTLFLAG_RD, &rxstats->bcast_bytes, "Broadcast bytes"); SYSCTL_ADD_UQUAD(ctx, rxslist, OID_AUTO, "nobuffer", CTLFLAG_RD, &rxstats->nobuffer, "No buffer"); SYSCTL_ADD_UQUAD(ctx, rxslist, OID_AUTO, "error", CTLFLAG_RD, &rxstats->error, "Errors"); } static void vmxnet3_setup_debug_sysctl(struct vmxnet3_softc *sc, struct sysctl_ctx_list *ctx, struct sysctl_oid_list *child) { struct sysctl_oid *node; struct sysctl_oid_list *list; int i; for (i = 0; i < sc->vmx_ntxqueues; i++) { struct vmxnet3_txqueue *txq = &sc->vmx_txq[i]; node = SYSCTL_ADD_NODE(ctx, txq->vxtxq_sysctl, OID_AUTO, "debug", CTLFLAG_RD, NULL, ""); list = SYSCTL_CHILDREN(node); SYSCTL_ADD_UINT(ctx, list, OID_AUTO, "cmd_head", CTLFLAG_RD, &txq->vxtxq_cmd_ring.vxtxr_head, 0, ""); SYSCTL_ADD_UINT(ctx, list, OID_AUTO, "cmd_next", CTLFLAG_RD, &txq->vxtxq_cmd_ring.vxtxr_next, 0, ""); SYSCTL_ADD_UINT(ctx, list, OID_AUTO, "cmd_ndesc", CTLFLAG_RD, &txq->vxtxq_cmd_ring.vxtxr_ndesc, 0, ""); SYSCTL_ADD_INT(ctx, list, OID_AUTO, "cmd_gen", CTLFLAG_RD, &txq->vxtxq_cmd_ring.vxtxr_gen, 0, ""); SYSCTL_ADD_UINT(ctx, list, OID_AUTO, "comp_next", CTLFLAG_RD, &txq->vxtxq_comp_ring.vxcr_next, 0, ""); SYSCTL_ADD_UINT(ctx, list, OID_AUTO, "comp_ndesc", CTLFLAG_RD, &txq->vxtxq_comp_ring.vxcr_ndesc, 0,""); SYSCTL_ADD_INT(ctx, list, OID_AUTO, "comp_gen", CTLFLAG_RD, &txq->vxtxq_comp_ring.vxcr_gen, 0, ""); } for (i = 0; i < sc->vmx_nrxqueues; i++) { struct vmxnet3_rxqueue *rxq = &sc->vmx_rxq[i]; node = SYSCTL_ADD_NODE(ctx, rxq->vxrxq_sysctl, OID_AUTO, "debug", CTLFLAG_RD, NULL, ""); list = SYSCTL_CHILDREN(node); SYSCTL_ADD_UINT(ctx, list, OID_AUTO, "cmd0_fill", CTLFLAG_RD, &rxq->vxrxq_cmd_ring[0].vxrxr_fill, 0, ""); SYSCTL_ADD_UINT(ctx, list, OID_AUTO, "cmd0_ndesc", CTLFLAG_RD, &rxq->vxrxq_cmd_ring[0].vxrxr_ndesc, 0, ""); SYSCTL_ADD_INT(ctx, list, OID_AUTO, "cmd0_gen", CTLFLAG_RD, &rxq->vxrxq_cmd_ring[0].vxrxr_gen, 0, ""); SYSCTL_ADD_UINT(ctx, list, OID_AUTO, "cmd1_fill", CTLFLAG_RD, &rxq->vxrxq_cmd_ring[1].vxrxr_fill, 0, ""); SYSCTL_ADD_UINT(ctx, list, OID_AUTO, "cmd1_ndesc", CTLFLAG_RD, &rxq->vxrxq_cmd_ring[1].vxrxr_ndesc, 0, ""); SYSCTL_ADD_INT(ctx, list, OID_AUTO, "cmd1_gen", CTLFLAG_RD, &rxq->vxrxq_cmd_ring[1].vxrxr_gen, 0, ""); SYSCTL_ADD_UINT(ctx, list, OID_AUTO, "comp_next", CTLFLAG_RD, &rxq->vxrxq_comp_ring.vxcr_next, 0, ""); SYSCTL_ADD_UINT(ctx, list, OID_AUTO, "comp_ndesc", CTLFLAG_RD, &rxq->vxrxq_comp_ring.vxcr_ndesc, 0,""); SYSCTL_ADD_INT(ctx, list, OID_AUTO, "comp_gen", CTLFLAG_RD, &rxq->vxrxq_comp_ring.vxcr_gen, 0, ""); } } static void vmxnet3_setup_queue_sysctl(struct vmxnet3_softc *sc, struct 
sysctl_ctx_list *ctx, struct sysctl_oid_list *child) { int i; for (i = 0; i < sc->vmx_ntxqueues; i++) vmxnet3_setup_txq_sysctl(&sc->vmx_txq[i], ctx, child); for (i = 0; i < sc->vmx_nrxqueues; i++) vmxnet3_setup_rxq_sysctl(&sc->vmx_rxq[i], ctx, child); vmxnet3_setup_debug_sysctl(sc, ctx, child); } static void vmxnet3_setup_sysctl(struct vmxnet3_softc *sc) { device_t dev; struct vmxnet3_statistics *stats; struct sysctl_ctx_list *ctx; struct sysctl_oid *tree; struct sysctl_oid_list *child; dev = sc->vmx_dev; ctx = device_get_sysctl_ctx(dev); tree = device_get_sysctl_tree(dev); child = SYSCTL_CHILDREN(tree); SYSCTL_ADD_INT(ctx, child, OID_AUTO, "max_ntxqueues", CTLFLAG_RD, &sc->vmx_max_ntxqueues, 0, "Maximum number of Tx queues"); SYSCTL_ADD_INT(ctx, child, OID_AUTO, "max_nrxqueues", CTLFLAG_RD, &sc->vmx_max_nrxqueues, 0, "Maximum number of Rx queues"); SYSCTL_ADD_INT(ctx, child, OID_AUTO, "ntxqueues", CTLFLAG_RD, &sc->vmx_ntxqueues, 0, "Number of Tx queues"); SYSCTL_ADD_INT(ctx, child, OID_AUTO, "nrxqueues", CTLFLAG_RD, &sc->vmx_nrxqueues, 0, "Number of Rx queues"); stats = &sc->vmx_stats; SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "defragged", CTLFLAG_RD, &stats->vmst_defragged, 0, "Tx mbuf chains defragged"); SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "defrag_failed", CTLFLAG_RD, &stats->vmst_defrag_failed, 0, "Tx mbuf dropped because defrag failed"); SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "mgetcl_failed", CTLFLAG_RD, &stats->vmst_mgetcl_failed, 0, "mbuf cluster allocation failed"); SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "mbuf_load_failed", CTLFLAG_RD, &stats->vmst_mbuf_load_failed, 0, "mbuf load segments failed"); vmxnet3_setup_queue_sysctl(sc, ctx, child); } static void vmxnet3_write_bar0(struct vmxnet3_softc *sc, bus_size_t r, uint32_t v) { bus_space_write_4(sc->vmx_iot0, sc->vmx_ioh0, r, v); } static uint32_t vmxnet3_read_bar1(struct vmxnet3_softc *sc, bus_size_t r) { return (bus_space_read_4(sc->vmx_iot1, sc->vmx_ioh1, r)); } static void vmxnet3_write_bar1(struct vmxnet3_softc *sc, bus_size_t r, uint32_t v) { bus_space_write_4(sc->vmx_iot1, sc->vmx_ioh1, r, v); } static void vmxnet3_write_cmd(struct vmxnet3_softc *sc, uint32_t cmd) { vmxnet3_write_bar1(sc, VMXNET3_BAR1_CMD, cmd); } static uint32_t vmxnet3_read_cmd(struct vmxnet3_softc *sc, uint32_t cmd) { vmxnet3_write_cmd(sc, cmd); bus_space_barrier(sc->vmx_iot1, sc->vmx_ioh1, 0, 0, BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE); return (vmxnet3_read_bar1(sc, VMXNET3_BAR1_CMD)); } static void vmxnet3_enable_intr(struct vmxnet3_softc *sc, int irq) { vmxnet3_write_bar0(sc, VMXNET3_BAR0_IMASK(irq), 0); } static void vmxnet3_disable_intr(struct vmxnet3_softc *sc, int irq) { vmxnet3_write_bar0(sc, VMXNET3_BAR0_IMASK(irq), 1); } static void vmxnet3_enable_all_intrs(struct vmxnet3_softc *sc) { int i; sc->vmx_ds->ictrl &= ~VMXNET3_ICTRL_DISABLE_ALL; for (i = 0; i < sc->vmx_nintrs; i++) vmxnet3_enable_intr(sc, i); } static void vmxnet3_disable_all_intrs(struct vmxnet3_softc *sc) { int i; sc->vmx_ds->ictrl |= VMXNET3_ICTRL_DISABLE_ALL; for (i = 0; i < sc->vmx_nintrs; i++) vmxnet3_disable_intr(sc, i); } static void vmxnet3_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error) { bus_addr_t *baddr = arg; if (error == 0) *baddr = segs->ds_addr; } static int vmxnet3_dma_malloc(struct vmxnet3_softc *sc, bus_size_t size, bus_size_t align, struct vmxnet3_dma_alloc *dma) { device_t dev; int error; dev = sc->vmx_dev; bzero(dma, sizeof(struct vmxnet3_dma_alloc)); error = bus_dma_tag_create(bus_get_dma_tag(dev), align, 0, /* alignment, bounds */ 
BUS_SPACE_MAXADDR, /* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filter, filterarg */ size, /* maxsize */ 1, /* nsegments */ size, /* maxsegsize */ BUS_DMA_ALLOCNOW, /* flags */ NULL, /* lockfunc */ NULL, /* lockfuncarg */ &dma->dma_tag); if (error) { device_printf(dev, "bus_dma_tag_create failed: %d\n", error); goto fail; } error = bus_dmamem_alloc(dma->dma_tag, (void **)&dma->dma_vaddr, BUS_DMA_ZERO | BUS_DMA_NOWAIT, &dma->dma_map); if (error) { device_printf(dev, "bus_dmamem_alloc failed: %d\n", error); goto fail; } error = bus_dmamap_load(dma->dma_tag, dma->dma_map, dma->dma_vaddr, size, vmxnet3_dmamap_cb, &dma->dma_paddr, BUS_DMA_NOWAIT); if (error) { device_printf(dev, "bus_dmamap_load failed: %d\n", error); goto fail; } dma->dma_size = size; fail: if (error) vmxnet3_dma_free(sc, dma); return (error); } static void vmxnet3_dma_free(struct vmxnet3_softc *sc, struct vmxnet3_dma_alloc *dma) { if (dma->dma_tag != NULL) { if (dma->dma_paddr != 0) { bus_dmamap_sync(dma->dma_tag, dma->dma_map, BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); bus_dmamap_unload(dma->dma_tag, dma->dma_map); } if (dma->dma_vaddr != NULL) { bus_dmamem_free(dma->dma_tag, dma->dma_vaddr, dma->dma_map); } bus_dma_tag_destroy(dma->dma_tag); } bzero(dma, sizeof(struct vmxnet3_dma_alloc)); } static int vmxnet3_tunable_int(struct vmxnet3_softc *sc, const char *knob, int def) { char path[64]; snprintf(path, sizeof(path), "hw.vmx.%d.%s", device_get_unit(sc->vmx_dev), knob); TUNABLE_INT_FETCH(path, &def); return (def); } /* * Since this is a purely paravirtualized device, we do not have * to worry about DMA coherency. But at times, we must make sure * both the compiler and CPU do not reorder memory operations. */ static inline void vmxnet3_barrier(struct vmxnet3_softc *sc, vmxnet3_barrier_t type) { switch (type) { case VMXNET3_BARRIER_RD: rmb(); break; case VMXNET3_BARRIER_WR: wmb(); break; case VMXNET3_BARRIER_RDWR: mb(); break; default: panic("%s: bad barrier type %d", __func__, type); } } Index: head/sys/dev/vnic/nicvf_queues.c =================================================================== --- head/sys/dev/vnic/nicvf_queues.c (revision 327948) +++ head/sys/dev/vnic/nicvf_queues.c (revision 327949) @@ -1,2366 +1,2366 @@ /* * Copyright (C) 2015 Cavium Inc. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * $FreeBSD$ * */ #include __FBSDID("$FreeBSD$"); #include "opt_inet.h" #include "opt_inet6.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "thunder_bgx.h" #include "nic_reg.h" #include "nic.h" #include "q_struct.h" #include "nicvf_queues.h" #define DEBUG #undef DEBUG #ifdef DEBUG #define dprintf(dev, fmt, ...) device_printf(dev, fmt, ##__VA_ARGS__) #else #define dprintf(dev, fmt, ...) #endif MALLOC_DECLARE(M_NICVF); static void nicvf_free_snd_queue(struct nicvf *, struct snd_queue *); static struct mbuf * nicvf_get_rcv_mbuf(struct nicvf *, struct cqe_rx_t *); static void nicvf_sq_disable(struct nicvf *, int); static void nicvf_sq_enable(struct nicvf *, struct snd_queue *, int); static void nicvf_put_sq_desc(struct snd_queue *, int); static void nicvf_cmp_queue_config(struct nicvf *, struct queue_set *, int, boolean_t); static void nicvf_sq_free_used_descs(struct nicvf *, struct snd_queue *, int); static int nicvf_tx_mbuf_locked(struct snd_queue *, struct mbuf **); static void nicvf_rbdr_task(void *, int); static void nicvf_rbdr_task_nowait(void *, int); struct rbuf_info { bus_dma_tag_t dmat; bus_dmamap_t dmap; struct mbuf * mbuf; }; #define GET_RBUF_INFO(x) ((struct rbuf_info *)((x) - NICVF_RCV_BUF_ALIGN_BYTES)) /* Poll a register for a specific value */ static int nicvf_poll_reg(struct nicvf *nic, int qidx, uint64_t reg, int bit_pos, int bits, int val) { uint64_t bit_mask; uint64_t reg_val; int timeout = 10; bit_mask = (1UL << bits) - 1; bit_mask = (bit_mask << bit_pos); while (timeout) { reg_val = nicvf_queue_reg_read(nic, reg, qidx); if (((reg_val & bit_mask) >> bit_pos) == val) return (0); DELAY(1000); timeout--; } device_printf(nic->dev, "Poll on reg 0x%lx failed\n", reg); return (ETIMEDOUT); } /* Callback for bus_dmamap_load() */ static void nicvf_dmamap_q_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error) { bus_addr_t *paddr; KASSERT(nseg == 1, ("wrong number of segments, should be 1")); paddr = arg; *paddr = segs->ds_addr; } /* Allocate memory for a queue's descriptors */ static int nicvf_alloc_q_desc_mem(struct nicvf *nic, struct q_desc_mem *dmem, int q_len, int desc_size, int align_bytes) { int err, err_dmat; /* Create DMA tag first */ err = bus_dma_tag_create( bus_get_dma_tag(nic->dev), /* parent tag */ align_bytes, /* alignment */ 0, /* boundary */ BUS_SPACE_MAXADDR, /* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filtfunc, filtfuncarg */ (q_len * desc_size), /* maxsize */ 1, /* nsegments */ (q_len * desc_size), /* maxsegsize */ 0, /* flags */ NULL, NULL, /* lockfunc, lockfuncarg */ &dmem->dmat); /* dmat */ if (err != 0) { device_printf(nic->dev, "Failed to create busdma tag for descriptors 
ring\n"); return (err); } /* Allocate segment of continuous DMA safe memory */ err = bus_dmamem_alloc( dmem->dmat, /* DMA tag */ &dmem->base, /* virtual address */ (BUS_DMA_NOWAIT | BUS_DMA_ZERO), /* flags */ &dmem->dmap); /* DMA map */ if (err != 0) { device_printf(nic->dev, "Failed to allocate DMA safe memory for" "descriptors ring\n"); goto dmamem_fail; } err = bus_dmamap_load( dmem->dmat, dmem->dmap, dmem->base, (q_len * desc_size), /* allocation size */ nicvf_dmamap_q_cb, /* map to DMA address cb. */ &dmem->phys_base, /* physical address */ BUS_DMA_NOWAIT); if (err != 0) { device_printf(nic->dev, "Cannot load DMA map of descriptors ring\n"); goto dmamap_fail; } dmem->q_len = q_len; dmem->size = (desc_size * q_len); return (0); dmamap_fail: bus_dmamem_free(dmem->dmat, dmem->base, dmem->dmap); dmem->phys_base = 0; dmamem_fail: err_dmat = bus_dma_tag_destroy(dmem->dmat); dmem->base = NULL; KASSERT(err_dmat == 0, ("%s: Trying to destroy BUSY DMA tag", __func__)); return (err); } /* Free queue's descriptor memory */ static void nicvf_free_q_desc_mem(struct nicvf *nic, struct q_desc_mem *dmem) { int err; if ((dmem == NULL) || (dmem->base == NULL)) return; /* Unload a map */ bus_dmamap_sync(dmem->dmat, dmem->dmap, BUS_DMASYNC_POSTREAD); bus_dmamap_unload(dmem->dmat, dmem->dmap); /* Free DMA memory */ bus_dmamem_free(dmem->dmat, dmem->base, dmem->dmap); /* Destroy DMA tag */ err = bus_dma_tag_destroy(dmem->dmat); KASSERT(err == 0, ("%s: Trying to destroy BUSY DMA tag", __func__)); dmem->phys_base = 0; dmem->base = NULL; } /* * Allocate buffer for packet reception * HW returns memory address where packet is DMA'ed but not a pointer * into RBDR ring, so save buffer address at the start of fragment and * align the start address to a cache aligned address */ static __inline int nicvf_alloc_rcv_buffer(struct nicvf *nic, struct rbdr *rbdr, bus_dmamap_t dmap, int mflags, uint32_t buf_len, bus_addr_t *rbuf) { struct mbuf *mbuf; struct rbuf_info *rinfo; bus_dma_segment_t segs[1]; int nsegs; int err; mbuf = m_getjcl(mflags, MT_DATA, M_PKTHDR, MCLBYTES); if (mbuf == NULL) return (ENOMEM); /* * The length is equal to the actual length + one 128b line * used as a room for rbuf_info structure. */ mbuf->m_len = mbuf->m_pkthdr.len = buf_len; err = bus_dmamap_load_mbuf_sg(rbdr->rbdr_buff_dmat, dmap, mbuf, segs, &nsegs, BUS_DMA_NOWAIT); if (err != 0) { device_printf(nic->dev, "Failed to map mbuf into DMA visible memory, err: %d\n", err); m_freem(mbuf); bus_dmamap_destroy(rbdr->rbdr_buff_dmat, dmap); return (err); } if (nsegs != 1) panic("Unexpected number of DMA segments for RB: %d", nsegs); /* * Now use the room for rbuf_info structure * and adjust mbuf data and length. */ rinfo = (struct rbuf_info *)mbuf->m_data; m_adj(mbuf, NICVF_RCV_BUF_ALIGN_BYTES); rinfo->dmat = rbdr->rbdr_buff_dmat; rinfo->dmap = dmap; rinfo->mbuf = mbuf; *rbuf = segs[0].ds_addr + NICVF_RCV_BUF_ALIGN_BYTES; return (0); } /* Retrieve mbuf for received packet */ static struct mbuf * nicvf_rb_ptr_to_mbuf(struct nicvf *nic, bus_addr_t rb_ptr) { struct mbuf *mbuf; struct rbuf_info *rinfo; /* Get buffer start address and alignment offset */ rinfo = GET_RBUF_INFO(PHYS_TO_DMAP(rb_ptr)); /* Now retrieve mbuf to give to stack */ mbuf = rinfo->mbuf; if (__predict_false(mbuf == NULL)) { panic("%s: Received packet fragment with NULL mbuf", device_get_nameunit(nic->dev)); } /* * Clear the mbuf in the descriptor to indicate * that this slot is processed and free to use. 
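 * The rbuf_info header occupies the 128-byte line in front of the
 * payload that nicvf_alloc_rcv_buffer() reserved with m_adj(), which is
 * why GET_RBUF_INFO() steps back NICVF_RCV_BUF_ALIGN_BYTES from the
 * buffer address reported by the hardware.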
*/ rinfo->mbuf = NULL; bus_dmamap_sync(rinfo->dmat, rinfo->dmap, BUS_DMASYNC_POSTREAD); bus_dmamap_unload(rinfo->dmat, rinfo->dmap); return (mbuf); } /* Allocate RBDR ring and populate receive buffers */ static int nicvf_init_rbdr(struct nicvf *nic, struct rbdr *rbdr, int ring_len, int buf_size, int qidx) { bus_dmamap_t dmap; bus_addr_t rbuf; struct rbdr_entry_t *desc; int idx; int err; /* Allocate rbdr descriptors ring */ err = nicvf_alloc_q_desc_mem(nic, &rbdr->dmem, ring_len, sizeof(struct rbdr_entry_t), NICVF_RCV_BUF_ALIGN_BYTES); if (err != 0) { device_printf(nic->dev, "Failed to create RBDR descriptors ring\n"); return (err); } rbdr->desc = rbdr->dmem.base; /* * Buffer size has to be in multiples of 128 bytes. * Make room for metadata of size of one line (128 bytes). */ rbdr->dma_size = buf_size - NICVF_RCV_BUF_ALIGN_BYTES; rbdr->enable = TRUE; rbdr->thresh = RBDR_THRESH; rbdr->nic = nic; rbdr->idx = qidx; /* * Create DMA tag for Rx buffers. * Each map created using this tag is intended to store Rx payload for * one fragment and one header structure containing rbuf_info (thus * additional 128 byte line since RB must be a multiple of 128 byte * cache line). */ if (buf_size > MCLBYTES) { device_printf(nic->dev, "Buffer size to large for mbuf cluster\n"); return (EINVAL); } err = bus_dma_tag_create( bus_get_dma_tag(nic->dev), /* parent tag */ NICVF_RCV_BUF_ALIGN_BYTES, /* alignment */ 0, /* boundary */ DMAP_MAX_PHYSADDR, /* lowaddr */ DMAP_MIN_PHYSADDR, /* highaddr */ NULL, NULL, /* filtfunc, filtfuncarg */ roundup2(buf_size, MCLBYTES), /* maxsize */ 1, /* nsegments */ roundup2(buf_size, MCLBYTES), /* maxsegsize */ 0, /* flags */ NULL, NULL, /* lockfunc, lockfuncarg */ &rbdr->rbdr_buff_dmat); /* dmat */ if (err != 0) { device_printf(nic->dev, "Failed to create busdma tag for RBDR buffers\n"); return (err); } rbdr->rbdr_buff_dmaps = malloc(sizeof(*rbdr->rbdr_buff_dmaps) * ring_len, M_NICVF, (M_WAITOK | M_ZERO)); for (idx = 0; idx < ring_len; idx++) { err = bus_dmamap_create(rbdr->rbdr_buff_dmat, 0, &dmap); if (err != 0) { device_printf(nic->dev, "Failed to create DMA map for RB\n"); return (err); } rbdr->rbdr_buff_dmaps[idx] = dmap; err = nicvf_alloc_rcv_buffer(nic, rbdr, dmap, M_WAITOK, DMA_BUFFER_LEN, &rbuf); if (err != 0) return (err); desc = GET_RBDR_DESC(rbdr, idx); desc->buf_addr = (rbuf >> NICVF_RCV_BUF_ALIGN); } /* Allocate taskqueue */ TASK_INIT(&rbdr->rbdr_task, 0, nicvf_rbdr_task, rbdr); TASK_INIT(&rbdr->rbdr_task_nowait, 0, nicvf_rbdr_task_nowait, rbdr); rbdr->rbdr_taskq = taskqueue_create_fast("nicvf_rbdr_taskq", M_WAITOK, taskqueue_thread_enqueue, &rbdr->rbdr_taskq); taskqueue_start_threads(&rbdr->rbdr_taskq, 1, PI_NET, "%s: rbdr_taskq", device_get_nameunit(nic->dev)); return (0); } /* Free RBDR ring and its receive buffers */ static void nicvf_free_rbdr(struct nicvf *nic, struct rbdr *rbdr) { struct mbuf *mbuf; struct queue_set *qs; struct rbdr_entry_t *desc; struct rbuf_info *rinfo; bus_addr_t buf_addr; int head, tail, idx; int err; qs = nic->qs; if ((qs == NULL) || (rbdr == NULL)) return; rbdr->enable = FALSE; if (rbdr->rbdr_taskq != NULL) { /* Remove tasks */ while (taskqueue_cancel(rbdr->rbdr_taskq, &rbdr->rbdr_task_nowait, NULL) != 0) { /* Finish the nowait task first */ taskqueue_drain(rbdr->rbdr_taskq, &rbdr->rbdr_task_nowait); } taskqueue_free(rbdr->rbdr_taskq); rbdr->rbdr_taskq = NULL; while (taskqueue_cancel(taskqueue_thread, &rbdr->rbdr_task, NULL) != 0) { /* Now finish the sleepable task */ taskqueue_drain(taskqueue_thread, &rbdr->rbdr_task); } } /* * Free all 
of the memory under the RB descriptors. * There are assumptions here: * 1. Corresponding RBDR is disabled * - it is safe to operate using head and tail indexes * 2. All bffers that were received are properly freed by * the receive handler * - there is no need to unload DMA map and free MBUF for other * descriptors than unused ones */ if (rbdr->rbdr_buff_dmat != NULL) { head = rbdr->head; tail = rbdr->tail; while (head != tail) { desc = GET_RBDR_DESC(rbdr, head); buf_addr = desc->buf_addr << NICVF_RCV_BUF_ALIGN; rinfo = GET_RBUF_INFO(PHYS_TO_DMAP(buf_addr)); bus_dmamap_unload(rbdr->rbdr_buff_dmat, rinfo->dmap); mbuf = rinfo->mbuf; /* This will destroy everything including rinfo! */ m_freem(mbuf); head++; head &= (rbdr->dmem.q_len - 1); } /* Free tail descriptor */ desc = GET_RBDR_DESC(rbdr, tail); buf_addr = desc->buf_addr << NICVF_RCV_BUF_ALIGN; rinfo = GET_RBUF_INFO(PHYS_TO_DMAP(buf_addr)); bus_dmamap_unload(rbdr->rbdr_buff_dmat, rinfo->dmap); mbuf = rinfo->mbuf; /* This will destroy everything including rinfo! */ m_freem(mbuf); /* Destroy DMA maps */ for (idx = 0; idx < qs->rbdr_len; idx++) { if (rbdr->rbdr_buff_dmaps[idx] == NULL) continue; err = bus_dmamap_destroy(rbdr->rbdr_buff_dmat, rbdr->rbdr_buff_dmaps[idx]); KASSERT(err == 0, ("%s: Could not destroy DMA map for RB, desc: %d", __func__, idx)); rbdr->rbdr_buff_dmaps[idx] = NULL; } /* Now destroy the tag */ err = bus_dma_tag_destroy(rbdr->rbdr_buff_dmat); KASSERT(err == 0, ("%s: Trying to destroy BUSY DMA tag", __func__)); rbdr->head = 0; rbdr->tail = 0; } /* Free RBDR ring */ nicvf_free_q_desc_mem(nic, &rbdr->dmem); } /* * Refill receive buffer descriptors with new buffers. */ static int nicvf_refill_rbdr(struct rbdr *rbdr, int mflags) { struct nicvf *nic; struct queue_set *qs; int rbdr_idx; int tail, qcount; int refill_rb_cnt; struct rbdr_entry_t *desc; bus_dmamap_t dmap; bus_addr_t rbuf; boolean_t rb_alloc_fail; int new_rb; rb_alloc_fail = TRUE; new_rb = 0; nic = rbdr->nic; qs = nic->qs; rbdr_idx = rbdr->idx; /* Check if it's enabled */ if (!rbdr->enable) return (0); /* Get no of desc's to be refilled */ qcount = nicvf_queue_reg_read(nic, NIC_QSET_RBDR_0_1_STATUS0, rbdr_idx); qcount &= 0x7FFFF; /* Doorbell can be ringed with a max of ring size minus 1 */ if (qcount >= (qs->rbdr_len - 1)) { rb_alloc_fail = FALSE; goto out; } else refill_rb_cnt = qs->rbdr_len - qcount - 1; /* Start filling descs from tail */ tail = nicvf_queue_reg_read(nic, NIC_QSET_RBDR_0_1_TAIL, rbdr_idx) >> 3; while (refill_rb_cnt) { tail++; tail &= (rbdr->dmem.q_len - 1); dmap = rbdr->rbdr_buff_dmaps[tail]; if (nicvf_alloc_rcv_buffer(nic, rbdr, dmap, mflags, DMA_BUFFER_LEN, &rbuf)) { /* Something went wrong. Resign */ break; } desc = GET_RBDR_DESC(rbdr, tail); desc->buf_addr = (rbuf >> NICVF_RCV_BUF_ALIGN); refill_rb_cnt--; new_rb++; } /* make sure all memory stores are done before ringing doorbell */ wmb(); /* Check if buffer allocation failed */ if (refill_rb_cnt == 0) rb_alloc_fail = FALSE; /* Notify HW */ nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_DOOR, rbdr_idx, new_rb); out: if (!rb_alloc_fail) { /* * Re-enable RBDR interrupts only * if buffer allocation is success. 
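 * Otherwise ENOMEM is returned and the RBDR interrupt stays masked; the
 * nowait task then falls back to the sleepable nicvf_rbdr_task(), which
 * retries the refill with M_WAITOK.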
*/ nicvf_enable_intr(nic, NICVF_INTR_RBDR, rbdr_idx); return (0); } return (ENOMEM); } /* Refill RBs even if sleep is needed to reclaim memory */ static void nicvf_rbdr_task(void *arg, int pending) { struct rbdr *rbdr; int err; rbdr = (struct rbdr *)arg; err = nicvf_refill_rbdr(rbdr, M_WAITOK); if (__predict_false(err != 0)) { panic("%s: Failed to refill RBs even when sleep enabled", __func__); } } /* Refill RBs as soon as possible without waiting */ static void nicvf_rbdr_task_nowait(void *arg, int pending) { struct rbdr *rbdr; int err; rbdr = (struct rbdr *)arg; err = nicvf_refill_rbdr(rbdr, M_NOWAIT); if (err != 0) { /* * Schedule another, sleepable kernel thread * that will for sure refill the buffers. */ taskqueue_enqueue(taskqueue_thread, &rbdr->rbdr_task); } } static int nicvf_rcv_pkt_handler(struct nicvf *nic, struct cmp_queue *cq, struct cqe_rx_t *cqe_rx, int cqe_type) { struct mbuf *mbuf; struct rcv_queue *rq; int rq_idx; int err = 0; rq_idx = cqe_rx->rq_idx; rq = &nic->qs->rq[rq_idx]; /* Check for errors */ err = nicvf_check_cqe_rx_errs(nic, cq, cqe_rx); if (err && !cqe_rx->rb_cnt) return (0); mbuf = nicvf_get_rcv_mbuf(nic, cqe_rx); if (mbuf == NULL) { dprintf(nic->dev, "Packet not received\n"); return (0); } /* If error packet */ if (err != 0) { m_freem(mbuf); return (0); } if (rq->lro_enabled && ((cqe_rx->l3_type == L3TYPE_IPV4) && (cqe_rx->l4_type == L4TYPE_TCP)) && (mbuf->m_pkthdr.csum_flags & (CSUM_DATA_VALID | CSUM_PSEUDO_HDR)) == (CSUM_DATA_VALID | CSUM_PSEUDO_HDR)) { /* * At this point it is known that there are no errors in the * packet. Attempt to LRO enqueue. Send to stack if no resources * or enqueue error. */ if ((rq->lro.lro_cnt != 0) && (tcp_lro_rx(&rq->lro, mbuf, 0) == 0)) return (0); } /* * Push this packet to the stack later to avoid * unlocking completion task in the middle of work. */ err = buf_ring_enqueue(cq->rx_br, mbuf); if (err != 0) { /* * Failed to enqueue this mbuf. * We don't drop it, just schedule another task. 
*/ return (err); } return (0); } static void nicvf_snd_pkt_handler(struct nicvf *nic, struct cmp_queue *cq, struct cqe_send_t *cqe_tx, int cqe_type) { bus_dmamap_t dmap; struct mbuf *mbuf; struct snd_queue *sq; struct sq_hdr_subdesc *hdr; mbuf = NULL; sq = &nic->qs->sq[cqe_tx->sq_idx]; hdr = (struct sq_hdr_subdesc *)GET_SQ_DESC(sq, cqe_tx->sqe_ptr); if (hdr->subdesc_type != SQ_DESC_TYPE_HEADER) return; dprintf(nic->dev, "%s Qset #%d SQ #%d SQ ptr #%d subdesc count %d\n", __func__, cqe_tx->sq_qs, cqe_tx->sq_idx, cqe_tx->sqe_ptr, hdr->subdesc_cnt); dmap = (bus_dmamap_t)sq->snd_buff[cqe_tx->sqe_ptr].dmap; bus_dmamap_unload(sq->snd_buff_dmat, dmap); mbuf = (struct mbuf *)sq->snd_buff[cqe_tx->sqe_ptr].mbuf; if (mbuf != NULL) { m_freem(mbuf); sq->snd_buff[cqe_tx->sqe_ptr].mbuf = NULL; nicvf_put_sq_desc(sq, hdr->subdesc_cnt + 1); } nicvf_check_cqe_tx_errs(nic, cq, cqe_tx); } static int nicvf_cq_intr_handler(struct nicvf *nic, uint8_t cq_idx) { struct mbuf *mbuf; struct ifnet *ifp; int processed_cqe, work_done = 0, tx_done = 0; int cqe_count, cqe_head; struct queue_set *qs = nic->qs; struct cmp_queue *cq = &qs->cq[cq_idx]; struct snd_queue *sq = &qs->sq[cq_idx]; struct rcv_queue *rq; struct cqe_rx_t *cq_desc; struct lro_ctrl *lro; int rq_idx; int cmp_err; NICVF_CMP_LOCK(cq); cmp_err = 0; processed_cqe = 0; /* Get no of valid CQ entries to process */ cqe_count = nicvf_queue_reg_read(nic, NIC_QSET_CQ_0_7_STATUS, cq_idx); cqe_count &= CQ_CQE_COUNT; if (cqe_count == 0) goto out; /* Get head of the valid CQ entries */ cqe_head = nicvf_queue_reg_read(nic, NIC_QSET_CQ_0_7_HEAD, cq_idx) >> 9; cqe_head &= 0xFFFF; dprintf(nic->dev, "%s CQ%d cqe_count %d cqe_head %d\n", __func__, cq_idx, cqe_count, cqe_head); while (processed_cqe < cqe_count) { /* Get the CQ descriptor */ cq_desc = (struct cqe_rx_t *)GET_CQ_DESC(cq, cqe_head); cqe_head++; cqe_head &= (cq->dmem.q_len - 1); /* Prefetch next CQ descriptor */ __builtin_prefetch((struct cqe_rx_t *)GET_CQ_DESC(cq, cqe_head)); dprintf(nic->dev, "CQ%d cq_desc->cqe_type %d\n", cq_idx, cq_desc->cqe_type); switch (cq_desc->cqe_type) { case CQE_TYPE_RX: cmp_err = nicvf_rcv_pkt_handler(nic, cq, cq_desc, CQE_TYPE_RX); if (__predict_false(cmp_err != 0)) { /* * Ups. Cannot finish now. * Let's try again later. 
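 * A non-zero return from nicvf_rcv_pkt_handler() means the mbuf could
 * not be queued on cq->rx_br, so stop before counting this CQE as
 * processed; nicvf_cmp_task() sees the error and re-enqueues itself to
 * finish the remaining descriptors.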
*/ goto done; } work_done++; break; case CQE_TYPE_SEND: nicvf_snd_pkt_handler(nic, cq, (void *)cq_desc, CQE_TYPE_SEND); tx_done++; break; case CQE_TYPE_INVALID: case CQE_TYPE_RX_SPLIT: case CQE_TYPE_RX_TCP: case CQE_TYPE_SEND_PTP: /* Ignore for now */ break; } processed_cqe++; } done: dprintf(nic->dev, "%s CQ%d processed_cqe %d work_done %d\n", __func__, cq_idx, processed_cqe, work_done); /* Ring doorbell to inform H/W to reuse processed CQEs */ nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_DOOR, cq_idx, processed_cqe); if ((tx_done > 0) && ((if_getdrvflags(nic->ifp) & IFF_DRV_RUNNING) != 0)) { /* Reenable TXQ if its stopped earlier due to SQ full */ if_setdrvflagbits(nic->ifp, IFF_DRV_RUNNING, IFF_DRV_OACTIVE); taskqueue_enqueue(sq->snd_taskq, &sq->snd_task); } out: /* * Flush any outstanding LRO work */ rq_idx = cq_idx; rq = &nic->qs->rq[rq_idx]; lro = &rq->lro; tcp_lro_flush_all(lro); NICVF_CMP_UNLOCK(cq); ifp = nic->ifp; /* Push received MBUFs to the stack */ while (!buf_ring_empty(cq->rx_br)) { mbuf = buf_ring_dequeue_mc(cq->rx_br); if (__predict_true(mbuf != NULL)) (*ifp->if_input)(ifp, mbuf); } return (cmp_err); } /* * Qset error interrupt handler * * As of now only CQ errors are handled */ static void nicvf_qs_err_task(void *arg, int pending) { struct nicvf *nic; struct queue_set *qs; int qidx; uint64_t status; boolean_t enable = TRUE; nic = (struct nicvf *)arg; qs = nic->qs; /* Deactivate network interface */ if_setdrvflagbits(nic->ifp, IFF_DRV_OACTIVE, IFF_DRV_RUNNING); /* Check if it is CQ err */ for (qidx = 0; qidx < qs->cq_cnt; qidx++) { status = nicvf_queue_reg_read(nic, NIC_QSET_CQ_0_7_STATUS, qidx); if ((status & CQ_ERR_MASK) == 0) continue; /* Process already queued CQEs and reconfig CQ */ nicvf_disable_intr(nic, NICVF_INTR_CQ, qidx); nicvf_sq_disable(nic, qidx); (void)nicvf_cq_intr_handler(nic, qidx); nicvf_cmp_queue_config(nic, qs, qidx, enable); nicvf_sq_free_used_descs(nic, &qs->sq[qidx], qidx); nicvf_sq_enable(nic, &qs->sq[qidx], qidx); nicvf_enable_intr(nic, NICVF_INTR_CQ, qidx); } if_setdrvflagbits(nic->ifp, IFF_DRV_RUNNING, IFF_DRV_OACTIVE); /* Re-enable Qset error interrupt */ nicvf_enable_intr(nic, NICVF_INTR_QS_ERR, 0); } static void nicvf_cmp_task(void *arg, int pending) { struct cmp_queue *cq; struct nicvf *nic; int cmp_err; cq = (struct cmp_queue *)arg; nic = cq->nic; /* Handle CQ descriptors */ cmp_err = nicvf_cq_intr_handler(nic, cq->idx); if (__predict_false(cmp_err != 0)) { /* * Schedule another thread here since we did not * process the entire CQ due to Tx or Rx CQ parse error. */ taskqueue_enqueue(cq->cmp_taskq, &cq->cmp_task); } nicvf_clear_intr(nic, NICVF_INTR_CQ, cq->idx); /* Reenable interrupt (previously disabled in nicvf_intr_handler() */ nicvf_enable_intr(nic, NICVF_INTR_CQ, cq->idx); } /* Initialize completion queue */ static int nicvf_init_cmp_queue(struct nicvf *nic, struct cmp_queue *cq, int q_len, int qidx) { int err; /* Initizalize lock */ snprintf(cq->mtx_name, sizeof(cq->mtx_name), "%s: CQ(%d) lock", device_get_nameunit(nic->dev), qidx); mtx_init(&cq->mtx, cq->mtx_name, NULL, MTX_DEF); err = nicvf_alloc_q_desc_mem(nic, &cq->dmem, q_len, CMP_QUEUE_DESC_SIZE, NICVF_CQ_BASE_ALIGN_BYTES); if (err != 0) { device_printf(nic->dev, "Could not allocate DMA memory for CQ\n"); return (err); } cq->desc = cq->dmem.base; cq->thresh = pass1_silicon(nic->dev) ? 
0 : CMP_QUEUE_CQE_THRESH; cq->nic = nic; cq->idx = qidx; nic->cq_coalesce_usecs = (CMP_QUEUE_TIMER_THRESH * 0.05) - 1; cq->rx_br = buf_ring_alloc(CMP_QUEUE_LEN * 8, M_DEVBUF, M_WAITOK, &cq->mtx); /* Allocate taskqueue */ TASK_INIT(&cq->cmp_task, 0, nicvf_cmp_task, cq); cq->cmp_taskq = taskqueue_create_fast("nicvf_cmp_taskq", M_WAITOK, taskqueue_thread_enqueue, &cq->cmp_taskq); taskqueue_start_threads(&cq->cmp_taskq, 1, PI_NET, "%s: cmp_taskq(%d)", device_get_nameunit(nic->dev), qidx); return (0); } static void nicvf_free_cmp_queue(struct nicvf *nic, struct cmp_queue *cq) { if (cq == NULL) return; /* * The completion queue itself should be disabled by now * (ref. nicvf_snd_queue_config()). * Ensure that it is safe to disable it or panic. */ if (cq->enable) panic("%s: Trying to free working CQ(%d)", __func__, cq->idx); if (cq->cmp_taskq != NULL) { /* Remove task */ while (taskqueue_cancel(cq->cmp_taskq, &cq->cmp_task, NULL) != 0) taskqueue_drain(cq->cmp_taskq, &cq->cmp_task); taskqueue_free(cq->cmp_taskq); cq->cmp_taskq = NULL; } /* * Completion interrupt will possibly enable interrupts again * so disable interrupting now after we finished processing * completion task. It is safe to do so since the corresponding CQ * was already disabled. */ nicvf_disable_intr(nic, NICVF_INTR_CQ, cq->idx); nicvf_clear_intr(nic, NICVF_INTR_CQ, cq->idx); NICVF_CMP_LOCK(cq); nicvf_free_q_desc_mem(nic, &cq->dmem); drbr_free(cq->rx_br, M_DEVBUF); NICVF_CMP_UNLOCK(cq); mtx_destroy(&cq->mtx); memset(cq->mtx_name, 0, sizeof(cq->mtx_name)); } int nicvf_xmit_locked(struct snd_queue *sq) { struct nicvf *nic; struct ifnet *ifp; struct mbuf *next; int err; NICVF_TX_LOCK_ASSERT(sq); nic = sq->nic; ifp = nic->ifp; err = 0; while ((next = drbr_peek(ifp, sq->br)) != NULL) { /* Send a copy of the frame to the BPF listener */ ETHER_BPF_MTAP(ifp, next); err = nicvf_tx_mbuf_locked(sq, &next); if (err != 0) { if (next == NULL) drbr_advance(ifp, sq->br); else drbr_putback(ifp, sq->br, next); break; } drbr_advance(ifp, sq->br); } return (err); } static void nicvf_snd_task(void *arg, int pending) { struct snd_queue *sq = (struct snd_queue *)arg; struct nicvf *nic; struct ifnet *ifp; int err; nic = sq->nic; ifp = nic->ifp; /* * Skip sending anything if the driver is not running, * SQ full or link is down. 
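* (The test below requires IFF_DRV_RUNNING to be set, IFF_DRV_OACTIVE to * be clear and nic->link_up to be true.)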
*/ if (((if_getdrvflags(ifp) & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) != IFF_DRV_RUNNING) || !nic->link_up) return; NICVF_TX_LOCK(sq); err = nicvf_xmit_locked(sq); NICVF_TX_UNLOCK(sq); /* Try again */ if (err != 0) taskqueue_enqueue(sq->snd_taskq, &sq->snd_task); } /* Initialize transmit queue */ static int nicvf_init_snd_queue(struct nicvf *nic, struct snd_queue *sq, int q_len, int qidx) { size_t i; int err; /* Initizalize TX lock for this queue */ snprintf(sq->mtx_name, sizeof(sq->mtx_name), "%s: SQ(%d) lock", device_get_nameunit(nic->dev), qidx); mtx_init(&sq->mtx, sq->mtx_name, NULL, MTX_DEF); NICVF_TX_LOCK(sq); /* Allocate buffer ring */ sq->br = buf_ring_alloc(q_len / MIN_SQ_DESC_PER_PKT_XMIT, M_DEVBUF, M_NOWAIT, &sq->mtx); if (sq->br == NULL) { device_printf(nic->dev, "ERROR: Could not set up buf ring for SQ(%d)\n", qidx); err = ENOMEM; goto error; } /* Allocate DMA memory for Tx descriptors */ err = nicvf_alloc_q_desc_mem(nic, &sq->dmem, q_len, SND_QUEUE_DESC_SIZE, NICVF_SQ_BASE_ALIGN_BYTES); if (err != 0) { device_printf(nic->dev, "Could not allocate DMA memory for SQ\n"); goto error; } sq->desc = sq->dmem.base; sq->head = sq->tail = 0; atomic_store_rel_int(&sq->free_cnt, q_len - 1); sq->thresh = SND_QUEUE_THRESH; sq->idx = qidx; sq->nic = nic; /* * Allocate DMA maps for Tx buffers */ /* Create DMA tag first */ err = bus_dma_tag_create( bus_get_dma_tag(nic->dev), /* parent tag */ 1, /* alignment */ 0, /* boundary */ BUS_SPACE_MAXADDR, /* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filtfunc, filtfuncarg */ NICVF_TSO_MAXSIZE, /* maxsize */ NICVF_TSO_NSEGS, /* nsegments */ MCLBYTES, /* maxsegsize */ 0, /* flags */ NULL, NULL, /* lockfunc, lockfuncarg */ &sq->snd_buff_dmat); /* dmat */ if (err != 0) { device_printf(nic->dev, "Failed to create busdma tag for Tx buffers\n"); goto error; } /* Allocate send buffers array */ - sq->snd_buff = malloc(sizeof(*sq->snd_buff) * q_len, M_NICVF, + sq->snd_buff = mallocarray(q_len, sizeof(*sq->snd_buff), M_NICVF, (M_NOWAIT | M_ZERO)); if (sq->snd_buff == NULL) { device_printf(nic->dev, "Could not allocate memory for Tx buffers array\n"); err = ENOMEM; goto error; } /* Now populate maps */ for (i = 0; i < q_len; i++) { err = bus_dmamap_create(sq->snd_buff_dmat, 0, &sq->snd_buff[i].dmap); if (err != 0) { device_printf(nic->dev, "Failed to create DMA maps for Tx buffers\n"); goto error; } } NICVF_TX_UNLOCK(sq); /* Allocate taskqueue */ TASK_INIT(&sq->snd_task, 0, nicvf_snd_task, sq); sq->snd_taskq = taskqueue_create_fast("nicvf_snd_taskq", M_WAITOK, taskqueue_thread_enqueue, &sq->snd_taskq); taskqueue_start_threads(&sq->snd_taskq, 1, PI_NET, "%s: snd_taskq(%d)", device_get_nameunit(nic->dev), qidx); return (0); error: NICVF_TX_UNLOCK(sq); return (err); } static void nicvf_free_snd_queue(struct nicvf *nic, struct snd_queue *sq) { struct queue_set *qs = nic->qs; size_t i; int err; if (sq == NULL) return; if (sq->snd_taskq != NULL) { /* Remove task */ while (taskqueue_cancel(sq->snd_taskq, &sq->snd_task, NULL) != 0) taskqueue_drain(sq->snd_taskq, &sq->snd_task); taskqueue_free(sq->snd_taskq); sq->snd_taskq = NULL; } NICVF_TX_LOCK(sq); if (sq->snd_buff_dmat != NULL) { if (sq->snd_buff != NULL) { for (i = 0; i < qs->sq_len; i++) { m_freem(sq->snd_buff[i].mbuf); sq->snd_buff[i].mbuf = NULL; bus_dmamap_unload(sq->snd_buff_dmat, sq->snd_buff[i].dmap); err = bus_dmamap_destroy(sq->snd_buff_dmat, sq->snd_buff[i].dmap); /* * If bus_dmamap_destroy fails it can cause * random panic later if the tag is also * destroyed in the process. 
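* The KASSERT below turns such a failure into an immediate, diagnosable * panic on kernels built with INVARIANTS.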
*/ KASSERT(err == 0, ("%s: Could not destroy DMA map for SQ", __func__)); } } free(sq->snd_buff, M_NICVF); err = bus_dma_tag_destroy(sq->snd_buff_dmat); KASSERT(err == 0, ("%s: Trying to destroy BUSY DMA tag", __func__)); } /* Free private driver ring for this send queue */ if (sq->br != NULL) drbr_free(sq->br, M_DEVBUF); if (sq->dmem.base != NULL) nicvf_free_q_desc_mem(nic, &sq->dmem); NICVF_TX_UNLOCK(sq); /* Destroy Tx lock */ mtx_destroy(&sq->mtx); memset(sq->mtx_name, 0, sizeof(sq->mtx_name)); } static void nicvf_reclaim_snd_queue(struct nicvf *nic, struct queue_set *qs, int qidx) { /* Disable send queue */ nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_CFG, qidx, 0); /* Check if SQ is stopped */ if (nicvf_poll_reg(nic, qidx, NIC_QSET_SQ_0_7_STATUS, 21, 1, 0x01)) return; /* Reset send queue */ nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_CFG, qidx, NICVF_SQ_RESET); } static void nicvf_reclaim_rcv_queue(struct nicvf *nic, struct queue_set *qs, int qidx) { union nic_mbx mbx = {}; /* Make sure all packets in the pipeline are written back into mem */ mbx.msg.msg = NIC_MBOX_MSG_RQ_SW_SYNC; nicvf_send_msg_to_pf(nic, &mbx); } static void nicvf_reclaim_cmp_queue(struct nicvf *nic, struct queue_set *qs, int qidx) { /* Disable timer threshold (doesn't get reset upon CQ reset) */ nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_CFG2, qidx, 0); /* Disable completion queue */ nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_CFG, qidx, 0); /* Reset completion queue */ nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_CFG, qidx, NICVF_CQ_RESET); } static void nicvf_reclaim_rbdr(struct nicvf *nic, struct rbdr *rbdr, int qidx) { uint64_t tmp, fifo_state; int timeout = 10; /* Save head and tail pointers for freeing up buffers */ rbdr->head = nicvf_queue_reg_read(nic, NIC_QSET_RBDR_0_1_HEAD, qidx) >> 3; rbdr->tail = nicvf_queue_reg_read(nic, NIC_QSET_RBDR_0_1_TAIL, qidx) >> 3; /* * If RBDR FIFO is in 'FAIL' state then do a reset first * before reclaiming.
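* (The FIFO state is read below from bits 63:62 of * NIC_QSET_RBDR_0_1_STATUS0; a value of 0x3 indicates the FAIL state.)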
*/ fifo_state = nicvf_queue_reg_read(nic, NIC_QSET_RBDR_0_1_STATUS0, qidx); if (((fifo_state >> 62) & 0x03) == 0x3) { nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_CFG, qidx, NICVF_RBDR_RESET); } /* Disable RBDR */ nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_CFG, qidx, 0); if (nicvf_poll_reg(nic, qidx, NIC_QSET_RBDR_0_1_STATUS0, 62, 2, 0x00)) return; while (1) { tmp = nicvf_queue_reg_read(nic, NIC_QSET_RBDR_0_1_PREFETCH_STATUS, qidx); if ((tmp & 0xFFFFFFFF) == ((tmp >> 32) & 0xFFFFFFFF)) break; DELAY(1000); timeout--; if (!timeout) { device_printf(nic->dev, "Failed polling on prefetch status\n"); return; } } nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_CFG, qidx, NICVF_RBDR_RESET); if (nicvf_poll_reg(nic, qidx, NIC_QSET_RBDR_0_1_STATUS0, 62, 2, 0x02)) return; nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_CFG, qidx, 0x00); if (nicvf_poll_reg(nic, qidx, NIC_QSET_RBDR_0_1_STATUS0, 62, 2, 0x00)) return; } /* Configures receive queue */ static void nicvf_rcv_queue_config(struct nicvf *nic, struct queue_set *qs, int qidx, bool enable) { union nic_mbx mbx = {}; struct rcv_queue *rq; struct rq_cfg rq_cfg; struct ifnet *ifp; struct lro_ctrl *lro; ifp = nic->ifp; rq = &qs->rq[qidx]; rq->enable = enable; lro = &rq->lro; /* Disable receive queue */ nicvf_queue_reg_write(nic, NIC_QSET_RQ_0_7_CFG, qidx, 0); if (!rq->enable) { nicvf_reclaim_rcv_queue(nic, qs, qidx); /* Free LRO memory */ tcp_lro_free(lro); rq->lro_enabled = FALSE; return; } /* Configure LRO if enabled */ rq->lro_enabled = FALSE; if ((if_getcapenable(ifp) & IFCAP_LRO) != 0) { if (tcp_lro_init(lro) != 0) { device_printf(nic->dev, "Failed to initialize LRO for RXQ%d\n", qidx); } else { rq->lro_enabled = TRUE; lro->ifp = nic->ifp; } } rq->cq_qs = qs->vnic_id; rq->cq_idx = qidx; rq->start_rbdr_qs = qs->vnic_id; rq->start_qs_rbdr_idx = qs->rbdr_cnt - 1; rq->cont_rbdr_qs = qs->vnic_id; rq->cont_qs_rbdr_idx = qs->rbdr_cnt - 1; /* all writes of RBDR data to be loaded into L2 Cache as well*/ rq->caching = 1; /* Send a mailbox msg to PF to config RQ */ mbx.rq.msg = NIC_MBOX_MSG_RQ_CFG; mbx.rq.qs_num = qs->vnic_id; mbx.rq.rq_num = qidx; mbx.rq.cfg = (rq->caching << 26) | (rq->cq_qs << 19) | (rq->cq_idx << 16) | (rq->cont_rbdr_qs << 9) | (rq->cont_qs_rbdr_idx << 8) | (rq->start_rbdr_qs << 1) | (rq->start_qs_rbdr_idx); nicvf_send_msg_to_pf(nic, &mbx); mbx.rq.msg = NIC_MBOX_MSG_RQ_BP_CFG; mbx.rq.cfg = (1UL << 63) | (1UL << 62) | (qs->vnic_id << 0); nicvf_send_msg_to_pf(nic, &mbx); /* * RQ drop config * Enable CQ drop to reserve sufficient CQEs for all tx packets */ mbx.rq.msg = NIC_MBOX_MSG_RQ_DROP_CFG; mbx.rq.cfg = (1UL << 62) | (RQ_CQ_DROP << 8); nicvf_send_msg_to_pf(nic, &mbx); nicvf_queue_reg_write(nic, NIC_QSET_RQ_GEN_CFG, 0, 0x00); /* Enable Receive queue */ rq_cfg.ena = 1; rq_cfg.tcp_ena = 0; nicvf_queue_reg_write(nic, NIC_QSET_RQ_0_7_CFG, qidx, *(uint64_t *)&rq_cfg); } /* Configures completion queue */ static void nicvf_cmp_queue_config(struct nicvf *nic, struct queue_set *qs, int qidx, boolean_t enable) { struct cmp_queue *cq; struct cq_cfg cq_cfg; cq = &qs->cq[qidx]; cq->enable = enable; if (!cq->enable) { nicvf_reclaim_cmp_queue(nic, qs, qidx); return; } /* Reset completion queue */ nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_CFG, qidx, NICVF_CQ_RESET); /* Set completion queue base address */ nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_BASE, qidx, (uint64_t)(cq->dmem.phys_base)); /* Enable Completion queue */ cq_cfg.ena = 1; cq_cfg.reset = 0; cq_cfg.caching = 0; cq_cfg.qsize = CMP_QSIZE; cq_cfg.avg_con = 0; nicvf_queue_reg_write(nic, 
NIC_QSET_CQ_0_7_CFG, qidx, *(uint64_t *)&cq_cfg); /* Set threshold value for interrupt generation */ nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_THRESH, qidx, cq->thresh); nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_CFG2, qidx, nic->cq_coalesce_usecs); } /* Configures transmit queue */ static void nicvf_snd_queue_config(struct nicvf *nic, struct queue_set *qs, int qidx, boolean_t enable) { union nic_mbx mbx = {}; struct snd_queue *sq; struct sq_cfg sq_cfg; sq = &qs->sq[qidx]; sq->enable = enable; if (!sq->enable) { nicvf_reclaim_snd_queue(nic, qs, qidx); return; } /* Reset send queue */ nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_CFG, qidx, NICVF_SQ_RESET); sq->cq_qs = qs->vnic_id; sq->cq_idx = qidx; /* Send a mailbox msg to PF to config SQ */ mbx.sq.msg = NIC_MBOX_MSG_SQ_CFG; mbx.sq.qs_num = qs->vnic_id; mbx.sq.sq_num = qidx; mbx.sq.sqs_mode = nic->sqs_mode; mbx.sq.cfg = (sq->cq_qs << 3) | sq->cq_idx; nicvf_send_msg_to_pf(nic, &mbx); /* Set queue base address */ nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_BASE, qidx, (uint64_t)(sq->dmem.phys_base)); /* Enable send queue & set queue size */ sq_cfg.ena = 1; sq_cfg.reset = 0; sq_cfg.ldwb = 0; sq_cfg.qsize = SND_QSIZE; sq_cfg.tstmp_bgx_intf = 0; nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_CFG, qidx, *(uint64_t *)&sq_cfg); /* Set threshold value for interrupt generation */ nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_THRESH, qidx, sq->thresh); } /* Configures receive buffer descriptor ring */ static void nicvf_rbdr_config(struct nicvf *nic, struct queue_set *qs, int qidx, boolean_t enable) { struct rbdr *rbdr; struct rbdr_cfg rbdr_cfg; rbdr = &qs->rbdr[qidx]; nicvf_reclaim_rbdr(nic, rbdr, qidx); if (!enable) return; /* Set descriptor base address */ nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_BASE, qidx, (uint64_t)(rbdr->dmem.phys_base)); /* Enable RBDR & set queue size */ /* Buffer size should be in multiples of 128 bytes */ rbdr_cfg.ena = 1; rbdr_cfg.reset = 0; rbdr_cfg.ldwb = 0; rbdr_cfg.qsize = RBDR_SIZE; rbdr_cfg.avg_con = 0; rbdr_cfg.lines = rbdr->dma_size / 128; nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_CFG, qidx, *(uint64_t *)&rbdr_cfg); /* Notify HW */ nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_DOOR, qidx, qs->rbdr_len - 1); /* Set threshold value for interrupt generation */ nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_THRESH, qidx, rbdr->thresh - 1); } /* Requests PF to assign and enable Qset */ void nicvf_qset_config(struct nicvf *nic, boolean_t enable) { union nic_mbx mbx = {}; struct queue_set *qs; struct qs_cfg *qs_cfg; qs = nic->qs; if (qs == NULL) { device_printf(nic->dev, "Qset is still not allocated, don't init queues\n"); return; } qs->enable = enable; qs->vnic_id = nic->vf_id; /* Send a mailbox msg to PF to config Qset */ mbx.qs.msg = NIC_MBOX_MSG_QS_CFG; mbx.qs.num = qs->vnic_id; mbx.qs.cfg = 0; qs_cfg = (struct qs_cfg *)&mbx.qs.cfg; if (qs->enable) { qs_cfg->ena = 1; qs_cfg->vnic = qs->vnic_id; } nicvf_send_msg_to_pf(nic, &mbx); } static void nicvf_free_resources(struct nicvf *nic) { int qidx; struct queue_set *qs; qs = nic->qs; /* * Remove QS error task first since it has to be dead * to safely free completion queue tasks. 
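* (nicvf_qs_err_task() drains the completion queues via * nicvf_cq_intr_handler() and re-enables their interrupts, so it must not * be able to run once the CQs start being freed.)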
*/ if (qs->qs_err_taskq != NULL) { /* Shut down QS error tasks */ while (taskqueue_cancel(qs->qs_err_taskq, &qs->qs_err_task, NULL) != 0) { taskqueue_drain(qs->qs_err_taskq, &qs->qs_err_task); } taskqueue_free(qs->qs_err_taskq); qs->qs_err_taskq = NULL; } /* Free receive buffer descriptor ring */ for (qidx = 0; qidx < qs->rbdr_cnt; qidx++) nicvf_free_rbdr(nic, &qs->rbdr[qidx]); /* Free completion queue */ for (qidx = 0; qidx < qs->cq_cnt; qidx++) nicvf_free_cmp_queue(nic, &qs->cq[qidx]); /* Free send queue */ for (qidx = 0; qidx < qs->sq_cnt; qidx++) nicvf_free_snd_queue(nic, &qs->sq[qidx]); } static int nicvf_alloc_resources(struct nicvf *nic) { struct queue_set *qs = nic->qs; int qidx; /* Alloc receive buffer descriptor ring */ for (qidx = 0; qidx < qs->rbdr_cnt; qidx++) { if (nicvf_init_rbdr(nic, &qs->rbdr[qidx], qs->rbdr_len, DMA_BUFFER_LEN, qidx)) goto alloc_fail; } /* Alloc send queue */ for (qidx = 0; qidx < qs->sq_cnt; qidx++) { if (nicvf_init_snd_queue(nic, &qs->sq[qidx], qs->sq_len, qidx)) goto alloc_fail; } /* Alloc completion queue */ for (qidx = 0; qidx < qs->cq_cnt; qidx++) { if (nicvf_init_cmp_queue(nic, &qs->cq[qidx], qs->cq_len, qidx)) goto alloc_fail; } /* Allocate QS error taskqueue */ TASK_INIT(&qs->qs_err_task, 0, nicvf_qs_err_task, nic); qs->qs_err_taskq = taskqueue_create_fast("nicvf_qs_err_taskq", M_WAITOK, taskqueue_thread_enqueue, &qs->qs_err_taskq); taskqueue_start_threads(&qs->qs_err_taskq, 1, PI_NET, "%s: qs_taskq", device_get_nameunit(nic->dev)); return (0); alloc_fail: nicvf_free_resources(nic); return (ENOMEM); } int nicvf_set_qset_resources(struct nicvf *nic) { struct queue_set *qs; qs = malloc(sizeof(*qs), M_NICVF, (M_ZERO | M_WAITOK)); nic->qs = qs; /* Set count of each queue */ qs->rbdr_cnt = RBDR_CNT; qs->rq_cnt = RCV_QUEUE_CNT; qs->sq_cnt = SND_QUEUE_CNT; qs->cq_cnt = CMP_QUEUE_CNT; /* Set queue lengths */ qs->rbdr_len = RCV_BUF_COUNT; qs->sq_len = SND_QUEUE_LEN; qs->cq_len = CMP_QUEUE_LEN; nic->rx_queues = qs->rq_cnt; nic->tx_queues = qs->sq_cnt; return (0); } int nicvf_config_data_transfer(struct nicvf *nic, boolean_t enable) { boolean_t disable = FALSE; struct queue_set *qs; int qidx; qs = nic->qs; if (qs == NULL) return (0); if (enable) { if (nicvf_alloc_resources(nic) != 0) return (ENOMEM); for (qidx = 0; qidx < qs->sq_cnt; qidx++) nicvf_snd_queue_config(nic, qs, qidx, enable); for (qidx = 0; qidx < qs->cq_cnt; qidx++) nicvf_cmp_queue_config(nic, qs, qidx, enable); for (qidx = 0; qidx < qs->rbdr_cnt; qidx++) nicvf_rbdr_config(nic, qs, qidx, enable); for (qidx = 0; qidx < qs->rq_cnt; qidx++) nicvf_rcv_queue_config(nic, qs, qidx, enable); } else { for (qidx = 0; qidx < qs->rq_cnt; qidx++) nicvf_rcv_queue_config(nic, qs, qidx, disable); for (qidx = 0; qidx < qs->rbdr_cnt; qidx++) nicvf_rbdr_config(nic, qs, qidx, disable); for (qidx = 0; qidx < qs->sq_cnt; qidx++) nicvf_snd_queue_config(nic, qs, qidx, disable); for (qidx = 0; qidx < qs->cq_cnt; qidx++) nicvf_cmp_queue_config(nic, qs, qidx, disable); nicvf_free_resources(nic); } return (0); } /* * Get a free desc from SQ * returns descriptor ponter & descriptor number */ static __inline int nicvf_get_sq_desc(struct snd_queue *sq, int desc_cnt) { int qentry; qentry = sq->tail; atomic_subtract_int(&sq->free_cnt, desc_cnt); sq->tail += desc_cnt; sq->tail &= (sq->dmem.q_len - 1); return (qentry); } /* Free descriptor back to SQ for future use */ static void nicvf_put_sq_desc(struct snd_queue *sq, int desc_cnt) { atomic_add_int(&sq->free_cnt, desc_cnt); sq->head += desc_cnt; sq->head &= (sq->dmem.q_len - 
1); } static __inline int nicvf_get_nxt_sqentry(struct snd_queue *sq, int qentry) { qentry++; qentry &= (sq->dmem.q_len - 1); return (qentry); } static void nicvf_sq_enable(struct nicvf *nic, struct snd_queue *sq, int qidx) { uint64_t sq_cfg; sq_cfg = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_CFG, qidx); sq_cfg |= NICVF_SQ_EN; nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_CFG, qidx, sq_cfg); /* Ring doorbell so that H/W restarts processing SQEs */ nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_DOOR, qidx, 0); } static void nicvf_sq_disable(struct nicvf *nic, int qidx) { uint64_t sq_cfg; sq_cfg = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_CFG, qidx); sq_cfg &= ~NICVF_SQ_EN; nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_CFG, qidx, sq_cfg); } static void nicvf_sq_free_used_descs(struct nicvf *nic, struct snd_queue *sq, int qidx) { uint64_t head; struct snd_buff *snd_buff; struct sq_hdr_subdesc *hdr; NICVF_TX_LOCK(sq); head = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_HEAD, qidx) >> 4; while (sq->head != head) { hdr = (struct sq_hdr_subdesc *)GET_SQ_DESC(sq, sq->head); if (hdr->subdesc_type != SQ_DESC_TYPE_HEADER) { nicvf_put_sq_desc(sq, 1); continue; } snd_buff = &sq->snd_buff[sq->head]; if (snd_buff->mbuf != NULL) { bus_dmamap_unload(sq->snd_buff_dmat, snd_buff->dmap); m_freem(snd_buff->mbuf); sq->snd_buff[sq->head].mbuf = NULL; } nicvf_put_sq_desc(sq, hdr->subdesc_cnt + 1); } NICVF_TX_UNLOCK(sq); } /* * Add SQ HEADER subdescriptor. * First subdescriptor for every send descriptor. */ static __inline int nicvf_sq_add_hdr_subdesc(struct snd_queue *sq, int qentry, int subdesc_cnt, struct mbuf *mbuf, int len) { struct nicvf *nic; struct sq_hdr_subdesc *hdr; struct ether_vlan_header *eh; #ifdef INET struct ip *ip; struct tcphdr *th; #endif uint16_t etype; int ehdrlen, iphlen, poff, proto; nic = sq->nic; hdr = (struct sq_hdr_subdesc *)GET_SQ_DESC(sq, qentry); sq->snd_buff[qentry].mbuf = mbuf; memset(hdr, 0, SND_QUEUE_DESC_SIZE); hdr->subdesc_type = SQ_DESC_TYPE_HEADER; /* Enable notification via CQE after processing SQE */ hdr->post_cqe = 1; /* No of subdescriptors following this */ hdr->subdesc_cnt = subdesc_cnt; hdr->tot_len = len; eh = mtod(mbuf, struct ether_vlan_header *); if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) { ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN; etype = ntohs(eh->evl_proto); } else { ehdrlen = ETHER_HDR_LEN; etype = ntohs(eh->evl_encap_proto); } poff = proto = -1; switch (etype) { #ifdef INET6 case ETHERTYPE_IPV6: if (mbuf->m_len < ehdrlen + sizeof(struct ip6_hdr)) { mbuf = m_pullup(mbuf, ehdrlen +sizeof(struct ip6_hdr)); sq->snd_buff[qentry].mbuf = NULL; if (mbuf == NULL) return (ENOBUFS); } poff = ip6_lasthdr(mbuf, ehdrlen, IPPROTO_IPV6, &proto); if (poff < 0) return (ENOBUFS); poff += ehdrlen; break; #endif #ifdef INET case ETHERTYPE_IP: if (mbuf->m_len < ehdrlen + sizeof(struct ip)) { mbuf = m_pullup(mbuf, ehdrlen + sizeof(struct ip)); sq->snd_buff[qentry].mbuf = mbuf; if (mbuf == NULL) return (ENOBUFS); } if (mbuf->m_pkthdr.csum_flags & CSUM_IP) hdr->csum_l3 = 1; /* Enable IP csum calculation */ ip = (struct ip *)(mbuf->m_data + ehdrlen); iphlen = ip->ip_hl << 2; poff = ehdrlen + iphlen; proto = ip->ip_p; break; #endif } #if defined(INET6) || defined(INET) if (poff > 0 && mbuf->m_pkthdr.csum_flags != 0) { switch (proto) { case IPPROTO_TCP: if ((mbuf->m_pkthdr.csum_flags & CSUM_TCP) == 0) break; if (mbuf->m_len < (poff + sizeof(struct tcphdr))) { mbuf = m_pullup(mbuf, poff + sizeof(struct tcphdr)); sq->snd_buff[qentry].mbuf = mbuf; if (mbuf == NULL) return (ENOBUFS); } 
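/* Request hardware TCP checksum insertion for this packet. */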
hdr->csum_l4 = SEND_L4_CSUM_TCP; break; case IPPROTO_UDP: if ((mbuf->m_pkthdr.csum_flags & CSUM_UDP) == 0) break; if (mbuf->m_len < (poff + sizeof(struct udphdr))) { mbuf = m_pullup(mbuf, poff + sizeof(struct udphdr)); sq->snd_buff[qentry].mbuf = mbuf; if (mbuf == NULL) return (ENOBUFS); } hdr->csum_l4 = SEND_L4_CSUM_UDP; break; case IPPROTO_SCTP: if ((mbuf->m_pkthdr.csum_flags & CSUM_SCTP) == 0) break; if (mbuf->m_len < (poff + sizeof(struct sctphdr))) { mbuf = m_pullup(mbuf, poff + sizeof(struct sctphdr)); sq->snd_buff[qentry].mbuf = mbuf; if (mbuf == NULL) return (ENOBUFS); } hdr->csum_l4 = SEND_L4_CSUM_SCTP; break; default: break; } hdr->l3_offset = ehdrlen; hdr->l4_offset = poff; } if ((mbuf->m_pkthdr.tso_segsz != 0) && nic->hw_tso) { th = (struct tcphdr *)((caddr_t)(mbuf->m_data + poff)); hdr->tso = 1; hdr->tso_start = poff + (th->th_off * 4); hdr->tso_max_paysize = mbuf->m_pkthdr.tso_segsz; hdr->inner_l3_offset = ehdrlen - 2; nic->drv_stats.tx_tso++; } #endif return (0); } /* * SQ GATHER subdescriptor * Must follow HDR descriptor */ static inline void nicvf_sq_add_gather_subdesc(struct snd_queue *sq, int qentry, int size, uint64_t data) { struct sq_gather_subdesc *gather; qentry &= (sq->dmem.q_len - 1); gather = (struct sq_gather_subdesc *)GET_SQ_DESC(sq, qentry); memset(gather, 0, SND_QUEUE_DESC_SIZE); gather->subdesc_type = SQ_DESC_TYPE_GATHER; gather->ld_type = NIC_SEND_LD_TYPE_E_LDD; gather->size = size; gather->addr = data; } /* Put an mbuf to a SQ for packet transfer. */ static int nicvf_tx_mbuf_locked(struct snd_queue *sq, struct mbuf **mbufp) { bus_dma_segment_t segs[256]; struct snd_buff *snd_buff; size_t seg; int nsegs, qentry; int subdesc_cnt; int err; NICVF_TX_LOCK_ASSERT(sq); if (sq->free_cnt == 0) return (ENOBUFS); snd_buff = &sq->snd_buff[sq->tail]; err = bus_dmamap_load_mbuf_sg(sq->snd_buff_dmat, snd_buff->dmap, *mbufp, segs, &nsegs, BUS_DMA_NOWAIT); if (__predict_false(err != 0)) { /* ARM64TODO: Add mbuf defragmenting if we lack maps */ m_freem(*mbufp); *mbufp = NULL; return (err); } /* Set how many subdescriptors is required */ subdesc_cnt = MIN_SQ_DESC_PER_PKT_XMIT + nsegs - 1; if (subdesc_cnt > sq->free_cnt) { /* ARM64TODO: Add mbuf defragmentation if we lack descriptors */ bus_dmamap_unload(sq->snd_buff_dmat, snd_buff->dmap); return (ENOBUFS); } qentry = nicvf_get_sq_desc(sq, subdesc_cnt); /* Add SQ header subdesc */ err = nicvf_sq_add_hdr_subdesc(sq, qentry, subdesc_cnt - 1, *mbufp, (*mbufp)->m_pkthdr.len); if (err != 0) { nicvf_put_sq_desc(sq, subdesc_cnt); bus_dmamap_unload(sq->snd_buff_dmat, snd_buff->dmap); if (err == ENOBUFS) { m_freem(*mbufp); *mbufp = NULL; } return (err); } /* Add SQ gather subdescs */ for (seg = 0; seg < nsegs; seg++) { qentry = nicvf_get_nxt_sqentry(sq, qentry); nicvf_sq_add_gather_subdesc(sq, qentry, segs[seg].ds_len, segs[seg].ds_addr); } /* make sure all memory stores are done before ringing doorbell */ bus_dmamap_sync(sq->dmem.dmat, sq->dmem.dmap, BUS_DMASYNC_PREWRITE); dprintf(sq->nic->dev, "%s: sq->idx: %d, subdesc_cnt: %d\n", __func__, sq->idx, subdesc_cnt); /* Inform HW to xmit new packet */ nicvf_queue_reg_write(sq->nic, NIC_QSET_SQ_0_7_DOOR, sq->idx, subdesc_cnt); return (0); } static __inline u_int frag_num(u_int i) { #if BYTE_ORDER == BIG_ENDIAN return ((i & ~3) + 3 - (i & 3)); #else return (i); #endif } /* Returns MBUF for a received packet */ struct mbuf * nicvf_get_rcv_mbuf(struct nicvf *nic, struct cqe_rx_t *cqe_rx) { int frag; int payload_len = 0; struct mbuf *mbuf; struct mbuf *mbuf_frag; uint16_t *rb_lens = NULL; 
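/* * rb_lens and rb_ptrs are pointed into the CQE below: the receive buffer * lengths start at its 4th 64-bit word and the buffer addresses at its 7th. */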
uint64_t *rb_ptrs = NULL; mbuf = NULL; rb_lens = (uint16_t *)((uint8_t *)cqe_rx + (3 * sizeof(uint64_t))); rb_ptrs = (uint64_t *)((uint8_t *)cqe_rx + (6 * sizeof(uint64_t))); dprintf(nic->dev, "%s rb_cnt %d rb0_ptr %lx rb0_sz %d\n", __func__, cqe_rx->rb_cnt, cqe_rx->rb0_ptr, cqe_rx->rb0_sz); for (frag = 0; frag < cqe_rx->rb_cnt; frag++) { payload_len = rb_lens[frag_num(frag)]; if (frag == 0) { /* First fragment */ mbuf = nicvf_rb_ptr_to_mbuf(nic, (*rb_ptrs - cqe_rx->align_pad)); mbuf->m_len = payload_len; mbuf->m_data += cqe_rx->align_pad; if_setrcvif(mbuf, nic->ifp); } else { /* Add fragments */ mbuf_frag = nicvf_rb_ptr_to_mbuf(nic, *rb_ptrs); m_append(mbuf, payload_len, mbuf_frag->m_data); m_freem(mbuf_frag); } /* Next buffer pointer */ rb_ptrs++; } if (__predict_true(mbuf != NULL)) { m_fixhdr(mbuf); mbuf->m_pkthdr.flowid = cqe_rx->rq_idx; M_HASHTYPE_SET(mbuf, M_HASHTYPE_OPAQUE); if (__predict_true((if_getcapenable(nic->ifp) & IFCAP_RXCSUM) != 0)) { /* * HW by default verifies IP & TCP/UDP/SCTP checksums */ if (__predict_true(cqe_rx->l3_type == L3TYPE_IPV4)) { mbuf->m_pkthdr.csum_flags = (CSUM_IP_CHECKED | CSUM_IP_VALID); } switch (cqe_rx->l4_type) { case L4TYPE_UDP: case L4TYPE_TCP: /* fall through */ mbuf->m_pkthdr.csum_flags |= (CSUM_DATA_VALID | CSUM_PSEUDO_HDR); mbuf->m_pkthdr.csum_data = 0xffff; break; case L4TYPE_SCTP: mbuf->m_pkthdr.csum_flags |= CSUM_SCTP_VALID; break; default: break; } } } return (mbuf); } /* Enable interrupt */ void nicvf_enable_intr(struct nicvf *nic, int int_type, int q_idx) { uint64_t reg_val; reg_val = nicvf_reg_read(nic, NIC_VF_ENA_W1S); switch (int_type) { case NICVF_INTR_CQ: reg_val |= ((1UL << q_idx) << NICVF_INTR_CQ_SHIFT); break; case NICVF_INTR_SQ: reg_val |= ((1UL << q_idx) << NICVF_INTR_SQ_SHIFT); break; case NICVF_INTR_RBDR: reg_val |= ((1UL << q_idx) << NICVF_INTR_RBDR_SHIFT); break; case NICVF_INTR_PKT_DROP: reg_val |= (1UL << NICVF_INTR_PKT_DROP_SHIFT); break; case NICVF_INTR_TCP_TIMER: reg_val |= (1UL << NICVF_INTR_TCP_TIMER_SHIFT); break; case NICVF_INTR_MBOX: reg_val |= (1UL << NICVF_INTR_MBOX_SHIFT); break; case NICVF_INTR_QS_ERR: reg_val |= (1UL << NICVF_INTR_QS_ERR_SHIFT); break; default: device_printf(nic->dev, "Failed to enable interrupt: unknown type\n"); break; } nicvf_reg_write(nic, NIC_VF_ENA_W1S, reg_val); } /* Disable interrupt */ void nicvf_disable_intr(struct nicvf *nic, int int_type, int q_idx) { uint64_t reg_val = 0; switch (int_type) { case NICVF_INTR_CQ: reg_val |= ((1UL << q_idx) << NICVF_INTR_CQ_SHIFT); break; case NICVF_INTR_SQ: reg_val |= ((1UL << q_idx) << NICVF_INTR_SQ_SHIFT); break; case NICVF_INTR_RBDR: reg_val |= ((1UL << q_idx) << NICVF_INTR_RBDR_SHIFT); break; case NICVF_INTR_PKT_DROP: reg_val |= (1UL << NICVF_INTR_PKT_DROP_SHIFT); break; case NICVF_INTR_TCP_TIMER: reg_val |= (1UL << NICVF_INTR_TCP_TIMER_SHIFT); break; case NICVF_INTR_MBOX: reg_val |= (1UL << NICVF_INTR_MBOX_SHIFT); break; case NICVF_INTR_QS_ERR: reg_val |= (1UL << NICVF_INTR_QS_ERR_SHIFT); break; default: device_printf(nic->dev, "Failed to disable interrupt: unknown type\n"); break; } nicvf_reg_write(nic, NIC_VF_ENA_W1C, reg_val); } /* Clear interrupt */ void nicvf_clear_intr(struct nicvf *nic, int int_type, int q_idx) { uint64_t reg_val = 0; switch (int_type) { case NICVF_INTR_CQ: reg_val = ((1UL << q_idx) << NICVF_INTR_CQ_SHIFT); break; case NICVF_INTR_SQ: reg_val = ((1UL << q_idx) << NICVF_INTR_SQ_SHIFT); break; case NICVF_INTR_RBDR: reg_val = ((1UL << q_idx) << NICVF_INTR_RBDR_SHIFT); break; case NICVF_INTR_PKT_DROP: reg_val = (1UL << 
NICVF_INTR_PKT_DROP_SHIFT); break; case NICVF_INTR_TCP_TIMER: reg_val = (1UL << NICVF_INTR_TCP_TIMER_SHIFT); break; case NICVF_INTR_MBOX: reg_val = (1UL << NICVF_INTR_MBOX_SHIFT); break; case NICVF_INTR_QS_ERR: reg_val |= (1UL << NICVF_INTR_QS_ERR_SHIFT); break; default: device_printf(nic->dev, "Failed to clear interrupt: unknown type\n"); break; } nicvf_reg_write(nic, NIC_VF_INT, reg_val); } /* Check if interrupt is enabled */ int nicvf_is_intr_enabled(struct nicvf *nic, int int_type, int q_idx) { uint64_t reg_val; uint64_t mask = 0xff; reg_val = nicvf_reg_read(nic, NIC_VF_ENA_W1S); switch (int_type) { case NICVF_INTR_CQ: mask = ((1UL << q_idx) << NICVF_INTR_CQ_SHIFT); break; case NICVF_INTR_SQ: mask = ((1UL << q_idx) << NICVF_INTR_SQ_SHIFT); break; case NICVF_INTR_RBDR: mask = ((1UL << q_idx) << NICVF_INTR_RBDR_SHIFT); break; case NICVF_INTR_PKT_DROP: mask = NICVF_INTR_PKT_DROP_MASK; break; case NICVF_INTR_TCP_TIMER: mask = NICVF_INTR_TCP_TIMER_MASK; break; case NICVF_INTR_MBOX: mask = NICVF_INTR_MBOX_MASK; break; case NICVF_INTR_QS_ERR: mask = NICVF_INTR_QS_ERR_MASK; break; default: device_printf(nic->dev, "Failed to check interrupt enable: unknown type\n"); break; } return (reg_val & mask); } void nicvf_update_rq_stats(struct nicvf *nic, int rq_idx) { struct rcv_queue *rq; #define GET_RQ_STATS(reg) \ nicvf_reg_read(nic, NIC_QSET_RQ_0_7_STAT_0_1 |\ (rq_idx << NIC_Q_NUM_SHIFT) | (reg << 3)) rq = &nic->qs->rq[rq_idx]; rq->stats.bytes = GET_RQ_STATS(RQ_SQ_STATS_OCTS); rq->stats.pkts = GET_RQ_STATS(RQ_SQ_STATS_PKTS); } void nicvf_update_sq_stats(struct nicvf *nic, int sq_idx) { struct snd_queue *sq; #define GET_SQ_STATS(reg) \ nicvf_reg_read(nic, NIC_QSET_SQ_0_7_STAT_0_1 |\ (sq_idx << NIC_Q_NUM_SHIFT) | (reg << 3)) sq = &nic->qs->sq[sq_idx]; sq->stats.bytes = GET_SQ_STATS(RQ_SQ_STATS_OCTS); sq->stats.pkts = GET_SQ_STATS(RQ_SQ_STATS_PKTS); } /* Check for errors in the receive cmp.queue entry */ int nicvf_check_cqe_rx_errs(struct nicvf *nic, struct cmp_queue *cq, struct cqe_rx_t *cqe_rx) { struct nicvf_hw_stats *stats = &nic->hw_stats; struct nicvf_drv_stats *drv_stats = &nic->drv_stats; if (!cqe_rx->err_level && !cqe_rx->err_opcode) { drv_stats->rx_frames_ok++; return (0); } switch (cqe_rx->err_opcode) { case CQ_RX_ERROP_RE_PARTIAL: stats->rx_bgx_truncated_pkts++; break; case CQ_RX_ERROP_RE_JABBER: stats->rx_jabber_errs++; break; case CQ_RX_ERROP_RE_FCS: stats->rx_fcs_errs++; break; case CQ_RX_ERROP_RE_RX_CTL: stats->rx_bgx_errs++; break; case CQ_RX_ERROP_PREL2_ERR: stats->rx_prel2_errs++; break; case CQ_RX_ERROP_L2_MAL: stats->rx_l2_hdr_malformed++; break; case CQ_RX_ERROP_L2_OVERSIZE: stats->rx_oversize++; break; case CQ_RX_ERROP_L2_UNDERSIZE: stats->rx_undersize++; break; case CQ_RX_ERROP_L2_LENMISM: stats->rx_l2_len_mismatch++; break; case CQ_RX_ERROP_L2_PCLP: stats->rx_l2_pclp++; break; case CQ_RX_ERROP_IP_NOT: stats->rx_ip_ver_errs++; break; case CQ_RX_ERROP_IP_CSUM_ERR: stats->rx_ip_csum_errs++; break; case CQ_RX_ERROP_IP_MAL: stats->rx_ip_hdr_malformed++; break; case CQ_RX_ERROP_IP_MALD: stats->rx_ip_payload_malformed++; break; case CQ_RX_ERROP_IP_HOP: stats->rx_ip_ttl_errs++; break; case CQ_RX_ERROP_L3_PCLP: stats->rx_l3_pclp++; break; case CQ_RX_ERROP_L4_MAL: stats->rx_l4_malformed++; break; case CQ_RX_ERROP_L4_CHK: stats->rx_l4_csum_errs++; break; case CQ_RX_ERROP_UDP_LEN: stats->rx_udp_len_errs++; break; case CQ_RX_ERROP_L4_PORT: stats->rx_l4_port_errs++; break; case CQ_RX_ERROP_TCP_FLAG: stats->rx_tcp_flag_errs++; break; case CQ_RX_ERROP_TCP_OFFSET: stats->rx_tcp_offset_errs++; 
break; case CQ_RX_ERROP_L4_PCLP: stats->rx_l4_pclp++; break; case CQ_RX_ERROP_RBDR_TRUNC: stats->rx_truncated_pkts++; break; } return (1); } /* Check for errors in the send cmp.queue entry */ int nicvf_check_cqe_tx_errs(struct nicvf *nic, struct cmp_queue *cq, struct cqe_send_t *cqe_tx) { struct cmp_queue_stats *stats = &cq->stats; switch (cqe_tx->send_status) { case CQ_TX_ERROP_GOOD: stats->tx.good++; return (0); case CQ_TX_ERROP_DESC_FAULT: stats->tx.desc_fault++; break; case CQ_TX_ERROP_HDR_CONS_ERR: stats->tx.hdr_cons_err++; break; case CQ_TX_ERROP_SUBDC_ERR: stats->tx.subdesc_err++; break; case CQ_TX_ERROP_IMM_SIZE_OFLOW: stats->tx.imm_size_oflow++; break; case CQ_TX_ERROP_DATA_SEQUENCE_ERR: stats->tx.data_seq_err++; break; case CQ_TX_ERROP_MEM_SEQUENCE_ERR: stats->tx.mem_seq_err++; break; case CQ_TX_ERROP_LOCK_VIOL: stats->tx.lock_viol++; break; case CQ_TX_ERROP_DATA_FAULT: stats->tx.data_fault++; break; case CQ_TX_ERROP_TSTMP_CONFLICT: stats->tx.tstmp_conflict++; break; case CQ_TX_ERROP_TSTMP_TIMEOUT: stats->tx.tstmp_timeout++; break; case CQ_TX_ERROP_MEM_FAULT: stats->tx.mem_fault++; break; case CQ_TX_ERROP_CK_OVERLAP: stats->tx.csum_overlap++; break; case CQ_TX_ERROP_CK_OFLOW: stats->tx.csum_overflow++; break; } return (1); } Index: head/sys/dev/xen/blkback/blkback.c =================================================================== --- head/sys/dev/xen/blkback/blkback.c (revision 327948) +++ head/sys/dev/xen/blkback/blkback.c (revision 327949) @@ -1,3938 +1,3938 @@ /*- * SPDX-License-Identifier: BSD-2-Clause-FreeBSD * * Copyright (c) 2009-2012 Spectra Logic Corporation * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions, and the following disclaimer, * without modification. * 2. Redistributions in binary form must reproduce at minimum a disclaimer * substantially similar to the "NO WARRANTY" disclaimer below * ("Disclaimer") and any redistribution must be conditioned upon * including a substantially similar Disclaimer requirement for further * binary redistribution. * * NO WARRANTY * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGES. * * Authors: Justin T. Gibbs (Spectra Logic Corporation) * Ken Merry (Spectra Logic Corporation) */ #include __FBSDID("$FreeBSD$"); /** * \file blkback.c * * \brief Device driver supporting the vending of block storage from * a FreeBSD domain to other domains. 
*/ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include /*--------------------------- Compile-time Tunables --------------------------*/ /** * The maximum number of shared memory ring pages we will allow in a * negotiated block-front/back communication channel. Allow enough * ring space for all requests to be XBB_MAX_REQUEST_SIZE'd. */ #define XBB_MAX_RING_PAGES 32 /** * The maximum number of outstanding request blocks (request headers plus * additional segment blocks) we will allow in a negotiated block-front/back * communication channel. */ #define XBB_MAX_REQUESTS \ __CONST_RING_SIZE(blkif, PAGE_SIZE * XBB_MAX_RING_PAGES) /** * \brief Define to force all I/O to be performed on memory owned by the * backend device, with a copy-in/out to the remote domain's memory. * * \note This option is currently required when this driver's domain is * operating in HVM mode on a system using an IOMMU. * * This driver uses Xen's grant table API to gain access to the memory of * the remote domains it serves. When our domain is operating in PV mode, * the grant table mechanism directly updates our domain's page table entries * to point to the physical pages of the remote domain. This scheme guarantees * that blkback and the backing devices it uses can safely perform DMA * operations to satisfy requests. In HVM mode, Xen may use a HW IOMMU to * insure that our domain cannot DMA to pages owned by another domain. As * of Xen 4.0, IOMMU mappings for HVM guests are not updated via the grant * table API. For this reason, in HVM mode, we must bounce all requests into * memory that is mapped into our domain at domain startup and thus has * valid IOMMU mappings. */ #define XBB_USE_BOUNCE_BUFFERS /** * \brief Define to enable rudimentary request logging to the console. */ #undef XBB_DEBUG /*---------------------------------- Macros ----------------------------------*/ /** * Custom malloc type for all driver allocations. */ static MALLOC_DEFINE(M_XENBLOCKBACK, "xbbd", "Xen Block Back Driver Data"); #ifdef XBB_DEBUG #define DPRINTF(fmt, args...) \ printf("xbb(%s:%d): " fmt, __FUNCTION__, __LINE__, ##args) #else #define DPRINTF(fmt, args...) do {} while(0) #endif /** * The maximum mapped region size per request we will allow in a negotiated * block-front/back communication channel. */ #define XBB_MAX_REQUEST_SIZE \ MIN(MAXPHYS, BLKIF_MAX_SEGMENTS_PER_REQUEST * PAGE_SIZE) /** * The maximum number of segments (within a request header and accompanying * segment blocks) per request we will allow in a negotiated block-front/back * communication channel. */ #define XBB_MAX_SEGMENTS_PER_REQUEST \ (MIN(UIO_MAXIOV, \ MIN(BLKIF_MAX_SEGMENTS_PER_REQUEST, \ (XBB_MAX_REQUEST_SIZE / PAGE_SIZE) + 1))) /** * The maximum number of ring pages that we can allow per request list. * We limit this to the maximum number of segments per request, because * that is already a reasonable number of segments to aggregate. This * number should never be smaller than XBB_MAX_SEGMENTS_PER_REQUEST, * because that would leave situations where we can't dispatch even one * large request. 
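* * As a rough worked example (assuming 4KB pages, a * BLKIF_MAX_SEGMENTS_PER_REQUEST of 11, UIO_MAXIOV of 1024 and MAXPHYS of * at least 44KB): XBB_MAX_REQUEST_SIZE evaluates to 11 * 4KB = 44KB, * XBB_MAX_SEGMENTS_PER_REQUEST to MIN(1024, MIN(11, 12)) = 11, and * XBB_MAX_SEGMENTS_PER_REQLIST to 11 as well.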
*/ #define XBB_MAX_SEGMENTS_PER_REQLIST XBB_MAX_SEGMENTS_PER_REQUEST /*--------------------------- Forward Declarations ---------------------------*/ struct xbb_softc; struct xbb_xen_req; static void xbb_attach_failed(struct xbb_softc *xbb, int err, const char *fmt, ...) __attribute__((format(printf, 3, 4))); static int xbb_shutdown(struct xbb_softc *xbb); /*------------------------------ Data Structures -----------------------------*/ STAILQ_HEAD(xbb_xen_req_list, xbb_xen_req); typedef enum { XBB_REQLIST_NONE = 0x00, XBB_REQLIST_MAPPED = 0x01 } xbb_reqlist_flags; struct xbb_xen_reqlist { /** * Back reference to the parent block back instance for this * request. Used during bio_done handling. */ struct xbb_softc *xbb; /** * BLKIF_OP code for this request. */ int operation; /** * Set to BLKIF_RSP_* to indicate request status. * * This field allows an error status to be recorded even if the * delivery of this status must be deferred. Deferred reporting * is necessary, for example, when an error is detected during * completion processing of one bio when other bios for this * request are still outstanding. */ int status; /** * Number of 512 byte sectors not transferred. */ int residual_512b_sectors; /** * Starting sector number of the first request in the list. */ off_t starting_sector_number; /** * If we're going to coalesce, the next contiguous sector would be * this one. */ off_t next_contig_sector; /** * Number of child requests in the list. */ int num_children; /** * Number of I/O requests still pending on the backend. */ int pendcnt; /** * Total number of segments for requests in the list. */ int nr_segments; /** * Flags for this particular request list. */ xbb_reqlist_flags flags; /** * Kernel virtual address space reserved for this request * list structure and used to map the remote domain's pages for * this I/O, into our domain's address space. */ uint8_t *kva; /** * Base, psuedo-physical address, corresponding to the start * of this request's kva region. */ uint64_t gnt_base; #ifdef XBB_USE_BOUNCE_BUFFERS /** * Pre-allocated domain local memory used to proxy remote * domain memory during I/O operations. */ uint8_t *bounce; #endif /** * Array of grant handles (one per page) used to map this request. */ grant_handle_t *gnt_handles; /** * Device statistics request ordering type (ordered or simple). */ devstat_tag_type ds_tag_type; /** * Device statistics request type (read, write, no_data). */ devstat_trans_flags ds_trans_type; /** * The start time for this request. */ struct bintime ds_t0; /** * Linked list of contiguous requests with the same operation type. */ struct xbb_xen_req_list contig_req_list; /** * Linked list links used to aggregate idle requests in the * request list free pool (xbb->reqlist_free_stailq) and pending * requests waiting for execution (xbb->reqlist_pending_stailq). */ STAILQ_ENTRY(xbb_xen_reqlist) links; }; STAILQ_HEAD(xbb_xen_reqlist_list, xbb_xen_reqlist); /** * \brief Object tracking an in-flight I/O from a Xen VBD consumer. */ struct xbb_xen_req { /** * Linked list links used to aggregate requests into a reqlist * and to store them in the request free pool. */ STAILQ_ENTRY(xbb_xen_req) links; /** * The remote domain's identifier for this I/O request. */ uint64_t id; /** * The number of pages currently mapped for this request. */ int nr_pages; /** * The number of 512 byte sectors comprising this requests. */ int nr_512b_sectors; /** * BLKIF_OP code for this request. */ int operation; /** * Storage used for non-native ring requests. 
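* (A private copy of the request is kept here when the front-end ABI is * not BLKIF_PROTOCOL_NATIVE; see xbb_get_resources().)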
*/ blkif_request_t ring_req_storage; /** * Pointer to the Xen request in the ring. */ blkif_request_t *ring_req; /** * Consumer index for this request. */ RING_IDX req_ring_idx; /** * The start time for this request. */ struct bintime ds_t0; /** * Pointer back to our parent request list. */ struct xbb_xen_reqlist *reqlist; }; SLIST_HEAD(xbb_xen_req_slist, xbb_xen_req); /** * \brief Configuration data for the shared memory request ring * used to communicate with the front-end client of this * this driver. */ struct xbb_ring_config { /** KVA address where ring memory is mapped. */ vm_offset_t va; /** The pseudo-physical address where ring memory is mapped.*/ uint64_t gnt_addr; /** * Grant table handles, one per-ring page, returned by the * hyperpervisor upon mapping of the ring and required to * unmap it when a connection is torn down. */ grant_handle_t handle[XBB_MAX_RING_PAGES]; /** * The device bus address returned by the hypervisor when * mapping the ring and required to unmap it when a connection * is torn down. */ uint64_t bus_addr[XBB_MAX_RING_PAGES]; /** The number of ring pages mapped for the current connection. */ u_int ring_pages; /** * The grant references, one per-ring page, supplied by the * front-end, allowing us to reference the ring pages in the * front-end's domain and to map these pages into our own domain. */ grant_ref_t ring_ref[XBB_MAX_RING_PAGES]; /** The interrupt driven even channel used to signal ring events. */ evtchn_port_t evtchn; }; /** * Per-instance connection state flags. */ typedef enum { /** * The front-end requested a read-only mount of the * back-end device/file. */ XBBF_READ_ONLY = 0x01, /** Communication with the front-end has been established. */ XBBF_RING_CONNECTED = 0x02, /** * Front-end requests exist in the ring and are waiting for * xbb_xen_req objects to free up. */ XBBF_RESOURCE_SHORTAGE = 0x04, /** Connection teardown in progress. */ XBBF_SHUTDOWN = 0x08, /** A thread is already performing shutdown processing. */ XBBF_IN_SHUTDOWN = 0x10 } xbb_flag_t; /** Backend device type. */ typedef enum { /** Backend type unknown. */ XBB_TYPE_NONE = 0x00, /** * Backend type disk (access via cdev switch * strategy routine). */ XBB_TYPE_DISK = 0x01, /** Backend type file (access vnode operations.). */ XBB_TYPE_FILE = 0x02 } xbb_type; /** * \brief Structure used to memoize information about a per-request * scatter-gather list. * * The chief benefit of using this data structure is it avoids having * to reparse the possibly discontiguous S/G list in the original * request. Due to the way that the mapping of the memory backing an * I/O transaction is handled by Xen, a second pass is unavoidable. * At least this way the second walk is a simple array traversal. * * \note A single Scatter/Gather element in the block interface covers * at most 1 machine page. In this context a sector (blkif * nomenclature, not what I'd choose) is a 512b aligned unit * of mapping within the machine page referenced by an S/G * element. */ struct xbb_sg { /** The number of 512b data chunks mapped in this S/G element. */ int16_t nsect; /** * The index (0 based) of the first 512b data chunk mapped * in this S/G element. */ uint8_t first_sect; /** * The index (0 based) of the last 512b data chunk mapped * in this S/G element. */ uint8_t last_sect; }; /** * Character device backend specific configuration data. */ struct xbb_dev_data { /** Cdev used for device backend access. */ struct cdev *cdev; /** Cdev switch used for device backend access. 
*/ struct cdevsw *csw; /** Used to hold a reference on opened cdev backend devices. */ int dev_ref; }; /** * File backend specific configuration data. */ struct xbb_file_data { /** Credentials to use for vnode backed (file based) I/O. */ struct ucred *cred; /** * \brief Array of io vectors used to process file based I/O. * * Only a single file based request is outstanding per-xbb instance, * so we only need one of these. */ struct iovec xiovecs[XBB_MAX_SEGMENTS_PER_REQLIST]; #ifdef XBB_USE_BOUNCE_BUFFERS /** * \brief Array of io vectors used to handle bouncing of file reads. * * Vnode operations are free to modify uio data during their * exectuion. In the case of a read with bounce buffering active, * we need some of the data from the original uio in order to * bounce-out the read data. This array serves as the temporary * storage for this saved data. */ struct iovec saved_xiovecs[XBB_MAX_SEGMENTS_PER_REQLIST]; /** * \brief Array of memoized bounce buffer kva offsets used * in the file based backend. * * Due to the way that the mapping of the memory backing an * I/O transaction is handled by Xen, a second pass through * the request sg elements is unavoidable. We memoize the computed * bounce address here to reduce the cost of the second walk. */ void *xiovecs_vaddr[XBB_MAX_SEGMENTS_PER_REQLIST]; #endif /* XBB_USE_BOUNCE_BUFFERS */ }; /** * Collection of backend type specific data. */ union xbb_backend_data { struct xbb_dev_data dev; struct xbb_file_data file; }; /** * Function signature of backend specific I/O handlers. */ typedef int (*xbb_dispatch_t)(struct xbb_softc *xbb, struct xbb_xen_reqlist *reqlist, int operation, int flags); /** * Per-instance configuration data. */ struct xbb_softc { /** * Task-queue used to process I/O requests. */ struct taskqueue *io_taskqueue; /** * Single "run the request queue" task enqueued * on io_taskqueue. */ struct task io_task; /** Device type for this instance. */ xbb_type device_type; /** NewBus device corresponding to this instance. */ device_t dev; /** Backend specific dispatch routine for this instance. */ xbb_dispatch_t dispatch_io; /** The number of requests outstanding on the backend device/file. */ int active_request_count; /** Free pool of request tracking structures. */ struct xbb_xen_req_list request_free_stailq; /** Array, sized at connection time, of request tracking structures. */ struct xbb_xen_req *requests; /** Free pool of request list structures. */ struct xbb_xen_reqlist_list reqlist_free_stailq; /** List of pending request lists awaiting execution. */ struct xbb_xen_reqlist_list reqlist_pending_stailq; /** Array, sized at connection time, of request list structures. */ struct xbb_xen_reqlist *request_lists; /** * Global pool of kva used for mapping remote domain ring * and I/O transaction data. */ vm_offset_t kva; /** Psuedo-physical address corresponding to kva. */ uint64_t gnt_base_addr; /** The size of the global kva pool. */ int kva_size; /** The size of the KVA area used for request lists. */ int reqlist_kva_size; /** The number of pages of KVA used for request lists */ int reqlist_kva_pages; /** Bitmap of free KVA pages */ bitstr_t *kva_free; /** * \brief Cached value of the front-end's domain id. * * This value is used at once for each mapped page in * a transaction. We cache it to avoid incuring the * cost of an ivar access every time this is needed. */ domid_t otherend_id; /** * \brief The blkif protocol abi in effect. * * There are situations where the back and front ends can * have a different, native abi (e.g. 
intel x86_64 and * 32bit x86 domains on the same machine). The back-end * always accommodates the front-end's native abi. That * value is pulled from the XenStore and recorded here. */ int abi; /** * \brief The maximum number of requests and request lists allowed * to be in flight at a time. * * This value is negotiated via the XenStore. */ u_int max_requests; /** * \brief The maximum number of segments (1 page per segment) * that can be mapped by a request. * * This value is negotiated via the XenStore. */ u_int max_request_segments; /** * \brief Maximum number of segments per request list. * * This value is derived from and will generally be larger than * max_request_segments. */ u_int max_reqlist_segments; /** * The maximum size of any request to this back-end * device. * * This value is negotiated via the XenStore. */ u_int max_request_size; /** * The maximum size of any request list. This is derived directly * from max_reqlist_segments. */ u_int max_reqlist_size; /** Various configuration and state bit flags. */ xbb_flag_t flags; /** Ring mapping and interrupt configuration data. */ struct xbb_ring_config ring_config; /** Runtime, cross-abi safe, structures for ring access. */ blkif_back_rings_t rings; /** IRQ mapping for the communication ring event channel. */ xen_intr_handle_t xen_intr_handle; /** * \brief Backend access mode flags (e.g. write, or read-only). * * This value is passed to us by the front-end via the XenStore. */ char *dev_mode; /** * \brief Backend device type (e.g. "disk", "cdrom", "floppy"). * * This value is passed to us by the front-end via the XenStore. * Currently unused. */ char *dev_type; /** * \brief Backend device/file identifier. * * This value is passed to us by the front-end via the XenStore. * We expect this to be a POSIX path indicating the file or * device to open. */ char *dev_name; /** * Vnode corresponding to the backend device node or file * we are acessing. */ struct vnode *vn; union xbb_backend_data backend; /** The native sector size of the backend. */ u_int sector_size; /** log2 of sector_size. */ u_int sector_size_shift; /** Size in bytes of the backend device or file. */ off_t media_size; /** * \brief media_size expressed in terms of the backend native * sector size. * * (e.g. xbb->media_size >> xbb->sector_size_shift). */ uint64_t media_num_sectors; /** * \brief Array of memoized scatter gather data computed during the * conversion of blkif ring requests to internal xbb_xen_req * structures. * * Ring processing is serialized so we only need one of these. */ struct xbb_sg xbb_sgs[XBB_MAX_SEGMENTS_PER_REQLIST]; /** * Temporary grant table map used in xbb_dispatch_io(). When * XBB_MAX_SEGMENTS_PER_REQLIST gets large, keeping this on the * stack could cause a stack overflow. */ struct gnttab_map_grant_ref maps[XBB_MAX_SEGMENTS_PER_REQLIST]; /** Mutex protecting per-instance data. */ struct mtx lock; /** * Resource representing allocated physical address space * associated with our per-instance kva region. */ struct resource *pseudo_phys_res; /** Resource id for allocated physical address space. */ int pseudo_phys_res_id; /** * I/O statistics from BlockBack dispatch down. These are * coalesced requests, and we start them right before execution. */ struct devstat *xbb_stats; /** * I/O statistics coming into BlockBack. These are the requests as * we get them from BlockFront. They are started as soon as we * receive a request, and completed when the I/O is complete. 
*/ struct devstat *xbb_stats_in; /** Disable sending flush to the backend */ int disable_flush; /** Send a real flush for every N flush requests */ int flush_interval; /** Count of flush requests in the interval */ int flush_count; /** Don't coalesce requests if this is set */ int no_coalesce_reqs; /** Number of requests we have received */ uint64_t reqs_received; /** Number of requests we have completed*/ uint64_t reqs_completed; /** Number of requests we queued but not pushed*/ uint64_t reqs_queued_for_completion; /** Number of requests we completed with an error status*/ uint64_t reqs_completed_with_error; /** How many forced dispatches (i.e. without coalescing) have happened */ uint64_t forced_dispatch; /** How many normal dispatches have happened */ uint64_t normal_dispatch; /** How many total dispatches have happened */ uint64_t total_dispatch; /** How many times we have run out of KVA */ uint64_t kva_shortages; /** How many times we have run out of request structures */ uint64_t request_shortages; /** Watch to wait for hotplug script execution */ struct xs_watch hotplug_watch; }; /*---------------------------- Request Processing ----------------------------*/ /** * Allocate an internal transaction tracking structure from the free pool. * * \param xbb Per-instance xbb configuration structure. * * \return On success, a pointer to the allocated xbb_xen_req structure. * Otherwise NULL. */ static inline struct xbb_xen_req * xbb_get_req(struct xbb_softc *xbb) { struct xbb_xen_req *req; req = NULL; mtx_assert(&xbb->lock, MA_OWNED); if ((req = STAILQ_FIRST(&xbb->request_free_stailq)) != NULL) { STAILQ_REMOVE_HEAD(&xbb->request_free_stailq, links); xbb->active_request_count++; } return (req); } /** * Return an allocated transaction tracking structure to the free pool. * * \param xbb Per-instance xbb configuration structure. * \param req The request structure to free. */ static inline void xbb_release_req(struct xbb_softc *xbb, struct xbb_xen_req *req) { mtx_assert(&xbb->lock, MA_OWNED); STAILQ_INSERT_HEAD(&xbb->request_free_stailq, req, links); xbb->active_request_count--; KASSERT(xbb->active_request_count >= 0, ("xbb_release_req: negative active count")); } /** * Return an xbb_xen_req_list of allocated xbb_xen_reqs to the free pool. * * \param xbb Per-instance xbb configuration structure. * \param req_list The list of requests to free. * \param nreqs The number of items in the list. */ static inline void xbb_release_reqs(struct xbb_softc *xbb, struct xbb_xen_req_list *req_list, int nreqs) { mtx_assert(&xbb->lock, MA_OWNED); STAILQ_CONCAT(&xbb->request_free_stailq, req_list); xbb->active_request_count -= nreqs; KASSERT(xbb->active_request_count >= 0, ("xbb_release_reqs: negative active count")); } /** * Given a page index and 512b sector offset within that page, * calculate an offset into a request's kva region. * * \param reqlist The request structure whose kva region will be accessed. * \param pagenr The page index used to compute the kva offset. * \param sector The 512b sector index used to compute the page relative * kva offset. * * \return The computed global KVA offset. */ static inline uint8_t * xbb_reqlist_vaddr(struct xbb_xen_reqlist *reqlist, int pagenr, int sector) { return (reqlist->kva + (PAGE_SIZE * pagenr) + (sector << 9)); } #ifdef XBB_USE_BOUNCE_BUFFERS /** * Given a page index and 512b sector offset within that page, * calculate an offset into a request's local bounce memory region. * * \param reqlist The request structure whose bounce region will be accessed. 
* \param pagenr The page index used to compute the bounce offset. * \param sector The 512b sector index used to compute the page relative * bounce offset. * * \return The computed global bounce buffer address. */ static inline uint8_t * xbb_reqlist_bounce_addr(struct xbb_xen_reqlist *reqlist, int pagenr, int sector) { return (reqlist->bounce + (PAGE_SIZE * pagenr) + (sector << 9)); } #endif /** * Given a page number and 512b sector offset within that page, * calculate an offset into the request's memory region that the * underlying backend device/file should use for I/O. * * \param reqlist The request structure whose I/O region will be accessed. * \param pagenr The page index used to compute the I/O offset. * \param sector The 512b sector index used to compute the page relative * I/O offset. * * \return The computed global I/O address. * * Depending on configuration, this will either be a local bounce buffer * or a pointer to the memory mapped in from the front-end domain for * this request. */ static inline uint8_t * xbb_reqlist_ioaddr(struct xbb_xen_reqlist *reqlist, int pagenr, int sector) { #ifdef XBB_USE_BOUNCE_BUFFERS return (xbb_reqlist_bounce_addr(reqlist, pagenr, sector)); #else return (xbb_reqlist_vaddr(reqlist, pagenr, sector)); #endif } /** * Given a page index and 512b sector offset within that page, calculate * an offset into the local psuedo-physical address space used to map a * front-end's request data into a request. * * \param reqlist The request list structure whose pseudo-physical region * will be accessed. * \param pagenr The page index used to compute the pseudo-physical offset. * \param sector The 512b sector index used to compute the page relative * pseudo-physical offset. * * \return The computed global pseudo-phsyical address. * * Depending on configuration, this will either be a local bounce buffer * or a pointer to the memory mapped in from the front-end domain for * this request. */ static inline uintptr_t xbb_get_gntaddr(struct xbb_xen_reqlist *reqlist, int pagenr, int sector) { struct xbb_softc *xbb; xbb = reqlist->xbb; return ((uintptr_t)(xbb->gnt_base_addr + (uintptr_t)(reqlist->kva - xbb->kva) + (PAGE_SIZE * pagenr) + (sector << 9))); } /** * Get Kernel Virtual Address space for mapping requests. * * \param xbb Per-instance xbb configuration structure. * \param nr_pages Number of pages needed. * \param check_only If set, check for free KVA but don't allocate it. * \param have_lock If set, xbb lock is already held. * * \return On success, a pointer to the allocated KVA region. Otherwise NULL. * * Note: This should be unnecessary once we have either chaining or * scatter/gather support for struct bio. At that point we'll be able to * put multiple addresses and lengths in one bio/bio chain and won't need * to map everything into one virtual segment. */ static uint8_t * xbb_get_kva(struct xbb_softc *xbb, int nr_pages) { int first_clear; int num_clear; uint8_t *free_kva; int i; KASSERT(nr_pages != 0, ("xbb_get_kva of zero length")); first_clear = 0; free_kva = NULL; mtx_lock(&xbb->lock); /* * Look for the first available page. If there are none, we're done. */ bit_ffc(xbb->kva_free, xbb->reqlist_kva_pages, &first_clear); if (first_clear == -1) goto bailout; /* * Starting at the first available page, look for consecutive free * pages that will satisfy the user's request. 
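* This is a simple first-fit scan over the kva_free bitstring: bit_ffc() * above finds the first free page, the loop below extends the run, and a * successful run is marked allocated with bit_nset().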
*/ for (i = first_clear, num_clear = 0; i < xbb->reqlist_kva_pages; i++) { /* * If this is true, the page is used, so we have to reset * the number of clear pages and the first clear page * (since it pointed to a region with an insufficient number * of clear pages). */ if (bit_test(xbb->kva_free, i)) { num_clear = 0; first_clear = -1; continue; } if (first_clear == -1) first_clear = i; /* * If this is true, we've found a large enough free region * to satisfy the request. */ if (++num_clear == nr_pages) { bit_nset(xbb->kva_free, first_clear, first_clear + nr_pages - 1); free_kva = xbb->kva + (uint8_t *)((intptr_t)first_clear * PAGE_SIZE); KASSERT(free_kva >= (uint8_t *)xbb->kva && free_kva + (nr_pages * PAGE_SIZE) <= (uint8_t *)xbb->ring_config.va, ("Free KVA %p len %d out of range, " "kva = %#jx, ring VA = %#jx\n", free_kva, nr_pages * PAGE_SIZE, (uintmax_t)xbb->kva, (uintmax_t)xbb->ring_config.va)); break; } } bailout: if (free_kva == NULL) { xbb->flags |= XBBF_RESOURCE_SHORTAGE; xbb->kva_shortages++; } mtx_unlock(&xbb->lock); return (free_kva); } /** * Free allocated KVA. * * \param xbb Per-instance xbb configuration structure. * \param kva_ptr Pointer to allocated KVA region. * \param nr_pages Number of pages in the KVA region. */ static void xbb_free_kva(struct xbb_softc *xbb, uint8_t *kva_ptr, int nr_pages) { intptr_t start_page; mtx_assert(&xbb->lock, MA_OWNED); start_page = (intptr_t)(kva_ptr - xbb->kva) >> PAGE_SHIFT; bit_nclear(xbb->kva_free, start_page, start_page + nr_pages - 1); } /** * Unmap the front-end pages associated with this I/O request. * * \param req The request structure to unmap. */ static void xbb_unmap_reqlist(struct xbb_xen_reqlist *reqlist) { struct gnttab_unmap_grant_ref unmap[XBB_MAX_SEGMENTS_PER_REQLIST]; u_int i; u_int invcount; int error; invcount = 0; for (i = 0; i < reqlist->nr_segments; i++) { if (reqlist->gnt_handles[i] == GRANT_REF_INVALID) continue; unmap[invcount].host_addr = xbb_get_gntaddr(reqlist, i, 0); unmap[invcount].dev_bus_addr = 0; unmap[invcount].handle = reqlist->gnt_handles[i]; reqlist->gnt_handles[i] = GRANT_REF_INVALID; invcount++; } error = HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, unmap, invcount); KASSERT(error == 0, ("Grant table operation failed")); } /** * Allocate an internal transaction tracking structure from the free pool. * * \param xbb Per-instance xbb configuration structure. * * \return On success, a pointer to the allocated xbb_xen_reqlist structure. * Otherwise NULL. */ static inline struct xbb_xen_reqlist * xbb_get_reqlist(struct xbb_softc *xbb) { struct xbb_xen_reqlist *reqlist; reqlist = NULL; mtx_assert(&xbb->lock, MA_OWNED); if ((reqlist = STAILQ_FIRST(&xbb->reqlist_free_stailq)) != NULL) { STAILQ_REMOVE_HEAD(&xbb->reqlist_free_stailq, links); reqlist->flags = XBB_REQLIST_NONE; reqlist->kva = NULL; reqlist->status = BLKIF_RSP_OKAY; reqlist->residual_512b_sectors = 0; reqlist->num_children = 0; reqlist->nr_segments = 0; STAILQ_INIT(&reqlist->contig_req_list); } return (reqlist); } /** * Return an allocated transaction tracking structure to the free pool. * * \param xbb Per-instance xbb configuration structure. * \param req The request list structure to free. * \param wakeup If set, wakeup the work thread if freeing this reqlist * during a resource shortage condition. 
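Editor's note: xbb_get_kva() is a first-fit allocator over a bitstring: bit_ffc() finds the first clear bit, the loop counts consecutive clear bits, and bit_nset() claims the winning run. Below is a compact user-space model of the same scan over a plain byte-per-page map; find_run() is an illustrative name, the real code uses bitstring(3) and holds the softc mutex.

#include <stdio.h>
#include <string.h>

#define NPAGES	16

static unsigned char used[NPAGES];	/* 0 = free, 1 = allocated */

/* Return the start index of the first run of 'need' free pages, or -1. */
static int
find_run(int need)
{
	int first = -1, run = 0, i;

	for (i = 0; i < NPAGES; i++) {
		if (used[i]) {		/* run broken, start over */
			run = 0;
			first = -1;
			continue;
		}
		if (first == -1)
			first = i;
		if (++run == need) {
			memset(&used[first], 1, need);	/* claim the run */
			return (first);
		}
	}
	return (-1);
}

int
main(void)
{
	used[2] = 1;	/* simulate a page already in use */
	printf("3 pages at index %d\n", find_run(3));	/* expect 3 */
	printf("3 more at index %d\n", find_run(3));	/* expect 6 */
	return (0);
}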
*/ static inline void xbb_release_reqlist(struct xbb_softc *xbb, struct xbb_xen_reqlist *reqlist, int wakeup) { mtx_assert(&xbb->lock, MA_OWNED); if (wakeup) { wakeup = xbb->flags & XBBF_RESOURCE_SHORTAGE; xbb->flags &= ~XBBF_RESOURCE_SHORTAGE; } if (reqlist->kva != NULL) xbb_free_kva(xbb, reqlist->kva, reqlist->nr_segments); xbb_release_reqs(xbb, &reqlist->contig_req_list, reqlist->num_children); STAILQ_INSERT_TAIL(&xbb->reqlist_free_stailq, reqlist, links); if ((xbb->flags & XBBF_SHUTDOWN) != 0) { /* * Shutdown is in progress. See if we can * progress further now that one more request * has completed and been returned to the * free pool. */ xbb_shutdown(xbb); } if (wakeup != 0) taskqueue_enqueue(xbb->io_taskqueue, &xbb->io_task); } /** * Request resources and do basic request setup. * * \param xbb Per-instance xbb configuration structure. * \param reqlist Pointer to reqlist pointer. * \param ring_req Pointer to a block ring request. * \param ring_index The ring index of this request. * * \return 0 for success, non-zero for failure. */ static int xbb_get_resources(struct xbb_softc *xbb, struct xbb_xen_reqlist **reqlist, blkif_request_t *ring_req, RING_IDX ring_idx) { struct xbb_xen_reqlist *nreqlist; struct xbb_xen_req *nreq; nreqlist = NULL; nreq = NULL; mtx_lock(&xbb->lock); /* * We don't allow new resources to be allocated if we're in the * process of shutting down. */ if ((xbb->flags & XBBF_SHUTDOWN) != 0) { mtx_unlock(&xbb->lock); return (1); } /* * Allocate a reqlist if the caller doesn't have one already. */ if (*reqlist == NULL) { nreqlist = xbb_get_reqlist(xbb); if (nreqlist == NULL) goto bailout_error; } /* We always allocate a request. */ nreq = xbb_get_req(xbb); if (nreq == NULL) goto bailout_error; mtx_unlock(&xbb->lock); if (*reqlist == NULL) { *reqlist = nreqlist; nreqlist->operation = ring_req->operation; nreqlist->starting_sector_number = ring_req->sector_number; STAILQ_INSERT_TAIL(&xbb->reqlist_pending_stailq, nreqlist, links); } nreq->reqlist = *reqlist; nreq->req_ring_idx = ring_idx; nreq->id = ring_req->id; nreq->operation = ring_req->operation; if (xbb->abi != BLKIF_PROTOCOL_NATIVE) { bcopy(ring_req, &nreq->ring_req_storage, sizeof(*ring_req)); nreq->ring_req = &nreq->ring_req_storage; } else { nreq->ring_req = ring_req; } binuptime(&nreq->ds_t0); devstat_start_transaction(xbb->xbb_stats_in, &nreq->ds_t0); STAILQ_INSERT_TAIL(&(*reqlist)->contig_req_list, nreq, links); (*reqlist)->num_children++; (*reqlist)->nr_segments += ring_req->nr_segments; return (0); bailout_error: /* * We're out of resources, so set the shortage flag. The next time * a request is released, we'll try waking up the work thread to * see if we can allocate more resources. */ xbb->flags |= XBBF_RESOURCE_SHORTAGE; xbb->request_shortages++; if (nreq != NULL) xbb_release_req(xbb, nreq); if (nreqlist != NULL) xbb_release_reqlist(xbb, nreqlist, /*wakeup*/ 0); mtx_unlock(&xbb->lock); return (1); } /** * Create and queue a response to a blkif request. * * \param xbb Per-instance xbb configuration structure. * \param req The request structure to which to respond. * \param status The status code to report. See BLKIF_RSP_* * in sys/xen/interface/io/blkif.h. */ static void xbb_queue_response(struct xbb_softc *xbb, struct xbb_xen_req *req, int status) { blkif_response_t *resp; /* * The mutex is required here, and should be held across this call * until after the subsequent call to xbb_push_responses(). This * is to guarantee that another context won't queue responses and * push them while we're active. 
* * That could lead to the other end being notified of responses * before the resources have been freed on this end. The other end * would then be able to queue additional I/O, and we may run out * of resources because we haven't freed them all yet. */ mtx_assert(&xbb->lock, MA_OWNED); /* * Place on the response ring for the relevant domain. * For now, only the spacing between entries is different * in the different ABIs, not the response entry layout. */ switch (xbb->abi) { case BLKIF_PROTOCOL_NATIVE: resp = RING_GET_RESPONSE(&xbb->rings.native, xbb->rings.native.rsp_prod_pvt); break; case BLKIF_PROTOCOL_X86_32: resp = (blkif_response_t *) RING_GET_RESPONSE(&xbb->rings.x86_32, xbb->rings.x86_32.rsp_prod_pvt); break; case BLKIF_PROTOCOL_X86_64: resp = (blkif_response_t *) RING_GET_RESPONSE(&xbb->rings.x86_64, xbb->rings.x86_64.rsp_prod_pvt); break; default: panic("Unexpected blkif protocol ABI."); } resp->id = req->id; resp->operation = req->operation; resp->status = status; if (status != BLKIF_RSP_OKAY) xbb->reqs_completed_with_error++; xbb->rings.common.rsp_prod_pvt++; xbb->reqs_queued_for_completion++; } /** * Send queued responses to blkif requests. * * \param xbb Per-instance xbb configuration structure. * \param run_taskqueue Flag that is set to 1 if the taskqueue * should be run, 0 if it does not need to be run. * \param notify Flag that is set to 1 if the other end should be * notified via irq, 0 if the other end should not be * notified. */ static void xbb_push_responses(struct xbb_softc *xbb, int *run_taskqueue, int *notify) { int more_to_do; /* * The mutex is required here. */ mtx_assert(&xbb->lock, MA_OWNED); more_to_do = 0; RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&xbb->rings.common, *notify); if (xbb->rings.common.rsp_prod_pvt == xbb->rings.common.req_cons) { /* * Tail check for pending requests. Allows frontend to avoid * notifications if requests are already in flight (lower * overheads and promotes batching). */ RING_FINAL_CHECK_FOR_REQUESTS(&xbb->rings.common, more_to_do); } else if (RING_HAS_UNCONSUMED_REQUESTS(&xbb->rings.common)) { more_to_do = 1; } xbb->reqs_completed += xbb->reqs_queued_for_completion; xbb->reqs_queued_for_completion = 0; *run_taskqueue = more_to_do; } /** * Complete a request list. * * \param xbb Per-instance xbb configuration structure. * \param reqlist Allocated internal request list structure. */ static void xbb_complete_reqlist(struct xbb_softc *xbb, struct xbb_xen_reqlist *reqlist) { struct xbb_xen_req *nreq; off_t sectors_sent; int notify, run_taskqueue; sectors_sent = 0; if (reqlist->flags & XBB_REQLIST_MAPPED) xbb_unmap_reqlist(reqlist); mtx_lock(&xbb->lock); /* * All I/O is done, send the response. A lock is not necessary * to protect the request list, because all requests have * completed. Therefore this is the only context accessing this * reqlist right now. However, in order to make sure that no one * else queues responses onto the queue or pushes them to the other * side while we're active, we need to hold the lock across the * calls to xbb_queue_response() and xbb_push_responses(). */ STAILQ_FOREACH(nreq, &reqlist->contig_req_list, links) { off_t cur_sectors_sent; /* Put this response on the ring, but don't push yet */ xbb_queue_response(xbb, nreq, reqlist->status); /* We don't report bytes sent if there is an error. 
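Editor's note: xbb_push_responses() relies on RING_PUSH_RESPONSES_AND_CHECK_NOTIFY() to publish the new producer index and decide whether an event must be sent. The event-suppression test such ring macros are commonly built on checks, with wrap-safe unsigned arithmetic, whether the peer's requested rsp_event index falls inside the range of responses just pushed. The sketch below models only that comparison, not the macro itself or its memory barriers.

#include <stdint.h>
#include <stdio.h>

typedef uint32_t ring_idx_t;

/*
 * Notify the peer only if the index it asked to be woken at (rsp_event)
 * lies within (old_prod, new_prod].  Unsigned subtraction keeps the test
 * correct across index wrap-around.
 */
static int
need_notify(ring_idx_t old_prod, ring_idx_t new_prod, ring_idx_t rsp_event)
{
	return ((ring_idx_t)(new_prod - rsp_event) <
	    (ring_idx_t)(new_prod - old_prod));
}

int
main(void)
{
	/* Peer asked for an event once the producer passes 5; we moved 3 -> 7. */
	printf("%d\n", need_notify(3, 7, 5));	/* 1: notify */
	/* Peer asked for 10; the producer has not reached it yet. */
	printf("%d\n", need_notify(3, 7, 10));	/* 0: suppress */
	return (0);
}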
*/ if (reqlist->status == BLKIF_RSP_OKAY) cur_sectors_sent = nreq->nr_512b_sectors; else cur_sectors_sent = 0; sectors_sent += cur_sectors_sent; devstat_end_transaction(xbb->xbb_stats_in, /*bytes*/cur_sectors_sent << 9, reqlist->ds_tag_type, reqlist->ds_trans_type, /*now*/NULL, /*then*/&nreq->ds_t0); } /* * Take out any sectors not sent. If we wind up negative (which * might happen if an error is reported as well as a residual), just * report 0 sectors sent. */ sectors_sent -= reqlist->residual_512b_sectors; if (sectors_sent < 0) sectors_sent = 0; devstat_end_transaction(xbb->xbb_stats, /*bytes*/ sectors_sent << 9, reqlist->ds_tag_type, reqlist->ds_trans_type, /*now*/NULL, /*then*/&reqlist->ds_t0); xbb_release_reqlist(xbb, reqlist, /*wakeup*/ 1); xbb_push_responses(xbb, &run_taskqueue, ¬ify); mtx_unlock(&xbb->lock); if (run_taskqueue) taskqueue_enqueue(xbb->io_taskqueue, &xbb->io_task); if (notify) xen_intr_signal(xbb->xen_intr_handle); } /** * Completion handler for buffer I/O requests issued by the device * backend driver. * * \param bio The buffer I/O request on which to perform completion * processing. */ static void xbb_bio_done(struct bio *bio) { struct xbb_softc *xbb; struct xbb_xen_reqlist *reqlist; reqlist = bio->bio_caller1; xbb = reqlist->xbb; reqlist->residual_512b_sectors += bio->bio_resid >> 9; /* * This is a bit imprecise. With aggregated I/O a single * request list can contain multiple front-end requests and * a multiple bios may point to a single request. By carefully * walking the request list, we could map residuals and errors * back to the original front-end request, but the interface * isn't sufficiently rich for us to properly report the error. * So, we just treat the entire request list as having failed if an * error occurs on any part. And, if an error occurs, we treat * the amount of data transferred as 0. * * For residuals, we report it on the overall aggregated device, * but not on the individual requests, since we don't currently * do the work to determine which front-end request to which the * residual applies. */ if (bio->bio_error) { DPRINTF("BIO returned error %d for operation on device %s\n", bio->bio_error, xbb->dev_name); reqlist->status = BLKIF_RSP_ERROR; if (bio->bio_error == ENXIO && xenbus_get_state(xbb->dev) == XenbusStateConnected) { /* * Backend device has disappeared. Signal the * front-end that we (the device proxy) want to * go away. */ xenbus_set_state(xbb->dev, XenbusStateClosing); } } #ifdef XBB_USE_BOUNCE_BUFFERS if (bio->bio_cmd == BIO_READ) { vm_offset_t kva_offset; kva_offset = (vm_offset_t)bio->bio_data - (vm_offset_t)reqlist->bounce; memcpy((uint8_t *)reqlist->kva + kva_offset, bio->bio_data, bio->bio_bcount); } #endif /* XBB_USE_BOUNCE_BUFFERS */ /* * Decrement the pending count for the request list. When we're * done with the requests, send status back for all of them. */ if (atomic_fetchadd_int(&reqlist->pendcnt, -1) == 1) xbb_complete_reqlist(xbb, reqlist); g_destroy_bio(bio); } /** * Parse a blkif request into an internal request structure and send * it to the backend for processing. * * \param xbb Per-instance xbb configuration structure. * \param reqlist Allocated internal request list structure. * * \return On success, 0. For resource shortages, non-zero. * * This routine performs the backend common aspects of request parsing * including compiling an internal request structure, parsing the S/G * list and any secondary ring requests in which they may reside, and * the mapping of front-end I/O pages into our domain. 
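Editor's note: xbb_bio_done() uses atomic_fetchadd_int(&reqlist->pendcnt, -1) == 1 so that exactly one completion path, the one that drops the count to zero, finishes the request list. The same "last one out completes the work" idiom in portable C11 atomics; piece_done() and finish() are illustrative names.

#include <stdatomic.h>
#include <stdio.h>

static atomic_int pendcnt;

static void
finish(void)
{
	printf("all pieces done, sending completion\n");
}

/* Called once per completed piece of a multi-part operation. */
static void
piece_done(void)
{
	/* fetch_sub returns the previous value; 1 means we were the last. */
	if (atomic_fetch_sub(&pendcnt, 1) == 1)
		finish();
}

int
main(void)
{
	int i;

	atomic_store(&pendcnt, 3);	/* three outstanding pieces */
	for (i = 0; i < 3; i++)
		piece_done();
	return (0);
}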
*/ static int xbb_dispatch_io(struct xbb_softc *xbb, struct xbb_xen_reqlist *reqlist) { struct xbb_sg *xbb_sg; struct gnttab_map_grant_ref *map; struct blkif_request_segment *sg; struct blkif_request_segment *last_block_sg; struct xbb_xen_req *nreq; u_int nseg; u_int seg_idx; u_int block_segs; int nr_sects; int total_sects; int operation; uint8_t bio_flags; int error; reqlist->ds_tag_type = DEVSTAT_TAG_SIMPLE; bio_flags = 0; total_sects = 0; nr_sects = 0; /* * First determine whether we have enough free KVA to satisfy this * request list. If not, tell xbb_run_queue() so it can go to * sleep until we have more KVA. */ reqlist->kva = NULL; if (reqlist->nr_segments != 0) { reqlist->kva = xbb_get_kva(xbb, reqlist->nr_segments); if (reqlist->kva == NULL) { /* * If we're out of KVA, return ENOMEM. */ return (ENOMEM); } } binuptime(&reqlist->ds_t0); devstat_start_transaction(xbb->xbb_stats, &reqlist->ds_t0); switch (reqlist->operation) { case BLKIF_OP_WRITE_BARRIER: bio_flags |= BIO_ORDERED; reqlist->ds_tag_type = DEVSTAT_TAG_ORDERED; /* FALLTHROUGH */ case BLKIF_OP_WRITE: operation = BIO_WRITE; reqlist->ds_trans_type = DEVSTAT_WRITE; if ((xbb->flags & XBBF_READ_ONLY) != 0) { DPRINTF("Attempt to write to read only device %s\n", xbb->dev_name); reqlist->status = BLKIF_RSP_ERROR; goto send_response; } break; case BLKIF_OP_READ: operation = BIO_READ; reqlist->ds_trans_type = DEVSTAT_READ; break; case BLKIF_OP_FLUSH_DISKCACHE: /* * If this is true, the user has requested that we disable * flush support. So we just complete the requests * successfully. */ if (xbb->disable_flush != 0) { goto send_response; } /* * The user has requested that we only send a real flush * for every N flush requests. So keep count, and either * complete the request immediately or queue it for the * backend. */ if (xbb->flush_interval != 0) { if (++(xbb->flush_count) < xbb->flush_interval) { goto send_response; } else xbb->flush_count = 0; } operation = BIO_FLUSH; reqlist->ds_tag_type = DEVSTAT_TAG_ORDERED; reqlist->ds_trans_type = DEVSTAT_NO_DATA; goto do_dispatch; /*NOTREACHED*/ default: DPRINTF("error: unknown block io operation [%d]\n", reqlist->operation); reqlist->status = BLKIF_RSP_ERROR; goto send_response; } reqlist->xbb = xbb; xbb_sg = xbb->xbb_sgs; map = xbb->maps; seg_idx = 0; STAILQ_FOREACH(nreq, &reqlist->contig_req_list, links) { blkif_request_t *ring_req; RING_IDX req_ring_idx; u_int req_seg_idx; ring_req = nreq->ring_req; req_ring_idx = nreq->req_ring_idx; nr_sects = 0; nseg = ring_req->nr_segments; nreq->nr_pages = nseg; nreq->nr_512b_sectors = 0; req_seg_idx = 0; sg = NULL; /* Check that number of segments is sane. 
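Editor's note: the BLKIF_OP_FLUSH_DISKCACHE handling above implements two tunables: disable_flush short-circuits every flush, and flush_interval forwards only every Nth flush to the backend while completing the rest immediately. The decision logic pulled out into a stand-alone helper for clarity; struct flush_policy and should_forward_flush() are illustrative names.

#include <stdio.h>

struct flush_policy {
	int disable_flush;	/* fake every flush */
	int flush_interval;	/* forward only every Nth flush (0 = all) */
	int flush_count;	/* flushes seen since the last real one */
};

/* Return 1 if this flush should be sent to the backing store. */
static int
should_forward_flush(struct flush_policy *p)
{
	if (p->disable_flush)
		return (0);
	if (p->flush_interval != 0) {
		if (++p->flush_count < p->flush_interval)
			return (0);
		p->flush_count = 0;
	}
	return (1);
}

int
main(void)
{
	struct flush_policy p = { 0, 3, 0 };
	int i;

	for (i = 1; i <= 6; i++)
		printf("flush %d: %s\n", i,
		    should_forward_flush(&p) ? "forward" : "complete locally");
	return (0);
}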
*/ if (__predict_false(nseg == 0) || __predict_false(nseg > xbb->max_request_segments)) { DPRINTF("Bad number of segments in request (%d)\n", nseg); reqlist->status = BLKIF_RSP_ERROR; goto send_response; } block_segs = nseg; sg = ring_req->seg; last_block_sg = sg + block_segs; while (sg < last_block_sg) { KASSERT(seg_idx < XBB_MAX_SEGMENTS_PER_REQLIST, ("seg_idx %d is too large, max " "segs %d\n", seg_idx, XBB_MAX_SEGMENTS_PER_REQLIST)); xbb_sg->first_sect = sg->first_sect; xbb_sg->last_sect = sg->last_sect; xbb_sg->nsect = (int8_t)(sg->last_sect - sg->first_sect + 1); if ((sg->last_sect >= (PAGE_SIZE >> 9)) || (xbb_sg->nsect <= 0)) { reqlist->status = BLKIF_RSP_ERROR; goto send_response; } nr_sects += xbb_sg->nsect; map->host_addr = xbb_get_gntaddr(reqlist, seg_idx, /*sector*/0); KASSERT(map->host_addr + PAGE_SIZE <= xbb->ring_config.gnt_addr, ("Host address %#jx len %d overlaps " "ring address %#jx\n", (uintmax_t)map->host_addr, PAGE_SIZE, (uintmax_t)xbb->ring_config.gnt_addr)); map->flags = GNTMAP_host_map; map->ref = sg->gref; map->dom = xbb->otherend_id; if (operation == BIO_WRITE) map->flags |= GNTMAP_readonly; sg++; map++; xbb_sg++; seg_idx++; req_seg_idx++; } /* Convert to the disk's sector size */ nreq->nr_512b_sectors = nr_sects; nr_sects = (nr_sects << 9) >> xbb->sector_size_shift; total_sects += nr_sects; if ((nreq->nr_512b_sectors & ((xbb->sector_size >> 9) - 1)) != 0) { device_printf(xbb->dev, "%s: I/O size (%d) is not " "a multiple of the backing store sector " "size (%d)\n", __func__, nreq->nr_512b_sectors << 9, xbb->sector_size); reqlist->status = BLKIF_RSP_ERROR; goto send_response; } } error = HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, xbb->maps, reqlist->nr_segments); if (error != 0) panic("Grant table operation failed (%d)", error); reqlist->flags |= XBB_REQLIST_MAPPED; for (seg_idx = 0, map = xbb->maps; seg_idx < reqlist->nr_segments; seg_idx++, map++){ if (__predict_false(map->status != 0)) { DPRINTF("invalid buffer -- could not remap " "it (%d)\n", map->status); DPRINTF("Mapping(%d): Host Addr 0x%"PRIx64", flags " "0x%x ref 0x%x, dom %d\n", seg_idx, map->host_addr, map->flags, map->ref, map->dom); reqlist->status = BLKIF_RSP_ERROR; goto send_response; } reqlist->gnt_handles[seg_idx] = map->handle; } if (reqlist->starting_sector_number + total_sects > xbb->media_num_sectors) { DPRINTF("%s of [%" PRIu64 ",%" PRIu64 "] " "extends past end of device %s\n", operation == BIO_READ ? "read" : "write", reqlist->starting_sector_number, reqlist->starting_sector_number + total_sects, xbb->dev_name); reqlist->status = BLKIF_RSP_ERROR; goto send_response; } do_dispatch: error = xbb->dispatch_io(xbb, reqlist, operation, bio_flags); if (error != 0) { reqlist->status = BLKIF_RSP_ERROR; goto send_response; } return (0); send_response: xbb_complete_reqlist(xbb, reqlist); return (0); } static __inline int xbb_count_sects(blkif_request_t *ring_req) { int i; int cur_size = 0; for (i = 0; i < ring_req->nr_segments; i++) { int nsect; nsect = (int8_t)(ring_req->seg[i].last_sect - ring_req->seg[i].first_sect + 1); if (nsect <= 0) break; cur_size += nsect; } return (cur_size); } /** * Process incoming requests from the shared communication ring in response * to a signal on the ring's event channel. * * \param context Callback argument registerd during task initialization - * the xbb_softc for this instance. * \param pending The number of taskqueue_enqueue events that have * occurred since this handler was last run. 
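Editor's note: the per-request bookkeeping above converts the 512-byte sector count supplied by the front end into backing-store sectors with (nr_512b << 9) >> sector_size_shift, and rejects requests whose byte length is not a multiple of the backing sector size using the mask (sector_size >> 9) - 1. A small check of both calculations, with 4096-byte backing sectors assumed for the example:

#include <stdint.h>
#include <stdio.h>

int
main(void)
{
	uint32_t sector_size = 4096;	/* backing store sector size (example) */
	uint32_t shift = 12;		/* log2(4096) */
	uint32_t nr_512b = 24;		/* 24 * 512 = 12288 bytes */

	/* 12288 bytes is exactly 3 backing-store sectors. */
	printf("device sectors: %u\n", (nr_512b << 9) >> shift);

	/* Alignment test: 24 is a multiple of 8 (4096/512), so it passes. */
	if ((nr_512b & ((sector_size >> 9) - 1)) != 0)
		printf("rejected: not a multiple of the sector size\n");
	else
		printf("aligned\n");
	return (0);
}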
*/ static void xbb_run_queue(void *context, int pending) { struct xbb_softc *xbb; blkif_back_rings_t *rings; RING_IDX rp; uint64_t cur_sector; int cur_operation; struct xbb_xen_reqlist *reqlist; xbb = (struct xbb_softc *)context; rings = &xbb->rings; /* * Work gather and dispatch loop. Note that we have a bias here * towards gathering I/O sent by blockfront. We first gather up * everything in the ring, as long as we have resources. Then we * dispatch one request, and then attempt to gather up any * additional requests that have come in while we were dispatching * the request. * * This allows us to get a clearer picture (via devstat) of how * many requests blockfront is queueing to us at any given time. */ for (;;) { int retval; /* * Initialize reqlist to the last element in the pending * queue, if there is one. This allows us to add more * requests to that request list, if we have room. */ reqlist = STAILQ_LAST(&xbb->reqlist_pending_stailq, xbb_xen_reqlist, links); if (reqlist != NULL) { cur_sector = reqlist->next_contig_sector; cur_operation = reqlist->operation; } else { cur_operation = 0; cur_sector = 0; } /* * Cache req_prod to avoid accessing a cache line shared * with the frontend. */ rp = rings->common.sring->req_prod; /* Ensure we see queued requests up to 'rp'. */ rmb(); /** * Run so long as there is work to consume and the generation * of a response will not overflow the ring. * * @note There's a 1 to 1 relationship between requests and * responses, so an overflow should never occur. This * test is to protect our domain from digesting bogus * data. Shouldn't we log this? */ while (rings->common.req_cons != rp && RING_REQUEST_CONS_OVERFLOW(&rings->common, rings->common.req_cons) == 0){ blkif_request_t ring_req_storage; blkif_request_t *ring_req; int cur_size; switch (xbb->abi) { case BLKIF_PROTOCOL_NATIVE: ring_req = RING_GET_REQUEST(&xbb->rings.native, rings->common.req_cons); break; case BLKIF_PROTOCOL_X86_32: { struct blkif_x86_32_request *ring_req32; ring_req32 = RING_GET_REQUEST( &xbb->rings.x86_32, rings->common.req_cons); blkif_get_x86_32_req(&ring_req_storage, ring_req32); ring_req = &ring_req_storage; break; } case BLKIF_PROTOCOL_X86_64: { struct blkif_x86_64_request *ring_req64; ring_req64 =RING_GET_REQUEST(&xbb->rings.x86_64, rings->common.req_cons); blkif_get_x86_64_req(&ring_req_storage, ring_req64); ring_req = &ring_req_storage; break; } default: panic("Unexpected blkif protocol ABI."); /* NOTREACHED */ } /* * Check for situations that would require closing * off this I/O for further coalescing: * - Coalescing is turned off. * - Current I/O is out of sequence with the previous * I/O. * - Coalesced I/O would be too large. */ if ((reqlist != NULL) && ((xbb->no_coalesce_reqs != 0) || ((xbb->no_coalesce_reqs == 0) && ((ring_req->sector_number != cur_sector) || (ring_req->operation != cur_operation) || ((ring_req->nr_segments + reqlist->nr_segments) > xbb->max_reqlist_segments))))) { reqlist = NULL; } /* * Grab and check for all resources in one shot. * If we can't get all of the resources we need, * the shortage is noted and the thread will get * woken up when more resources are available. */ retval = xbb_get_resources(xbb, &reqlist, ring_req, xbb->rings.common.req_cons); if (retval != 0) { /* * Resource shortage has been recorded. * We'll be scheduled to run once a request * object frees up due to a completion. */ break; } /* * Signify that we can overwrite this request with * a response by incrementing our consumer index. 
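Editor's note: the coalescing test in xbb_run_queue() decides whether the next ring request can be folded into the request list currently being built: coalescing must be enabled, the operation must match, the starting sector must continue where the previous request ended, and the combined segment count must stay within max_reqlist_segments. The same predicate as a stand-alone function; the struct and parameter names are illustrative.

#include <stdint.h>
#include <stdio.h>

struct pending {
	int		operation;	/* e.g. read vs. write */
	uint64_t	next_sector;	/* sector right after the last request */
	int		nr_segments;	/* segments accumulated so far */
};

static int
can_coalesce(const struct pending *cur, int op, uint64_t sector,
    int nseg, int no_coalesce, int max_segments)
{
	if (no_coalesce)
		return (0);
	if (op != cur->operation || sector != cur->next_sector)
		return (0);
	if (cur->nr_segments + nseg > max_segments)
		return (0);
	return (1);
}

int
main(void)
{
	struct pending cur = { /*op*/ 1, /*next_sector*/ 2048, /*nseg*/ 10 };

	printf("%d\n", can_coalesce(&cur, 1, 2048, 4, 0, 128));	/* 1 */
	printf("%d\n", can_coalesce(&cur, 1, 4096, 4, 0, 128));	/* 0: gap */
	return (0);
}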
* The response won't be generated until after * we've already consumed all necessary data out * of the version of the request in the ring buffer * (for native mode). We must update the consumer * index before issuing back-end I/O so there is * no possibility that it will complete and a * response be generated before we make room in * the queue for that response. */ xbb->rings.common.req_cons++; xbb->reqs_received++; cur_size = xbb_count_sects(ring_req); cur_sector = ring_req->sector_number + cur_size; reqlist->next_contig_sector = cur_sector; cur_operation = ring_req->operation; } /* Check for I/O to dispatch */ reqlist = STAILQ_FIRST(&xbb->reqlist_pending_stailq); if (reqlist == NULL) { /* * We're out of work to do, put the task queue to * sleep. */ break; } /* * Grab the first request off the queue and attempt * to dispatch it. */ STAILQ_REMOVE_HEAD(&xbb->reqlist_pending_stailq, links); retval = xbb_dispatch_io(xbb, reqlist); if (retval != 0) { /* * xbb_dispatch_io() returns non-zero only when * there is a resource shortage. If that's the * case, re-queue this request on the head of the * queue, and go to sleep until we have more * resources. */ STAILQ_INSERT_HEAD(&xbb->reqlist_pending_stailq, reqlist, links); break; } else { /* * If we still have anything on the queue after * removing the head entry, that is because we * met one of the criteria to create a new * request list (outlined above), and we'll call * that a forced dispatch for statistical purposes. * * Otherwise, if there is only one element on the * queue, we coalesced everything available on * the ring and we'll call that a normal dispatch. */ reqlist = STAILQ_FIRST(&xbb->reqlist_pending_stailq); if (reqlist != NULL) xbb->forced_dispatch++; else xbb->normal_dispatch++; xbb->total_dispatch++; } } } /** * Interrupt handler bound to the shared ring's event channel. * * \param arg Callback argument registerd during event channel * binding - the xbb_softc for this instance. */ static int xbb_filter(void *arg) { struct xbb_softc *xbb; /* Defer to taskqueue thread. */ xbb = (struct xbb_softc *)arg; taskqueue_enqueue(xbb->io_taskqueue, &xbb->io_task); return (FILTER_HANDLED); } SDT_PROVIDER_DEFINE(xbb); SDT_PROBE_DEFINE1(xbb, kernel, xbb_dispatch_dev, flush, "int"); SDT_PROBE_DEFINE3(xbb, kernel, xbb_dispatch_dev, read, "int", "uint64_t", "uint64_t"); SDT_PROBE_DEFINE3(xbb, kernel, xbb_dispatch_dev, write, "int", "uint64_t", "uint64_t"); /*----------------------------- Backend Handlers -----------------------------*/ /** * Backend handler for character device access. * * \param xbb Per-instance xbb configuration structure. * \param reqlist Allocated internal request list structure. * \param operation BIO_* I/O operation code. * \param bio_flags Additional bio_flag data to pass to any generated * bios (e.g. BIO_ORDERED).. * * \return 0 for success, errno codes for failure. 
*/ static int xbb_dispatch_dev(struct xbb_softc *xbb, struct xbb_xen_reqlist *reqlist, int operation, int bio_flags) { struct xbb_dev_data *dev_data; struct bio *bios[XBB_MAX_SEGMENTS_PER_REQLIST]; off_t bio_offset; struct bio *bio; struct xbb_sg *xbb_sg; u_int nbio; u_int bio_idx; u_int nseg; u_int seg_idx; int error; dev_data = &xbb->backend.dev; bio_offset = (off_t)reqlist->starting_sector_number << xbb->sector_size_shift; error = 0; nbio = 0; bio_idx = 0; if (operation == BIO_FLUSH) { bio = g_new_bio(); if (__predict_false(bio == NULL)) { DPRINTF("Unable to allocate bio for BIO_FLUSH\n"); error = ENOMEM; return (error); } bio->bio_cmd = BIO_FLUSH; bio->bio_flags |= BIO_ORDERED; bio->bio_dev = dev_data->cdev; bio->bio_offset = 0; bio->bio_data = 0; bio->bio_done = xbb_bio_done; bio->bio_caller1 = reqlist; bio->bio_pblkno = 0; reqlist->pendcnt = 1; SDT_PROBE1(xbb, kernel, xbb_dispatch_dev, flush, device_get_unit(xbb->dev)); (*dev_data->csw->d_strategy)(bio); return (0); } xbb_sg = xbb->xbb_sgs; bio = NULL; nseg = reqlist->nr_segments; for (seg_idx = 0; seg_idx < nseg; seg_idx++, xbb_sg++) { /* * KVA will not be contiguous, so any additional * I/O will need to be represented in a new bio. */ if ((bio != NULL) && (xbb_sg->first_sect != 0)) { if ((bio->bio_length & (xbb->sector_size - 1)) != 0) { printf("%s: Discontiguous I/O request " "from domain %d ends on " "non-sector boundary\n", __func__, xbb->otherend_id); error = EINVAL; goto fail_free_bios; } bio = NULL; } if (bio == NULL) { /* * Make sure that the start of this bio is * aligned to a device sector. */ if ((bio_offset & (xbb->sector_size - 1)) != 0){ printf("%s: Misaligned I/O request " "from domain %d\n", __func__, xbb->otherend_id); error = EINVAL; goto fail_free_bios; } bio = bios[nbio++] = g_new_bio(); if (__predict_false(bio == NULL)) { error = ENOMEM; goto fail_free_bios; } bio->bio_cmd = operation; bio->bio_flags |= bio_flags; bio->bio_dev = dev_data->cdev; bio->bio_offset = bio_offset; bio->bio_data = xbb_reqlist_ioaddr(reqlist, seg_idx, xbb_sg->first_sect); bio->bio_done = xbb_bio_done; bio->bio_caller1 = reqlist; bio->bio_pblkno = bio_offset >> xbb->sector_size_shift; } bio->bio_length += xbb_sg->nsect << 9; bio->bio_bcount = bio->bio_length; bio_offset += xbb_sg->nsect << 9; if (xbb_sg->last_sect != (PAGE_SIZE - 512) >> 9) { if ((bio->bio_length & (xbb->sector_size - 1)) != 0) { printf("%s: Discontiguous I/O request " "from domain %d ends on " "non-sector boundary\n", __func__, xbb->otherend_id); error = EINVAL; goto fail_free_bios; } /* * KVA will not be contiguous, so any additional * I/O will need to be represented in a new bio. 
*/ bio = NULL; } } reqlist->pendcnt = nbio; for (bio_idx = 0; bio_idx < nbio; bio_idx++) { #ifdef XBB_USE_BOUNCE_BUFFERS vm_offset_t kva_offset; kva_offset = (vm_offset_t)bios[bio_idx]->bio_data - (vm_offset_t)reqlist->bounce; if (operation == BIO_WRITE) { memcpy(bios[bio_idx]->bio_data, (uint8_t *)reqlist->kva + kva_offset, bios[bio_idx]->bio_bcount); } #endif if (operation == BIO_READ) { SDT_PROBE3(xbb, kernel, xbb_dispatch_dev, read, device_get_unit(xbb->dev), bios[bio_idx]->bio_offset, bios[bio_idx]->bio_length); } else if (operation == BIO_WRITE) { SDT_PROBE3(xbb, kernel, xbb_dispatch_dev, write, device_get_unit(xbb->dev), bios[bio_idx]->bio_offset, bios[bio_idx]->bio_length); } (*dev_data->csw->d_strategy)(bios[bio_idx]); } return (error); fail_free_bios: for (bio_idx = 0; bio_idx < (nbio-1); bio_idx++) g_destroy_bio(bios[bio_idx]); return (error); } SDT_PROBE_DEFINE1(xbb, kernel, xbb_dispatch_file, flush, "int"); SDT_PROBE_DEFINE3(xbb, kernel, xbb_dispatch_file, read, "int", "uint64_t", "uint64_t"); SDT_PROBE_DEFINE3(xbb, kernel, xbb_dispatch_file, write, "int", "uint64_t", "uint64_t"); /** * Backend handler for file access. * * \param xbb Per-instance xbb configuration structure. * \param reqlist Allocated internal request list. * \param operation BIO_* I/O operation code. * \param flags Additional bio_flag data to pass to any generated bios * (e.g. BIO_ORDERED).. * * \return 0 for success, errno codes for failure. */ static int xbb_dispatch_file(struct xbb_softc *xbb, struct xbb_xen_reqlist *reqlist, int operation, int flags) { struct xbb_file_data *file_data; u_int seg_idx; u_int nseg; struct uio xuio; struct xbb_sg *xbb_sg; struct iovec *xiovec; #ifdef XBB_USE_BOUNCE_BUFFERS void **p_vaddr; int saved_uio_iovcnt; #endif /* XBB_USE_BOUNCE_BUFFERS */ int error; file_data = &xbb->backend.file; error = 0; bzero(&xuio, sizeof(xuio)); switch (operation) { case BIO_READ: xuio.uio_rw = UIO_READ; break; case BIO_WRITE: xuio.uio_rw = UIO_WRITE; break; case BIO_FLUSH: { struct mount *mountpoint; SDT_PROBE1(xbb, kernel, xbb_dispatch_file, flush, device_get_unit(xbb->dev)); (void) vn_start_write(xbb->vn, &mountpoint, V_WAIT); vn_lock(xbb->vn, LK_EXCLUSIVE | LK_RETRY); error = VOP_FSYNC(xbb->vn, MNT_WAIT, curthread); VOP_UNLOCK(xbb->vn, 0); vn_finished_write(mountpoint); goto bailout_send_response; /* NOTREACHED */ } default: panic("invalid operation %d", operation); /* NOTREACHED */ } xuio.uio_offset = (vm_offset_t)reqlist->starting_sector_number << xbb->sector_size_shift; xuio.uio_segflg = UIO_SYSSPACE; xuio.uio_iov = file_data->xiovecs; xuio.uio_iovcnt = 0; xbb_sg = xbb->xbb_sgs; nseg = reqlist->nr_segments; for (xiovec = NULL, seg_idx = 0; seg_idx < nseg; seg_idx++, xbb_sg++) { /* * If the first sector is not 0, the KVA will * not be contiguous and we'll need to go on * to another segment. */ if (xbb_sg->first_sect != 0) xiovec = NULL; if (xiovec == NULL) { xiovec = &file_data->xiovecs[xuio.uio_iovcnt]; xiovec->iov_base = xbb_reqlist_ioaddr(reqlist, seg_idx, xbb_sg->first_sect); #ifdef XBB_USE_BOUNCE_BUFFERS /* * Store the address of the incoming * buffer at this particular offset * as well, so we can do the copy * later without having to do more * work to recalculate this address. 
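Editor's note: xbb_dispatch_file() walks the scatter/gather list and folds adjacent segments into as few iovec entries as possible: a new iovec is started whenever a segment does not begin at sector 0 of its page, and the current one is closed whenever a segment does not run to the end of its page. A user-space sketch of that merging rule, assuming 4096-byte pages (8 sectors per page); struct seg and build_iovecs() are illustrative.

#include <sys/uio.h>
#include <stdio.h>

#define SECT_PER_PAGE	8	/* 4096-byte pages, 512-byte sectors */

struct seg {
	int first_sect;		/* first valid 512b sector in the page */
	int nsect;		/* number of valid sectors */
};

/* Merge segments into iovecs; returns the number of iovecs built. */
static int
build_iovecs(char *base, const struct seg *segs, int nseg, struct iovec *iov)
{
	struct iovec *cur = NULL;
	int i, n = 0;

	for (i = 0; i < nseg; i++) {
		if (segs[i].first_sect != 0)	/* hole before this data */
			cur = NULL;
		if (cur == NULL) {
			cur = &iov[n++];
			cur->iov_base = base + (size_t)i * 4096 +
			    ((size_t)segs[i].first_sect << 9);
			cur->iov_len = 0;
		}
		cur->iov_len += (size_t)segs[i].nsect << 9;
		if (segs[i].first_sect + segs[i].nsect != SECT_PER_PAGE)
			cur = NULL;	/* hole after this data */
	}
	return (n);
}

int
main(void)
{
	static char buf[3 * 4096];
	struct seg segs[3] = { { 0, 8 }, { 0, 8 }, { 0, 4 } };
	struct iovec iov[3];

	/* Three contiguous pages of data collapse into a single iovec. */
	printf("%d iovecs, first len %zu\n",
	    build_iovecs(buf, segs, 3, iov), iov[0].iov_len);
	return (0);
}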
*/ p_vaddr = &file_data->xiovecs_vaddr[xuio.uio_iovcnt]; *p_vaddr = xbb_reqlist_vaddr(reqlist, seg_idx, xbb_sg->first_sect); #endif /* XBB_USE_BOUNCE_BUFFERS */ xiovec->iov_len = 0; xuio.uio_iovcnt++; } xiovec->iov_len += xbb_sg->nsect << 9; xuio.uio_resid += xbb_sg->nsect << 9; /* * If the last sector is not the full page * size count, the next segment will not be * contiguous in KVA and we need a new iovec. */ if (xbb_sg->last_sect != (PAGE_SIZE - 512) >> 9) xiovec = NULL; } xuio.uio_td = curthread; #ifdef XBB_USE_BOUNCE_BUFFERS saved_uio_iovcnt = xuio.uio_iovcnt; if (operation == BIO_WRITE) { /* Copy the write data to the local buffer. */ for (seg_idx = 0, p_vaddr = file_data->xiovecs_vaddr, xiovec = xuio.uio_iov; seg_idx < xuio.uio_iovcnt; seg_idx++, xiovec++, p_vaddr++) { memcpy(xiovec->iov_base, *p_vaddr, xiovec->iov_len); } } else { /* * We only need to save off the iovecs in the case of a * read, because the copy for the read happens after the * VOP_READ(). (The uio will get modified in that call * sequence.) */ memcpy(file_data->saved_xiovecs, xuio.uio_iov, xuio.uio_iovcnt * sizeof(xuio.uio_iov[0])); } #endif /* XBB_USE_BOUNCE_BUFFERS */ switch (operation) { case BIO_READ: SDT_PROBE3(xbb, kernel, xbb_dispatch_file, read, device_get_unit(xbb->dev), xuio.uio_offset, xuio.uio_resid); vn_lock(xbb->vn, LK_EXCLUSIVE | LK_RETRY); /* * UFS pays attention to IO_DIRECT for reads. If the * DIRECTIO option is configured into the kernel, it calls * ffs_rawread(). But that only works for single-segment * uios with user space addresses. In our case, with a * kernel uio, it still reads into the buffer cache, but it * will just try to release the buffer from the cache later * on in ffs_read(). * * ZFS does not pay attention to IO_DIRECT for reads. * * UFS does not pay attention to IO_SYNC for reads. * * ZFS pays attention to IO_SYNC (which translates into the * Solaris define FRSYNC for zfs_read()) for reads. It * attempts to sync the file before reading. * * So, to attempt to provide some barrier semantics in the * BIO_ORDERED case, set both IO_DIRECT and IO_SYNC. */ error = VOP_READ(xbb->vn, &xuio, (flags & BIO_ORDERED) ? (IO_DIRECT|IO_SYNC) : 0, file_data->cred); VOP_UNLOCK(xbb->vn, 0); break; case BIO_WRITE: { struct mount *mountpoint; SDT_PROBE3(xbb, kernel, xbb_dispatch_file, write, device_get_unit(xbb->dev), xuio.uio_offset, xuio.uio_resid); (void)vn_start_write(xbb->vn, &mountpoint, V_WAIT); vn_lock(xbb->vn, LK_EXCLUSIVE | LK_RETRY); /* * UFS pays attention to IO_DIRECT for writes. The write * is done asynchronously. (Normally the write would just * get put into cache. * * UFS pays attention to IO_SYNC for writes. It will * attempt to write the buffer out synchronously if that * flag is set. * * ZFS does not pay attention to IO_DIRECT for writes. * * ZFS pays attention to IO_SYNC (a.k.a. FSYNC or FRSYNC) * for writes. It will flush the transaction from the * cache before returning. * * So if we've got the BIO_ORDERED flag set, we want * IO_SYNC in either the UFS or ZFS case. */ error = VOP_WRITE(xbb->vn, &xuio, (flags & BIO_ORDERED) ? 
IO_SYNC : 0, file_data->cred); VOP_UNLOCK(xbb->vn, 0); vn_finished_write(mountpoint); break; } default: panic("invalid operation %d", operation); /* NOTREACHED */ } #ifdef XBB_USE_BOUNCE_BUFFERS /* We only need to copy here for read operations */ if (operation == BIO_READ) { for (seg_idx = 0, p_vaddr = file_data->xiovecs_vaddr, xiovec = file_data->saved_xiovecs; seg_idx < saved_uio_iovcnt; seg_idx++, xiovec++, p_vaddr++) { /* * Note that we have to use the copy of the * io vector we made above. uiomove() modifies * the uio and its referenced vector as uiomove * performs the copy, so we can't rely on any * state from the original uio. */ memcpy(*p_vaddr, xiovec->iov_base, xiovec->iov_len); } } #endif /* XBB_USE_BOUNCE_BUFFERS */ bailout_send_response: if (error != 0) reqlist->status = BLKIF_RSP_ERROR; xbb_complete_reqlist(xbb, reqlist); return (0); } /*--------------------------- Backend Configuration --------------------------*/ /** * Close and cleanup any backend device/file specific state for this * block back instance. * * \param xbb Per-instance xbb configuration structure. */ static void xbb_close_backend(struct xbb_softc *xbb) { DROP_GIANT(); DPRINTF("closing dev=%s\n", xbb->dev_name); if (xbb->vn) { int flags = FREAD; if ((xbb->flags & XBBF_READ_ONLY) == 0) flags |= FWRITE; switch (xbb->device_type) { case XBB_TYPE_DISK: if (xbb->backend.dev.csw) { dev_relthread(xbb->backend.dev.cdev, xbb->backend.dev.dev_ref); xbb->backend.dev.csw = NULL; xbb->backend.dev.cdev = NULL; } break; case XBB_TYPE_FILE: break; case XBB_TYPE_NONE: default: panic("Unexpected backend type."); break; } (void)vn_close(xbb->vn, flags, NOCRED, curthread); xbb->vn = NULL; switch (xbb->device_type) { case XBB_TYPE_DISK: break; case XBB_TYPE_FILE: if (xbb->backend.file.cred != NULL) { crfree(xbb->backend.file.cred); xbb->backend.file.cred = NULL; } break; case XBB_TYPE_NONE: default: panic("Unexpected backend type."); break; } } PICKUP_GIANT(); } /** * Open a character device to be used for backend I/O. * * \param xbb Per-instance xbb configuration structure. * * \return 0 for success, errno codes for failure. */ static int xbb_open_dev(struct xbb_softc *xbb) { struct vattr vattr; struct cdev *dev; struct cdevsw *devsw; int error; xbb->device_type = XBB_TYPE_DISK; xbb->dispatch_io = xbb_dispatch_dev; xbb->backend.dev.cdev = xbb->vn->v_rdev; xbb->backend.dev.csw = dev_refthread(xbb->backend.dev.cdev, &xbb->backend.dev.dev_ref); if (xbb->backend.dev.csw == NULL) panic("Unable to retrieve device switch"); error = VOP_GETATTR(xbb->vn, &vattr, NOCRED); if (error) { xenbus_dev_fatal(xbb->dev, error, "error getting " "vnode attributes for device %s", xbb->dev_name); return (error); } dev = xbb->vn->v_rdev; devsw = dev->si_devsw; if (!devsw->d_ioctl) { xenbus_dev_fatal(xbb->dev, ENODEV, "no d_ioctl for " "device %s!", xbb->dev_name); return (ENODEV); } error = devsw->d_ioctl(dev, DIOCGSECTORSIZE, (caddr_t)&xbb->sector_size, FREAD, curthread); if (error) { xenbus_dev_fatal(xbb->dev, error, "error calling ioctl DIOCGSECTORSIZE " "for device %s", xbb->dev_name); return (error); } error = devsw->d_ioctl(dev, DIOCGMEDIASIZE, (caddr_t)&xbb->media_size, FREAD, curthread); if (error) { xenbus_dev_fatal(xbb->dev, error, "error calling ioctl DIOCGMEDIASIZE " "for device %s", xbb->dev_name); return (error); } return (0); } /** * Open a file to be used for backend I/O. * * \param xbb Per-instance xbb configuration structure. * * \return 0 for success, errno codes for failure. 
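Editor's note: xbb_open_dev() sizes the backing device by calling its d_ioctl entry point directly with DIOCGSECTORSIZE and DIOCGMEDIASIZE. The same information is available from user space on FreeBSD through ioctl(2) on the device node, which can be useful when checking what the backend will export; /dev/ada0 is just an example path and error handling is minimal.

#include <sys/types.h>
#include <sys/disk.h>
#include <sys/ioctl.h>
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

int
main(void)
{
	u_int sectorsize;
	off_t mediasize;
	int fd;

	fd = open("/dev/ada0", O_RDONLY);	/* example device */
	if (fd < 0 ||
	    ioctl(fd, DIOCGSECTORSIZE, &sectorsize) != 0 ||
	    ioctl(fd, DIOCGMEDIASIZE, &mediasize) != 0) {
		perror("disk query");
		return (1);
	}
	printf("sector size %u, media size %jd, sectors %jd\n",
	    sectorsize, (intmax_t)mediasize,
	    (intmax_t)(mediasize / sectorsize));
	close(fd);
	return (0);
}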
*/ static int xbb_open_file(struct xbb_softc *xbb) { struct xbb_file_data *file_data; struct vattr vattr; int error; file_data = &xbb->backend.file; xbb->device_type = XBB_TYPE_FILE; xbb->dispatch_io = xbb_dispatch_file; error = VOP_GETATTR(xbb->vn, &vattr, curthread->td_ucred); if (error != 0) { xenbus_dev_fatal(xbb->dev, error, "error calling VOP_GETATTR()" "for file %s", xbb->dev_name); return (error); } /* * Verify that we have the ability to upgrade to exclusive * access on this file so we can trap errors at open instead * of reporting them during first access. */ if (VOP_ISLOCKED(xbb->vn) != LK_EXCLUSIVE) { vn_lock(xbb->vn, LK_UPGRADE | LK_RETRY); if (xbb->vn->v_iflag & VI_DOOMED) { error = EBADF; xenbus_dev_fatal(xbb->dev, error, "error locking file %s", xbb->dev_name); return (error); } } file_data->cred = crhold(curthread->td_ucred); xbb->media_size = vattr.va_size; /* * XXX KDM vattr.va_blocksize may be larger than 512 bytes here. * With ZFS, it is 131072 bytes. Block sizes that large don't work * with disklabel and UFS on FreeBSD at least. Large block sizes * may not work with other OSes as well. So just export a sector * size of 512 bytes, which should work with any OS or * application. Since our backing is a file, any block size will * work fine for the backing store. */ #if 0 xbb->sector_size = vattr.va_blocksize; #endif xbb->sector_size = 512; /* * Sanity check. The media size has to be at least one * sector long. */ if (xbb->media_size < xbb->sector_size) { error = EINVAL; xenbus_dev_fatal(xbb->dev, error, "file %s size %ju < block size %u", xbb->dev_name, (uintmax_t)xbb->media_size, xbb->sector_size); } return (error); } /** * Open the backend provider for this connection. * * \param xbb Per-instance xbb configuration structure. * * \return 0 for success, errno codes for failure. */ static int xbb_open_backend(struct xbb_softc *xbb) { struct nameidata nd; int flags; int error; flags = FREAD; error = 0; DPRINTF("opening dev=%s\n", xbb->dev_name); if (rootvnode == NULL) { xenbus_dev_fatal(xbb->dev, ENOENT, "Root file system not mounted"); return (ENOENT); } if ((xbb->flags & XBBF_READ_ONLY) == 0) flags |= FWRITE; pwd_ensure_dirs(); again: NDINIT(&nd, LOOKUP, FOLLOW, UIO_SYSSPACE, xbb->dev_name, curthread); error = vn_open(&nd, &flags, 0, NULL); if (error) { /* * This is the only reasonable guess we can make as far as * path if the user doesn't give us a fully qualified path. * If they want to specify a file, they need to specify the * full path. */ if (xbb->dev_name[0] != '/') { char *dev_path = "/dev/"; char *dev_name; /* Try adding device path at beginning of name */ dev_name = malloc(strlen(xbb->dev_name) + strlen(dev_path) + 1, M_XENBLOCKBACK, M_NOWAIT); if (dev_name) { sprintf(dev_name, "%s%s", dev_path, xbb->dev_name); free(xbb->dev_name, M_XENBLOCKBACK); xbb->dev_name = dev_name; goto again; } } xenbus_dev_fatal(xbb->dev, error, "error opening device %s", xbb->dev_name); return (error); } NDFREE(&nd, NDF_ONLY_PNBUF); xbb->vn = nd.ni_vp; /* We only support disks and files. 
*/ if (vn_isdisk(xbb->vn, &error)) { error = xbb_open_dev(xbb); } else if (xbb->vn->v_type == VREG) { error = xbb_open_file(xbb); } else { error = EINVAL; xenbus_dev_fatal(xbb->dev, error, "%s is not a disk " "or file", xbb->dev_name); } VOP_UNLOCK(xbb->vn, 0); if (error != 0) { xbb_close_backend(xbb); return (error); } xbb->sector_size_shift = fls(xbb->sector_size) - 1; xbb->media_num_sectors = xbb->media_size >> xbb->sector_size_shift; DPRINTF("opened %s=%s sector_size=%u media_size=%" PRId64 "\n", (xbb->device_type == XBB_TYPE_DISK) ? "dev" : "file", xbb->dev_name, xbb->sector_size, xbb->media_size); return (0); } /*------------------------ Inter-Domain Communication ------------------------*/ /** * Free dynamically allocated KVA or pseudo-physical address allocations. * * \param xbb Per-instance xbb configuration structure. */ static void xbb_free_communication_mem(struct xbb_softc *xbb) { if (xbb->kva != 0) { if (xbb->pseudo_phys_res != NULL) { xenmem_free(xbb->dev, xbb->pseudo_phys_res_id, xbb->pseudo_phys_res); xbb->pseudo_phys_res = NULL; } } xbb->kva = 0; xbb->gnt_base_addr = 0; if (xbb->kva_free != NULL) { free(xbb->kva_free, M_XENBLOCKBACK); xbb->kva_free = NULL; } } /** * Cleanup all inter-domain communication mechanisms. * * \param xbb Per-instance xbb configuration structure. */ static int xbb_disconnect(struct xbb_softc *xbb) { struct gnttab_unmap_grant_ref ops[XBB_MAX_RING_PAGES]; struct gnttab_unmap_grant_ref *op; u_int ring_idx; int error; DPRINTF("\n"); if ((xbb->flags & XBBF_RING_CONNECTED) == 0) return (0); xen_intr_unbind(&xbb->xen_intr_handle); mtx_unlock(&xbb->lock); taskqueue_drain(xbb->io_taskqueue, &xbb->io_task); mtx_lock(&xbb->lock); /* * No new interrupts can generate work, but we must wait * for all currently active requests to drain. */ if (xbb->active_request_count != 0) return (EAGAIN); for (ring_idx = 0, op = ops; ring_idx < xbb->ring_config.ring_pages; ring_idx++, op++) { op->host_addr = xbb->ring_config.gnt_addr + (ring_idx * PAGE_SIZE); op->dev_bus_addr = xbb->ring_config.bus_addr[ring_idx]; op->handle = xbb->ring_config.handle[ring_idx]; } error = HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, ops, xbb->ring_config.ring_pages); if (error != 0) panic("Grant table op failed (%d)", error); xbb_free_communication_mem(xbb); if (xbb->requests != NULL) { free(xbb->requests, M_XENBLOCKBACK); xbb->requests = NULL; } if (xbb->request_lists != NULL) { struct xbb_xen_reqlist *reqlist; int i; /* There is one request list for ever allocated request. */ for (i = 0, reqlist = xbb->request_lists; i < xbb->max_requests; i++, reqlist++){ #ifdef XBB_USE_BOUNCE_BUFFERS if (reqlist->bounce != NULL) { free(reqlist->bounce, M_XENBLOCKBACK); reqlist->bounce = NULL; } #endif if (reqlist->gnt_handles != NULL) { free(reqlist->gnt_handles, M_XENBLOCKBACK); reqlist->gnt_handles = NULL; } } free(xbb->request_lists, M_XENBLOCKBACK); xbb->request_lists = NULL; } xbb->flags &= ~XBBF_RING_CONNECTED; return (0); } /** * Map shared memory ring into domain local address space, initialize * ring control structures, and bind an interrupt to the event channel * used to notify us of ring changes. * * \param xbb Per-instance xbb configuration structure. */ static int xbb_connect_ring(struct xbb_softc *xbb) { struct gnttab_map_grant_ref gnts[XBB_MAX_RING_PAGES]; struct gnttab_map_grant_ref *gnt; u_int ring_idx; int error; if ((xbb->flags & XBBF_RING_CONNECTED) != 0) return (0); /* * Kva for our ring is at the tail of the region of kva allocated * by xbb_alloc_communication_mem(). 
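Editor's note: once the backend is open, sector_size_shift is derived as fls(sector_size) - 1, which is only a faithful log2 when the sector size is a power of two (as it is for 512 and 4096). The sketch below illustrates that relationship with a portable stand-in for fls(3) (FreeBSD declares the real one in <strings.h>).

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Highest set bit position, 1-based: a portable stand-in for fls(3). */
static int
my_fls(uint32_t v)
{
	int bit = 0;

	while (v != 0) {
		bit++;
		v >>= 1;
	}
	return (bit);
}

int
main(void)
{
	uint32_t sector_size = 4096;
	int shift = my_fls(sector_size) - 1;

	assert((1u << shift) == sector_size);	/* holds for powers of two */
	printf("sector_size %u -> shift %d, a 1 GiB device has %u sectors\n",
	    sector_size, shift, (uint32_t)((1ULL << 30) >> shift));
	return (0);
}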
*/ xbb->ring_config.va = xbb->kva + (xbb->kva_size - (xbb->ring_config.ring_pages * PAGE_SIZE)); xbb->ring_config.gnt_addr = xbb->gnt_base_addr + (xbb->kva_size - (xbb->ring_config.ring_pages * PAGE_SIZE)); for (ring_idx = 0, gnt = gnts; ring_idx < xbb->ring_config.ring_pages; ring_idx++, gnt++) { gnt->host_addr = xbb->ring_config.gnt_addr + (ring_idx * PAGE_SIZE); gnt->flags = GNTMAP_host_map; gnt->ref = xbb->ring_config.ring_ref[ring_idx]; gnt->dom = xbb->otherend_id; } error = HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, gnts, xbb->ring_config.ring_pages); if (error) panic("blkback: Ring page grant table op failed (%d)", error); for (ring_idx = 0, gnt = gnts; ring_idx < xbb->ring_config.ring_pages; ring_idx++, gnt++) { if (gnt->status != 0) { xbb->ring_config.va = 0; xenbus_dev_fatal(xbb->dev, EACCES, "Ring shared page mapping failed. " "Status %d.", gnt->status); return (EACCES); } xbb->ring_config.handle[ring_idx] = gnt->handle; xbb->ring_config.bus_addr[ring_idx] = gnt->dev_bus_addr; } /* Initialize the ring based on ABI. */ switch (xbb->abi) { case BLKIF_PROTOCOL_NATIVE: { blkif_sring_t *sring; sring = (blkif_sring_t *)xbb->ring_config.va; BACK_RING_INIT(&xbb->rings.native, sring, xbb->ring_config.ring_pages * PAGE_SIZE); break; } case BLKIF_PROTOCOL_X86_32: { blkif_x86_32_sring_t *sring_x86_32; sring_x86_32 = (blkif_x86_32_sring_t *)xbb->ring_config.va; BACK_RING_INIT(&xbb->rings.x86_32, sring_x86_32, xbb->ring_config.ring_pages * PAGE_SIZE); break; } case BLKIF_PROTOCOL_X86_64: { blkif_x86_64_sring_t *sring_x86_64; sring_x86_64 = (blkif_x86_64_sring_t *)xbb->ring_config.va; BACK_RING_INIT(&xbb->rings.x86_64, sring_x86_64, xbb->ring_config.ring_pages * PAGE_SIZE); break; } default: panic("Unexpected blkif protocol ABI."); } xbb->flags |= XBBF_RING_CONNECTED; error = xen_intr_bind_remote_port(xbb->dev, xbb->otherend_id, xbb->ring_config.evtchn, xbb_filter, /*ithread_handler*/NULL, /*arg*/xbb, INTR_TYPE_BIO | INTR_MPSAFE, &xbb->xen_intr_handle); if (error) { (void)xbb_disconnect(xbb); xenbus_dev_fatal(xbb->dev, error, "binding event channel"); return (error); } DPRINTF("rings connected!\n"); return 0; } /** * Size KVA and pseudo-physical address allocations based on negotiated * values for the size and number of I/O requests, and the size of our * communication ring. * * \param xbb Per-instance xbb configuration structure. * * These address spaces are used to dynamically map pages in the * front-end's domain into our own. */ static int xbb_alloc_communication_mem(struct xbb_softc *xbb) { xbb->reqlist_kva_pages = xbb->max_requests * xbb->max_request_segments; xbb->reqlist_kva_size = xbb->reqlist_kva_pages * PAGE_SIZE; xbb->kva_size = xbb->reqlist_kva_size + (xbb->ring_config.ring_pages * PAGE_SIZE); xbb->kva_free = bit_alloc(xbb->reqlist_kva_pages, M_XENBLOCKBACK, M_NOWAIT); if (xbb->kva_free == NULL) return (ENOMEM); DPRINTF("%s: kva_size = %d, reqlist_kva_size = %d\n", device_get_nameunit(xbb->dev), xbb->kva_size, xbb->reqlist_kva_size); /* * Reserve a range of pseudo physical memory that we can map * into kva. These pages will only be backed by machine * pages ("real memory") during the lifetime of front-end requests * via grant table operations. 
*/ xbb->pseudo_phys_res_id = 0; xbb->pseudo_phys_res = xenmem_alloc(xbb->dev, &xbb->pseudo_phys_res_id, xbb->kva_size); if (xbb->pseudo_phys_res == NULL) { xbb->kva = 0; return (ENOMEM); } xbb->kva = (vm_offset_t)rman_get_virtual(xbb->pseudo_phys_res); xbb->gnt_base_addr = rman_get_start(xbb->pseudo_phys_res); DPRINTF("%s: kva: %#jx, gnt_base_addr: %#jx\n", device_get_nameunit(xbb->dev), (uintmax_t)xbb->kva, (uintmax_t)xbb->gnt_base_addr); return (0); } /** * Collect front-end information from the XenStore. * * \param xbb Per-instance xbb configuration structure. */ static int xbb_collect_frontend_info(struct xbb_softc *xbb) { char protocol_abi[64]; const char *otherend_path; int error; u_int ring_idx; u_int ring_page_order; size_t ring_size; otherend_path = xenbus_get_otherend_path(xbb->dev); /* * Protocol defaults valid even if all negotiation fails. */ xbb->ring_config.ring_pages = 1; xbb->max_request_segments = BLKIF_MAX_SEGMENTS_PER_REQUEST; xbb->max_request_size = xbb->max_request_segments * PAGE_SIZE; /* * Mandatory data (used in all versions of the protocol) first. */ error = xs_scanf(XST_NIL, otherend_path, "event-channel", NULL, "%" PRIu32, &xbb->ring_config.evtchn); if (error != 0) { xenbus_dev_fatal(xbb->dev, error, "Unable to retrieve event-channel information " "from frontend %s. Unable to connect.", xenbus_get_otherend_path(xbb->dev)); return (error); } /* * These fields are initialized to legacy protocol defaults * so we only need to fail if reading the updated value succeeds * and the new value is outside of its allowed range. * * \note xs_gather() returns on the first encountered error, so * we must use independent calls in order to guarantee * we don't miss information in a sparsly populated front-end * tree. * * \note xs_scanf() does not update variables for unmatched * fields. */ ring_page_order = 0; xbb->max_requests = 32; (void)xs_scanf(XST_NIL, otherend_path, "ring-page-order", NULL, "%u", &ring_page_order); xbb->ring_config.ring_pages = 1 << ring_page_order; ring_size = PAGE_SIZE * xbb->ring_config.ring_pages; xbb->max_requests = BLKIF_MAX_RING_REQUESTS(ring_size); if (xbb->ring_config.ring_pages > XBB_MAX_RING_PAGES) { xenbus_dev_fatal(xbb->dev, EINVAL, "Front-end specified ring-pages of %u " "exceeds backend limit of %u. " "Unable to connect.", xbb->ring_config.ring_pages, XBB_MAX_RING_PAGES); return (EINVAL); } if (xbb->ring_config.ring_pages == 1) { error = xs_gather(XST_NIL, otherend_path, "ring-ref", "%" PRIu32, &xbb->ring_config.ring_ref[0], NULL); if (error != 0) { xenbus_dev_fatal(xbb->dev, error, "Unable to retrieve ring information " "from frontend %s. Unable to " "connect.", xenbus_get_otherend_path(xbb->dev)); return (error); } } else { /* Multi-page ring format. */ for (ring_idx = 0; ring_idx < xbb->ring_config.ring_pages; ring_idx++) { char ring_ref_name[]= "ring_refXX"; snprintf(ring_ref_name, sizeof(ring_ref_name), "ring-ref%u", ring_idx); error = xs_scanf(XST_NIL, otherend_path, ring_ref_name, NULL, "%" PRIu32, &xbb->ring_config.ring_ref[ring_idx]); if (error != 0) { xenbus_dev_fatal(xbb->dev, error, "Failed to retriev grant " "reference for page %u of " "shared ring. Unable " "to connect.", ring_idx); return (error); } } } error = xs_gather(XST_NIL, otherend_path, "protocol", "%63s", protocol_abi, NULL); if (error != 0 || !strcmp(protocol_abi, XEN_IO_PROTO_ABI_NATIVE)) { /* * Assume native if the frontend has not * published ABI data or it has published and * matches our own ABI. 
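Editor's note: xbb_collect_frontend_info() turns the negotiated ring-page-order into a request limit via BLKIF_MAX_RING_REQUESTS(ring_size); the Xen ring macros size a ring as the largest power-of-two number of slots that fits in the shared pages after the ring header. The model below sketches only that sizing idea; the header and slot sizes are placeholders, not the real blkif layout.

#include <stddef.h>
#include <stdio.h>

/* Largest power of two that is <= n. */
static size_t
round_down_pow2(size_t n)
{
	size_t p = 1;

	while (p * 2 <= n)
		p *= 2;
	return (p);
}

/* Usable request slots in a shared ring of 'pages' 4 KiB pages. */
static size_t
ring_slots(size_t pages, size_t header_bytes, size_t slot_bytes)
{
	return (round_down_pow2((pages * 4096 - header_bytes) / slot_bytes));
}

int
main(void)
{
	/* 64-byte header and 112-byte slots are illustrative numbers only. */
	printf("1 page  -> %zu slots\n", ring_slots(1, 64, 112));
	printf("4 pages -> %zu slots\n", ring_slots(4, 64, 112));
	return (0);
}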
*/ xbb->abi = BLKIF_PROTOCOL_NATIVE; } else if (!strcmp(protocol_abi, XEN_IO_PROTO_ABI_X86_32)) { xbb->abi = BLKIF_PROTOCOL_X86_32; } else if (!strcmp(protocol_abi, XEN_IO_PROTO_ABI_X86_64)) { xbb->abi = BLKIF_PROTOCOL_X86_64; } else { xenbus_dev_fatal(xbb->dev, EINVAL, "Unknown protocol ABI (%s) published by " "frontend. Unable to connect.", protocol_abi); return (EINVAL); } return (0); } /** * Allocate per-request data structures given request size and number * information negotiated with the front-end. * * \param xbb Per-instance xbb configuration structure. */ static int xbb_alloc_requests(struct xbb_softc *xbb) { struct xbb_xen_req *req; struct xbb_xen_req *last_req; /* * Allocate request book keeping datastructures. */ - xbb->requests = malloc(xbb->max_requests * sizeof(*xbb->requests), + xbb->requests = mallocarray(xbb->max_requests, sizeof(*xbb->requests), M_XENBLOCKBACK, M_NOWAIT|M_ZERO); if (xbb->requests == NULL) { xenbus_dev_fatal(xbb->dev, ENOMEM, "Unable to allocate request structures"); return (ENOMEM); } req = xbb->requests; last_req = &xbb->requests[xbb->max_requests - 1]; STAILQ_INIT(&xbb->request_free_stailq); while (req <= last_req) { STAILQ_INSERT_TAIL(&xbb->request_free_stailq, req, links); req++; } return (0); } static int xbb_alloc_request_lists(struct xbb_softc *xbb) { struct xbb_xen_reqlist *reqlist; int i; /* * If no requests can be merged, we need 1 request list per * in flight request. */ - xbb->request_lists = malloc(xbb->max_requests * + xbb->request_lists = mallocarray(xbb->max_requests, sizeof(*xbb->request_lists), M_XENBLOCKBACK, M_NOWAIT|M_ZERO); if (xbb->request_lists == NULL) { xenbus_dev_fatal(xbb->dev, ENOMEM, "Unable to allocate request list structures"); return (ENOMEM); } STAILQ_INIT(&xbb->reqlist_free_stailq); STAILQ_INIT(&xbb->reqlist_pending_stailq); for (i = 0; i < xbb->max_requests; i++) { int seg; reqlist = &xbb->request_lists[i]; reqlist->xbb = xbb; #ifdef XBB_USE_BOUNCE_BUFFERS reqlist->bounce = malloc(xbb->max_reqlist_size, M_XENBLOCKBACK, M_NOWAIT); if (reqlist->bounce == NULL) { xenbus_dev_fatal(xbb->dev, ENOMEM, "Unable to allocate request " "bounce buffers"); return (ENOMEM); } #endif /* XBB_USE_BOUNCE_BUFFERS */ - reqlist->gnt_handles = malloc(xbb->max_reqlist_segments * + reqlist->gnt_handles = mallocarray(xbb->max_reqlist_segments, sizeof(*reqlist->gnt_handles), M_XENBLOCKBACK, M_NOWAIT|M_ZERO); if (reqlist->gnt_handles == NULL) { xenbus_dev_fatal(xbb->dev, ENOMEM, "Unable to allocate request " "grant references"); return (ENOMEM); } for (seg = 0; seg < xbb->max_reqlist_segments; seg++) reqlist->gnt_handles[seg] = GRANT_REF_INVALID; STAILQ_INSERT_TAIL(&xbb->reqlist_free_stailq, reqlist, links); } return (0); } /** * Supply information about the physical device to the frontend * via XenBus. * * \param xbb Per-instance xbb configuration structure. */ static int xbb_publish_backend_info(struct xbb_softc *xbb) { struct xs_transaction xst; const char *our_path; const char *leaf; int error; our_path = xenbus_get_node(xbb->dev); while (1) { error = xs_transaction_start(&xst); if (error != 0) { xenbus_dev_fatal(xbb->dev, error, "Error publishing backend info " "(start transaction)"); return (error); } leaf = "sectors"; error = xs_printf(xst, our_path, leaf, "%"PRIu64, xbb->media_num_sectors); if (error != 0) break; /* XXX Support all VBD attributes here. */ leaf = "info"; error = xs_printf(xst, our_path, leaf, "%u", xbb->flags & XBBF_READ_ONLY ? 
VDISK_READONLY : 0); if (error != 0) break; leaf = "sector-size"; error = xs_printf(xst, our_path, leaf, "%u", xbb->sector_size); if (error != 0) break; error = xs_transaction_end(xst, 0); if (error == 0) { return (0); } else if (error != EAGAIN) { xenbus_dev_fatal(xbb->dev, error, "ending transaction"); return (error); } } xenbus_dev_fatal(xbb->dev, error, "writing %s/%s", our_path, leaf); xs_transaction_end(xst, 1); return (error); } /** * Connect to our blkfront peer now that it has completed publishing * its configuration into the XenStore. * * \param xbb Per-instance xbb configuration structure. */ static void xbb_connect(struct xbb_softc *xbb) { int error; if (xenbus_get_state(xbb->dev) != XenbusStateInitialised) return; if (xbb_collect_frontend_info(xbb) != 0) return; xbb->flags &= ~XBBF_SHUTDOWN; /* * We limit the maximum number of reqlist segments to the maximum * number of segments in the ring, or our absolute maximum, * whichever is smaller. */ xbb->max_reqlist_segments = MIN(xbb->max_request_segments * xbb->max_requests, XBB_MAX_SEGMENTS_PER_REQLIST); /* * The maximum size is simply a function of the number of segments * we can handle. */ xbb->max_reqlist_size = xbb->max_reqlist_segments * PAGE_SIZE; /* Allocate resources whose size depends on front-end configuration. */ error = xbb_alloc_communication_mem(xbb); if (error != 0) { xenbus_dev_fatal(xbb->dev, error, "Unable to allocate communication memory"); return; } error = xbb_alloc_requests(xbb); if (error != 0) { /* Specific errors are reported by xbb_alloc_requests(). */ return; } error = xbb_alloc_request_lists(xbb); if (error != 0) { /* Specific errors are reported by xbb_alloc_request_lists(). */ return; } /* * Connect communication channel. */ error = xbb_connect_ring(xbb); if (error != 0) { /* Specific errors are reported by xbb_connect_ring(). */ return; } if (xbb_publish_backend_info(xbb) != 0) { /* * If we can't publish our data, we cannot participate * in this connection, and waiting for a front-end state * change will not help the situation. */ (void)xbb_disconnect(xbb); return; } /* Ready for I/O. */ xenbus_set_state(xbb->dev, XenbusStateConnected); } /*-------------------------- Device Teardown Support -------------------------*/ /** * Perform device shutdown functions. * * \param xbb Per-instance xbb configuration structure. * * Mark this instance as shutting down, wait for any active I/O on the * backend device/file to drain, disconnect from the front-end, and notify * any waiters (e.g. a thread invoking our detach method) that detach can * now proceed. */ static int xbb_shutdown(struct xbb_softc *xbb) { XenbusState frontState; int error; DPRINTF("\n"); /* * Due to the need to drop our mutex during some * xenbus operations, it is possible for two threads * to attempt to close out shutdown processing at * the same time. Tell the caller that hits this * race to try back later. */ if ((xbb->flags & XBBF_IN_SHUTDOWN) != 0) return (EAGAIN); xbb->flags |= XBBF_IN_SHUTDOWN; mtx_unlock(&xbb->lock); if (xbb->hotplug_watch.node != NULL) { xs_unregister_watch(&xbb->hotplug_watch); free(xbb->hotplug_watch.node, M_XENBLOCKBACK); xbb->hotplug_watch.node = NULL; } if (xenbus_get_state(xbb->dev) < XenbusStateClosing) xenbus_set_state(xbb->dev, XenbusStateClosing); frontState = xenbus_get_otherend_state(xbb->dev); mtx_lock(&xbb->lock); xbb->flags &= ~XBBF_IN_SHUTDOWN; /* Wait for the frontend to disconnect (if it's connected). 
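Editor's note: the functional change in this portion of the diff is visible above: the malloc(nmemb * size, ...) calls that size the request, request-list, and grant-handle arrays become mallocarray(nmemb, size, ...), which fails instead of silently wrapping when the multiplication overflows. Below is a user-space model of the guard mallocarray(9) provides; checked_alloc is an illustrative name, and the kernel function additionally takes a malloc type and flags.

#include <errno.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/* Allocate nmemb * size bytes, refusing multiplications that overflow. */
static void *
checked_alloc(size_t nmemb, size_t size)
{
	if (size != 0 && nmemb > SIZE_MAX / size) {
		errno = ENOMEM;
		return (NULL);
	}
	return (malloc(nmemb * size));
}

int
main(void)
{
	void *p;

	p = checked_alloc(32, sizeof(long long));	/* fine */
	printf("small: %s\n", p != NULL ? "ok" : "failed");
	free(p);

	p = checked_alloc(SIZE_MAX / 2, 4);		/* would overflow */
	printf("huge:  %s\n", p != NULL ? "ok" : "failed");
	free(p);
	return (0);
}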
*/ if (frontState == XenbusStateConnected) return (EAGAIN); DPRINTF("\n"); /* Indicate shutdown is in progress. */ xbb->flags |= XBBF_SHUTDOWN; /* Disconnect from the front-end. */ error = xbb_disconnect(xbb); if (error != 0) { /* * Requests still outstanding. We'll be called again * once they complete. */ KASSERT(error == EAGAIN, ("%s: Unexpected xbb_disconnect() failure %d", __func__, error)); return (error); } DPRINTF("\n"); /* Indicate to xbb_detach() that is it safe to proceed. */ wakeup(xbb); return (0); } /** * Report an attach time error to the console and Xen, and cleanup * this instance by forcing immediate detach processing. * * \param xbb Per-instance xbb configuration structure. * \param err Errno describing the error. * \param fmt Printf style format and arguments */ static void xbb_attach_failed(struct xbb_softc *xbb, int err, const char *fmt, ...) { va_list ap; va_list ap_hotplug; va_start(ap, fmt); va_copy(ap_hotplug, ap); xs_vprintf(XST_NIL, xenbus_get_node(xbb->dev), "hotplug-error", fmt, ap_hotplug); va_end(ap_hotplug); xs_printf(XST_NIL, xenbus_get_node(xbb->dev), "hotplug-status", "error"); xenbus_dev_vfatal(xbb->dev, err, fmt, ap); va_end(ap); xs_printf(XST_NIL, xenbus_get_node(xbb->dev), "online", "0"); mtx_lock(&xbb->lock); xbb_shutdown(xbb); mtx_unlock(&xbb->lock); } /*---------------------------- NewBus Entrypoints ----------------------------*/ /** * Inspect a XenBus device and claim it if is of the appropriate type. * * \param dev NewBus device object representing a candidate XenBus device. * * \return 0 for success, errno codes for failure. */ static int xbb_probe(device_t dev) { if (!strcmp(xenbus_get_type(dev), "vbd")) { device_set_desc(dev, "Backend Virtual Block Device"); device_quiet(dev); return (0); } return (ENXIO); } /** * Setup sysctl variables to control various Block Back parameters. * * \param xbb Xen Block Back softc. 
* */ static void xbb_setup_sysctl(struct xbb_softc *xbb) { struct sysctl_ctx_list *sysctl_ctx = NULL; struct sysctl_oid *sysctl_tree = NULL; sysctl_ctx = device_get_sysctl_ctx(xbb->dev); if (sysctl_ctx == NULL) return; sysctl_tree = device_get_sysctl_tree(xbb->dev); if (sysctl_tree == NULL) return; SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree), OID_AUTO, "disable_flush", CTLFLAG_RW, &xbb->disable_flush, 0, "fake the flush command"); SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree), OID_AUTO, "flush_interval", CTLFLAG_RW, &xbb->flush_interval, 0, "send a real flush for N flush requests"); SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree), OID_AUTO, "no_coalesce_reqs", CTLFLAG_RW, &xbb->no_coalesce_reqs,0, "Don't coalesce contiguous requests"); SYSCTL_ADD_UQUAD(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree), OID_AUTO, "reqs_received", CTLFLAG_RW, &xbb->reqs_received, "how many I/O requests we have received"); SYSCTL_ADD_UQUAD(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree), OID_AUTO, "reqs_completed", CTLFLAG_RW, &xbb->reqs_completed, "how many I/O requests have been completed"); SYSCTL_ADD_UQUAD(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree), OID_AUTO, "reqs_queued_for_completion", CTLFLAG_RW, &xbb->reqs_queued_for_completion, "how many I/O requests queued but not yet pushed"); SYSCTL_ADD_UQUAD(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree), OID_AUTO, "reqs_completed_with_error", CTLFLAG_RW, &xbb->reqs_completed_with_error, "how many I/O requests completed with error status"); SYSCTL_ADD_UQUAD(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree), OID_AUTO, "forced_dispatch", CTLFLAG_RW, &xbb->forced_dispatch, "how many I/O dispatches were forced"); SYSCTL_ADD_UQUAD(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree), OID_AUTO, "normal_dispatch", CTLFLAG_RW, &xbb->normal_dispatch, "how many I/O dispatches were normal"); SYSCTL_ADD_UQUAD(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree), OID_AUTO, "total_dispatch", CTLFLAG_RW, &xbb->total_dispatch, "total number of I/O dispatches"); SYSCTL_ADD_UQUAD(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree), OID_AUTO, "kva_shortages", CTLFLAG_RW, &xbb->kva_shortages, "how many times we have run out of KVA"); SYSCTL_ADD_UQUAD(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree), OID_AUTO, "request_shortages", CTLFLAG_RW, &xbb->request_shortages, "how many times we have run out of requests"); SYSCTL_ADD_UINT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree), OID_AUTO, "max_requests", CTLFLAG_RD, &xbb->max_requests, 0, "maximum outstanding requests (negotiated)"); SYSCTL_ADD_UINT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree), OID_AUTO, "max_request_segments", CTLFLAG_RD, &xbb->max_request_segments, 0, "maximum number of pages per requests (negotiated)"); SYSCTL_ADD_UINT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree), OID_AUTO, "max_request_size", CTLFLAG_RD, &xbb->max_request_size, 0, "maximum size in bytes of a request (negotiated)"); SYSCTL_ADD_UINT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree), OID_AUTO, "ring_pages", CTLFLAG_RD, &xbb->ring_config.ring_pages, 0, "communication channel pages (negotiated)"); } static void xbb_attach_disk(struct xs_watch *watch, const char **vec, unsigned int len) { device_t dev; struct xbb_softc *xbb; int error; dev = (device_t) watch->callback_data; xbb = device_get_softc(dev); error = xs_gather(XST_NIL, xenbus_get_node(dev), "physical-device-path", NULL, &xbb->dev_name, NULL); if (error != 0) return; xs_unregister_watch(watch); free(watch->node, M_XENBLOCKBACK); watch->node = NULL; /* Collect physical device information. 
*/ error = xs_gather(XST_NIL, xenbus_get_otherend_path(xbb->dev), "device-type", NULL, &xbb->dev_type, NULL); if (error != 0) xbb->dev_type = NULL; error = xs_gather(XST_NIL, xenbus_get_node(dev), "mode", NULL, &xbb->dev_mode, NULL); if (error != 0) { xbb_attach_failed(xbb, error, "reading backend fields at %s", xenbus_get_node(dev)); return; } /* Parse fopen style mode flags. */ if (strchr(xbb->dev_mode, 'w') == NULL) xbb->flags |= XBBF_READ_ONLY; /* * Verify the physical device is present and can support * the desired I/O mode. */ error = xbb_open_backend(xbb); if (error != 0) { xbb_attach_failed(xbb, error, "Unable to open %s", xbb->dev_name); return; } /* Use devstat(9) for recording statistics. */ xbb->xbb_stats = devstat_new_entry("xbb", device_get_unit(xbb->dev), xbb->sector_size, DEVSTAT_ALL_SUPPORTED, DEVSTAT_TYPE_DIRECT | DEVSTAT_TYPE_IF_OTHER, DEVSTAT_PRIORITY_OTHER); xbb->xbb_stats_in = devstat_new_entry("xbbi", device_get_unit(xbb->dev), xbb->sector_size, DEVSTAT_ALL_SUPPORTED, DEVSTAT_TYPE_DIRECT | DEVSTAT_TYPE_IF_OTHER, DEVSTAT_PRIORITY_OTHER); /* * Setup sysctl variables. */ xbb_setup_sysctl(xbb); /* * Create a taskqueue for doing work that must occur from a * thread context. */ xbb->io_taskqueue = taskqueue_create_fast(device_get_nameunit(dev), M_NOWAIT, taskqueue_thread_enqueue, /*contxt*/&xbb->io_taskqueue); if (xbb->io_taskqueue == NULL) { xbb_attach_failed(xbb, error, "Unable to create taskqueue"); return; } taskqueue_start_threads(&xbb->io_taskqueue, /*num threads*/1, /*priority*/PWAIT, /*thread name*/ "%s taskq", device_get_nameunit(dev)); /* Update hot-plug status to satisfy xend. */ error = xs_printf(XST_NIL, xenbus_get_node(xbb->dev), "hotplug-status", "connected"); if (error) { xbb_attach_failed(xbb, error, "writing %s/hotplug-status", xenbus_get_node(xbb->dev)); return; } /* Tell the front end that we are ready to connect. */ xenbus_set_state(dev, XenbusStateInitialised); } /** * Attach to a XenBus device that has been claimed by our probe routine. * * \param dev NewBus device object representing this Xen Block Back instance. * * \return 0 for success, errno codes for failure. */ static int xbb_attach(device_t dev) { struct xbb_softc *xbb; int error; u_int max_ring_page_order; struct sbuf *watch_path; DPRINTF("Attaching to %s\n", xenbus_get_node(dev)); /* * Basic initialization. * After this block it is safe to call xbb_detach() * to clean up any allocated data for this instance. */ xbb = device_get_softc(dev); xbb->dev = dev; xbb->otherend_id = xenbus_get_otherend_id(dev); TASK_INIT(&xbb->io_task, /*priority*/0, xbb_run_queue, xbb); mtx_init(&xbb->lock, device_get_nameunit(dev), NULL, MTX_DEF); /* * Publish protocol capabilities for consumption by the * front-end. */ error = xs_printf(XST_NIL, xenbus_get_node(xbb->dev), "feature-barrier", "1"); if (error) { xbb_attach_failed(xbb, error, "writing %s/feature-barrier", xenbus_get_node(xbb->dev)); return (error); } error = xs_printf(XST_NIL, xenbus_get_node(xbb->dev), "feature-flush-cache", "1"); if (error) { xbb_attach_failed(xbb, error, "writing %s/feature-flush-cache", xenbus_get_node(xbb->dev)); return (error); } max_ring_page_order = flsl(XBB_MAX_RING_PAGES) - 1; error = xs_printf(XST_NIL, xenbus_get_node(xbb->dev), "max-ring-page-order", "%u", max_ring_page_order); if (error) { xbb_attach_failed(xbb, error, "writing %s/max-ring-page-order", xenbus_get_node(xbb->dev)); return (error); } /* * We need to wait for hotplug script execution before * moving forward. 
*/ watch_path = xs_join(xenbus_get_node(xbb->dev), "physical-device-path"); xbb->hotplug_watch.callback_data = (uintptr_t)dev; xbb->hotplug_watch.callback = xbb_attach_disk; KASSERT(xbb->hotplug_watch.node == NULL, ("watch node already setup")); xbb->hotplug_watch.node = strdup(sbuf_data(watch_path), M_XENBLOCKBACK); sbuf_delete(watch_path); error = xs_register_watch(&xbb->hotplug_watch); if (error != 0) { xbb_attach_failed(xbb, error, "failed to create watch on %s", xbb->hotplug_watch.node); free(xbb->hotplug_watch.node, M_XENBLOCKBACK); return (error); } /* Tell the toolstack blkback has attached. */ xenbus_set_state(dev, XenbusStateInitWait); return (0); } /** * Detach from a block back device instance. * * \param dev NewBus device object representing this Xen Block Back instance. * * \return 0 for success, errno codes for failure. * * \note A block back device may be detached at any time in its life-cycle, * including part way through the attach process. For this reason, * initialization order and the initialization state checks in this * routine must be carefully coupled so that attach time failures * are gracefully handled. */ static int xbb_detach(device_t dev) { struct xbb_softc *xbb; DPRINTF("\n"); xbb = device_get_softc(dev); mtx_lock(&xbb->lock); while (xbb_shutdown(xbb) == EAGAIN) { msleep(xbb, &xbb->lock, /*wakeup prio unchanged*/0, "xbb_shutdown", 0); } mtx_unlock(&xbb->lock); DPRINTF("\n"); if (xbb->io_taskqueue != NULL) taskqueue_free(xbb->io_taskqueue); if (xbb->xbb_stats != NULL) devstat_remove_entry(xbb->xbb_stats); if (xbb->xbb_stats_in != NULL) devstat_remove_entry(xbb->xbb_stats_in); xbb_close_backend(xbb); if (xbb->dev_mode != NULL) { free(xbb->dev_mode, M_XENSTORE); xbb->dev_mode = NULL; } if (xbb->dev_type != NULL) { free(xbb->dev_type, M_XENSTORE); xbb->dev_type = NULL; } if (xbb->dev_name != NULL) { free(xbb->dev_name, M_XENSTORE); xbb->dev_name = NULL; } mtx_destroy(&xbb->lock); return (0); } /** * Prepare this block back device for suspension of this VM. * * \param dev NewBus device object representing this Xen Block Back instance. * * \return 0 for success, errno codes for failure. */ static int xbb_suspend(device_t dev) { #ifdef NOT_YET struct xbb_softc *sc = device_get_softc(dev); /* Prevent new requests being issued until we fix things up. */ mtx_lock(&sc->xb_io_lock); sc->connected = BLKIF_STATE_SUSPENDED; mtx_unlock(&sc->xb_io_lock); #endif return (0); } /** * Perform any processing required to recover from a suspended state. * * \param dev NewBus device object representing this Xen Block Back instance. * * \return 0 for success, errno codes for failure. */ static int xbb_resume(device_t dev) { return (0); } /** * Handle state changes expressed via the XenStore by our front-end peer. * * \param dev NewBus device object representing this Xen * Block Back instance. * \param frontend_state The new state of the front-end. * * \return 0 for success, errno codes for failure. 
*/ static void xbb_frontend_changed(device_t dev, XenbusState frontend_state) { struct xbb_softc *xbb = device_get_softc(dev); DPRINTF("frontend_state=%s, xbb_state=%s\n", xenbus_strstate(frontend_state), xenbus_strstate(xenbus_get_state(xbb->dev))); switch (frontend_state) { case XenbusStateInitialising: break; case XenbusStateInitialised: case XenbusStateConnected: xbb_connect(xbb); break; case XenbusStateClosing: case XenbusStateClosed: mtx_lock(&xbb->lock); xbb_shutdown(xbb); mtx_unlock(&xbb->lock); if (frontend_state == XenbusStateClosed) xenbus_set_state(xbb->dev, XenbusStateClosed); break; default: xenbus_dev_fatal(xbb->dev, EINVAL, "saw state %d at frontend", frontend_state); break; } } /*---------------------------- NewBus Registration ---------------------------*/ static device_method_t xbb_methods[] = { /* Device interface */ DEVMETHOD(device_probe, xbb_probe), DEVMETHOD(device_attach, xbb_attach), DEVMETHOD(device_detach, xbb_detach), DEVMETHOD(device_shutdown, bus_generic_shutdown), DEVMETHOD(device_suspend, xbb_suspend), DEVMETHOD(device_resume, xbb_resume), /* Xenbus interface */ DEVMETHOD(xenbus_otherend_changed, xbb_frontend_changed), { 0, 0 } }; static driver_t xbb_driver = { "xbbd", xbb_methods, sizeof(struct xbb_softc), }; devclass_t xbb_devclass; DRIVER_MODULE(xbbd, xenbusb_back, xbb_driver, xbb_devclass, 0, 0); Index: head/sys/dev/xen/blkfront/blkfront.c =================================================================== --- head/sys/dev/xen/blkfront/blkfront.c (revision 327948) +++ head/sys/dev/xen/blkfront/blkfront.c (revision 327949) @@ -1,1615 +1,1614 @@ /* * XenBSD block device driver * * Copyright (c) 2010-2013 Spectra Logic Corporation * Copyright (c) 2009 Scott Long, Yahoo! * Copyright (c) 2009 Frank Suchomel, Citrix * Copyright (c) 2009 Doug F. Rabson, Citrix * Copyright (c) 2005 Kip Macy * Copyright (c) 2003-2004, Keir Fraser & Steve Hand * Modifications by Mark A. Williamson are (c) Intel Research Cambridge * * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to * deal in the Software without restriction, including without limitation the * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or * sell copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. 
*/ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "xenbus_if.h" /*--------------------------- Forward Declarations ---------------------------*/ static void xbd_closing(device_t); static void xbd_startio(struct xbd_softc *sc); /*---------------------------------- Macros ----------------------------------*/ #if 0 #define DPRINTK(fmt, args...) printf("[XEN] %s:%d: " fmt ".\n", __func__, __LINE__, ##args) #else #define DPRINTK(fmt, args...) #endif #define XBD_SECTOR_SHFT 9 /*---------------------------- Global Static Data ----------------------------*/ static MALLOC_DEFINE(M_XENBLOCKFRONT, "xbd", "Xen Block Front driver data"); static int xbd_enable_indirect = 1; SYSCTL_NODE(_hw, OID_AUTO, xbd, CTLFLAG_RD, 0, "xbd driver parameters"); SYSCTL_INT(_hw_xbd, OID_AUTO, xbd_enable_indirect, CTLFLAG_RDTUN, &xbd_enable_indirect, 0, "Enable xbd indirect segments"); /*---------------------------- Command Processing ----------------------------*/ static void xbd_freeze(struct xbd_softc *sc, xbd_flag_t xbd_flag) { if (xbd_flag != XBDF_NONE && (sc->xbd_flags & xbd_flag) != 0) return; sc->xbd_flags |= xbd_flag; sc->xbd_qfrozen_cnt++; } static void xbd_thaw(struct xbd_softc *sc, xbd_flag_t xbd_flag) { if (xbd_flag != XBDF_NONE && (sc->xbd_flags & xbd_flag) == 0) return; if (sc->xbd_qfrozen_cnt == 0) panic("%s: Thaw with flag 0x%x while not frozen.", __func__, xbd_flag); sc->xbd_flags &= ~xbd_flag; sc->xbd_qfrozen_cnt--; } static void xbd_cm_freeze(struct xbd_softc *sc, struct xbd_command *cm, xbdc_flag_t cm_flag) { if ((cm->cm_flags & XBDCF_FROZEN) != 0) return; cm->cm_flags |= XBDCF_FROZEN|cm_flag; xbd_freeze(sc, XBDF_NONE); } static void xbd_cm_thaw(struct xbd_softc *sc, struct xbd_command *cm) { if ((cm->cm_flags & XBDCF_FROZEN) == 0) return; cm->cm_flags &= ~XBDCF_FROZEN; xbd_thaw(sc, XBDF_NONE); } static inline void xbd_flush_requests(struct xbd_softc *sc) { int notify; RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&sc->xbd_ring, notify); if (notify) xen_intr_signal(sc->xen_intr_handle); } static void xbd_free_command(struct xbd_command *cm) { KASSERT((cm->cm_flags & XBDCF_Q_MASK) == XBD_Q_NONE, ("Freeing command that is still on queue %d.", cm->cm_flags & XBDCF_Q_MASK)); cm->cm_flags = XBDCF_INITIALIZER; cm->cm_bp = NULL; cm->cm_complete = NULL; xbd_enqueue_cm(cm, XBD_Q_FREE); xbd_thaw(cm->cm_sc, XBDF_CM_SHORTAGE); } static void xbd_mksegarray(bus_dma_segment_t *segs, int nsegs, grant_ref_t * gref_head, int otherend_id, int readonly, grant_ref_t * sg_ref, struct blkif_request_segment *sg) { struct blkif_request_segment *last_block_sg = sg + nsegs; vm_paddr_t buffer_ma; uint64_t fsect, lsect; int ref; while (sg < last_block_sg) { KASSERT(segs->ds_addr % (1 << XBD_SECTOR_SHFT) == 0, ("XEN disk driver I/O must be sector aligned")); KASSERT(segs->ds_len % (1 << XBD_SECTOR_SHFT) == 0, ("XEN disk driver I/Os must be a multiple of " "the sector length")); buffer_ma = segs->ds_addr; fsect = (buffer_ma & PAGE_MASK) >> XBD_SECTOR_SHFT; lsect = fsect + (segs->ds_len >> XBD_SECTOR_SHFT) - 1; KASSERT(lsect <= 7, ("XEN disk driver data cannot " "cross a page boundary")); /* install a grant reference. */ ref = gnttab_claim_grant_reference(gref_head); /* * GNTTAB_LIST_END == 0xffffffff, but it is private * to gnttab.c. 
*/ KASSERT(ref != ~0, ("grant_reference failed")); gnttab_grant_foreign_access_ref( ref, otherend_id, buffer_ma >> PAGE_SHIFT, readonly); *sg_ref = ref; *sg = (struct blkif_request_segment) { .gref = ref, .first_sect = fsect, .last_sect = lsect }; sg++; sg_ref++; segs++; } } static void xbd_queue_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error) { struct xbd_softc *sc; struct xbd_command *cm; int op; cm = arg; sc = cm->cm_sc; if (error) { cm->cm_bp->bio_error = EIO; biodone(cm->cm_bp); xbd_free_command(cm); return; } KASSERT(nsegs <= sc->xbd_max_request_segments, ("Too many segments in a blkfront I/O")); if (nsegs <= BLKIF_MAX_SEGMENTS_PER_REQUEST) { blkif_request_t *ring_req; /* Fill out a blkif_request_t structure. */ ring_req = (blkif_request_t *) RING_GET_REQUEST(&sc->xbd_ring, sc->xbd_ring.req_prod_pvt); sc->xbd_ring.req_prod_pvt++; ring_req->id = cm->cm_id; ring_req->operation = cm->cm_operation; ring_req->sector_number = cm->cm_sector_number; ring_req->handle = (blkif_vdev_t)(uintptr_t)sc->xbd_disk; ring_req->nr_segments = nsegs; cm->cm_nseg = nsegs; xbd_mksegarray(segs, nsegs, &cm->cm_gref_head, xenbus_get_otherend_id(sc->xbd_dev), cm->cm_operation == BLKIF_OP_WRITE, cm->cm_sg_refs, ring_req->seg); } else { blkif_request_indirect_t *ring_req; /* Fill out a blkif_request_indirect_t structure. */ ring_req = (blkif_request_indirect_t *) RING_GET_REQUEST(&sc->xbd_ring, sc->xbd_ring.req_prod_pvt); sc->xbd_ring.req_prod_pvt++; ring_req->id = cm->cm_id; ring_req->operation = BLKIF_OP_INDIRECT; ring_req->indirect_op = cm->cm_operation; ring_req->sector_number = cm->cm_sector_number; ring_req->handle = (blkif_vdev_t)(uintptr_t)sc->xbd_disk; ring_req->nr_segments = nsegs; cm->cm_nseg = nsegs; xbd_mksegarray(segs, nsegs, &cm->cm_gref_head, xenbus_get_otherend_id(sc->xbd_dev), cm->cm_operation == BLKIF_OP_WRITE, cm->cm_sg_refs, cm->cm_indirectionpages); memcpy(ring_req->indirect_grefs, &cm->cm_indirectionrefs, sizeof(grant_ref_t) * sc->xbd_max_request_indirectpages); } if (cm->cm_operation == BLKIF_OP_READ) op = BUS_DMASYNC_PREREAD; else if (cm->cm_operation == BLKIF_OP_WRITE) op = BUS_DMASYNC_PREWRITE; else op = 0; bus_dmamap_sync(sc->xbd_io_dmat, cm->cm_map, op); gnttab_free_grant_references(cm->cm_gref_head); xbd_enqueue_cm(cm, XBD_Q_BUSY); /* * If bus dma had to asynchronously call us back to dispatch * this command, we are no longer executing in the context of * xbd_startio(). Thus we cannot rely on xbd_startio()'s call to * xbd_flush_requests() to publish this command to the backend * along with any other commands that it could batch. */ if ((cm->cm_flags & XBDCF_ASYNC_MAPPING) != 0) xbd_flush_requests(sc); return; } static int xbd_queue_request(struct xbd_softc *sc, struct xbd_command *cm) { int error; if (cm->cm_bp != NULL) error = bus_dmamap_load_bio(sc->xbd_io_dmat, cm->cm_map, cm->cm_bp, xbd_queue_cb, cm, 0); else error = bus_dmamap_load(sc->xbd_io_dmat, cm->cm_map, cm->cm_data, cm->cm_datalen, xbd_queue_cb, cm, 0); if (error == EINPROGRESS) { /* * Maintain queuing order by freezing the queue. The next * command may not require as many resources as the command * we just attempted to map, so we can't rely on bus dma * blocking for it too. 
*/ xbd_cm_freeze(sc, cm, XBDCF_ASYNC_MAPPING); return (0); } return (error); } static void xbd_restart_queue_callback(void *arg) { struct xbd_softc *sc = arg; mtx_lock(&sc->xbd_io_lock); xbd_thaw(sc, XBDF_GNT_SHORTAGE); xbd_startio(sc); mtx_unlock(&sc->xbd_io_lock); } static struct xbd_command * xbd_bio_command(struct xbd_softc *sc) { struct xbd_command *cm; struct bio *bp; if (__predict_false(sc->xbd_state != XBD_STATE_CONNECTED)) return (NULL); bp = xbd_dequeue_bio(sc); if (bp == NULL) return (NULL); if ((cm = xbd_dequeue_cm(sc, XBD_Q_FREE)) == NULL) { xbd_freeze(sc, XBDF_CM_SHORTAGE); xbd_requeue_bio(sc, bp); return (NULL); } if (gnttab_alloc_grant_references(sc->xbd_max_request_segments, &cm->cm_gref_head) != 0) { gnttab_request_free_callback(&sc->xbd_callback, xbd_restart_queue_callback, sc, sc->xbd_max_request_segments); xbd_freeze(sc, XBDF_GNT_SHORTAGE); xbd_requeue_bio(sc, bp); xbd_enqueue_cm(cm, XBD_Q_FREE); return (NULL); } cm->cm_bp = bp; cm->cm_sector_number = (blkif_sector_t)bp->bio_pblkno; switch (bp->bio_cmd) { case BIO_READ: cm->cm_operation = BLKIF_OP_READ; break; case BIO_WRITE: cm->cm_operation = BLKIF_OP_WRITE; if ((bp->bio_flags & BIO_ORDERED) != 0) { if ((sc->xbd_flags & XBDF_BARRIER) != 0) { cm->cm_operation = BLKIF_OP_WRITE_BARRIER; } else { /* * Single step this command. */ cm->cm_flags |= XBDCF_Q_FREEZE; if (xbd_queue_length(sc, XBD_Q_BUSY) != 0) { /* * Wait for in-flight requests to * finish. */ xbd_freeze(sc, XBDF_WAIT_IDLE); xbd_requeue_cm(cm, XBD_Q_READY); return (NULL); } } } break; case BIO_FLUSH: if ((sc->xbd_flags & XBDF_FLUSH) != 0) cm->cm_operation = BLKIF_OP_FLUSH_DISKCACHE; else if ((sc->xbd_flags & XBDF_BARRIER) != 0) cm->cm_operation = BLKIF_OP_WRITE_BARRIER; else panic("flush request, but no flush support available"); break; default: panic("unknown bio command %d", bp->bio_cmd); } return (cm); } /* * Dequeue buffers and place them in the shared communication ring. * Return when no more requests can be accepted or all buffers have * been queued. * * Signal XEN once the ring has been filled out. */ static void xbd_startio(struct xbd_softc *sc) { struct xbd_command *cm; int error, queued = 0; mtx_assert(&sc->xbd_io_lock, MA_OWNED); if (sc->xbd_state != XBD_STATE_CONNECTED) return; while (!RING_FULL(&sc->xbd_ring)) { if (sc->xbd_qfrozen_cnt != 0) break; cm = xbd_dequeue_cm(sc, XBD_Q_READY); if (cm == NULL) cm = xbd_bio_command(sc); if (cm == NULL) break; if ((cm->cm_flags & XBDCF_Q_FREEZE) != 0) { /* * Single step command. Future work is * held off until this command completes. */ xbd_cm_freeze(sc, cm, XBDCF_Q_FREEZE); } if ((error = xbd_queue_request(sc, cm)) != 0) { printf("xbd_queue_request returned %d\n", error); break; } queued++; } if (queued != 0) xbd_flush_requests(sc); } static void xbd_bio_complete(struct xbd_softc *sc, struct xbd_command *cm) { struct bio *bp; bp = cm->cm_bp; if (__predict_false(cm->cm_status != BLKIF_RSP_OKAY)) { disk_err(bp, "disk error" , -1, 0); printf(" status: %x\n", cm->cm_status); bp->bio_flags |= BIO_ERROR; } if (bp->bio_flags & BIO_ERROR) bp->bio_error = EIO; else bp->bio_resid = 0; xbd_free_command(cm); biodone(bp); } static void xbd_int(void *xsc) { struct xbd_softc *sc = xsc; struct xbd_command *cm; blkif_response_t *bret; RING_IDX i, rp; int op; mtx_lock(&sc->xbd_io_lock); if (__predict_false(sc->xbd_state == XBD_STATE_DISCONNECTED)) { mtx_unlock(&sc->xbd_io_lock); return; } again: rp = sc->xbd_ring.sring->rsp_prod; rmb(); /* Ensure we see queued responses up to 'rp'. 
*/ for (i = sc->xbd_ring.rsp_cons; i != rp;) { bret = RING_GET_RESPONSE(&sc->xbd_ring, i); cm = &sc->xbd_shadow[bret->id]; xbd_remove_cm(cm, XBD_Q_BUSY); gnttab_end_foreign_access_references(cm->cm_nseg, cm->cm_sg_refs); i++; if (cm->cm_operation == BLKIF_OP_READ) op = BUS_DMASYNC_POSTREAD; else if (cm->cm_operation == BLKIF_OP_WRITE || cm->cm_operation == BLKIF_OP_WRITE_BARRIER) op = BUS_DMASYNC_POSTWRITE; else op = 0; bus_dmamap_sync(sc->xbd_io_dmat, cm->cm_map, op); bus_dmamap_unload(sc->xbd_io_dmat, cm->cm_map); /* * Release any hold this command has on future command * dispatch. */ xbd_cm_thaw(sc, cm); /* * Directly call the i/o complete routine to save an * an indirection in the common case. */ cm->cm_status = bret->status; if (cm->cm_bp) xbd_bio_complete(sc, cm); else if (cm->cm_complete != NULL) cm->cm_complete(cm); else xbd_free_command(cm); } sc->xbd_ring.rsp_cons = i; if (i != sc->xbd_ring.req_prod_pvt) { int more_to_do; RING_FINAL_CHECK_FOR_RESPONSES(&sc->xbd_ring, more_to_do); if (more_to_do) goto again; } else { sc->xbd_ring.sring->rsp_event = i + 1; } if (xbd_queue_length(sc, XBD_Q_BUSY) == 0) xbd_thaw(sc, XBDF_WAIT_IDLE); xbd_startio(sc); if (__predict_false(sc->xbd_state == XBD_STATE_SUSPENDED)) wakeup(&sc->xbd_cm_q[XBD_Q_BUSY]); mtx_unlock(&sc->xbd_io_lock); } /*------------------------------- Dump Support -------------------------------*/ /** * Quiesce the disk writes for a dump file before allowing the next buffer. */ static void xbd_quiesce(struct xbd_softc *sc) { int mtd; // While there are outstanding requests while (xbd_queue_length(sc, XBD_Q_BUSY) != 0) { RING_FINAL_CHECK_FOR_RESPONSES(&sc->xbd_ring, mtd); if (mtd) { /* Received request completions, update queue. */ xbd_int(sc); } if (xbd_queue_length(sc, XBD_Q_BUSY) != 0) { /* * Still pending requests, wait for the disk i/o * to complete. */ HYPERVISOR_yield(); } } } /* Kernel dump function for a paravirtualized disk device */ static void xbd_dump_complete(struct xbd_command *cm) { xbd_enqueue_cm(cm, XBD_Q_COMPLETE); } static int xbd_dump(void *arg, void *virtual, vm_offset_t physical, off_t offset, size_t length) { struct disk *dp = arg; struct xbd_softc *sc = dp->d_drv1; struct xbd_command *cm; size_t chunk; int sbp; int rc = 0; if (length <= 0) return (rc); xbd_quiesce(sc); /* All quiet on the western front. */ /* * If this lock is held, then this module is failing, and a * successful kernel dump is highly unlikely anyway. */ mtx_lock(&sc->xbd_io_lock); /* Split the 64KB block as needed */ for (sbp=0; length > 0; sbp++) { cm = xbd_dequeue_cm(sc, XBD_Q_FREE); if (cm == NULL) { mtx_unlock(&sc->xbd_io_lock); device_printf(sc->xbd_dev, "dump: no more commands?\n"); return (EBUSY); } if (gnttab_alloc_grant_references(sc->xbd_max_request_segments, &cm->cm_gref_head) != 0) { xbd_free_command(cm); mtx_unlock(&sc->xbd_io_lock); device_printf(sc->xbd_dev, "no more grant allocs?\n"); return (EBUSY); } chunk = length > sc->xbd_max_request_size ? sc->xbd_max_request_size : length; cm->cm_data = virtual; cm->cm_datalen = chunk; cm->cm_operation = BLKIF_OP_WRITE; cm->cm_sector_number = offset / dp->d_sectorsize; cm->cm_complete = xbd_dump_complete; xbd_enqueue_cm(cm, XBD_Q_READY); length -= chunk; offset += chunk; virtual = (char *) virtual + chunk; } /* Tell DOM0 to do the I/O */ xbd_startio(sc); mtx_unlock(&sc->xbd_io_lock); /* Poll for the completion. */ xbd_quiesce(sc); /* All quite on the eastern front */ /* If there were any errors, bail out... 
*/ while ((cm = xbd_dequeue_cm(sc, XBD_Q_COMPLETE)) != NULL) { if (cm->cm_status != BLKIF_RSP_OKAY) { device_printf(sc->xbd_dev, "Dump I/O failed at sector %jd\n", cm->cm_sector_number); rc = EIO; } xbd_free_command(cm); } return (rc); } /*----------------------------- Disk Entrypoints -----------------------------*/ static int xbd_open(struct disk *dp) { struct xbd_softc *sc = dp->d_drv1; if (sc == NULL) { printf("xbd%d: not found", dp->d_unit); return (ENXIO); } sc->xbd_flags |= XBDF_OPEN; sc->xbd_users++; return (0); } static int xbd_close(struct disk *dp) { struct xbd_softc *sc = dp->d_drv1; if (sc == NULL) return (ENXIO); sc->xbd_flags &= ~XBDF_OPEN; if (--(sc->xbd_users) == 0) { /* * Check whether we have been instructed to close. We will * have ignored this request initially, as the device was * still mounted. */ if (xenbus_get_otherend_state(sc->xbd_dev) == XenbusStateClosing) xbd_closing(sc->xbd_dev); } return (0); } static int xbd_ioctl(struct disk *dp, u_long cmd, void *addr, int flag, struct thread *td) { struct xbd_softc *sc = dp->d_drv1; if (sc == NULL) return (ENXIO); return (ENOTTY); } /* * Read/write routine for a buffer. Finds the proper unit, place it on * the sortq and kick the controller. */ static void xbd_strategy(struct bio *bp) { struct xbd_softc *sc = bp->bio_disk->d_drv1; /* bogus disk? */ if (sc == NULL) { bp->bio_error = EINVAL; bp->bio_flags |= BIO_ERROR; bp->bio_resid = bp->bio_bcount; biodone(bp); return; } /* * Place it in the queue of disk activities for this disk */ mtx_lock(&sc->xbd_io_lock); xbd_enqueue_bio(sc, bp); xbd_startio(sc); mtx_unlock(&sc->xbd_io_lock); return; } /*------------------------------ Ring Management -----------------------------*/ static int xbd_alloc_ring(struct xbd_softc *sc) { blkif_sring_t *sring; uintptr_t sring_page_addr; int error; int i; sring = malloc(sc->xbd_ring_pages * PAGE_SIZE, M_XENBLOCKFRONT, M_NOWAIT|M_ZERO); if (sring == NULL) { xenbus_dev_fatal(sc->xbd_dev, ENOMEM, "allocating shared ring"); return (ENOMEM); } SHARED_RING_INIT(sring); FRONT_RING_INIT(&sc->xbd_ring, sring, sc->xbd_ring_pages * PAGE_SIZE); for (i = 0, sring_page_addr = (uintptr_t)sring; i < sc->xbd_ring_pages; i++, sring_page_addr += PAGE_SIZE) { error = xenbus_grant_ring(sc->xbd_dev, (vtophys(sring_page_addr) >> PAGE_SHIFT), &sc->xbd_ring_ref[i]); if (error) { xenbus_dev_fatal(sc->xbd_dev, error, "granting ring_ref(%d)", i); return (error); } } if (sc->xbd_ring_pages == 1) { error = xs_printf(XST_NIL, xenbus_get_node(sc->xbd_dev), "ring-ref", "%u", sc->xbd_ring_ref[0]); if (error) { xenbus_dev_fatal(sc->xbd_dev, error, "writing %s/ring-ref", xenbus_get_node(sc->xbd_dev)); return (error); } } else { for (i = 0; i < sc->xbd_ring_pages; i++) { char ring_ref_name[]= "ring_refXX"; snprintf(ring_ref_name, sizeof(ring_ref_name), "ring-ref%u", i); error = xs_printf(XST_NIL, xenbus_get_node(sc->xbd_dev), ring_ref_name, "%u", sc->xbd_ring_ref[i]); if (error) { xenbus_dev_fatal(sc->xbd_dev, error, "writing %s/%s", xenbus_get_node(sc->xbd_dev), ring_ref_name); return (error); } } } error = xen_intr_alloc_and_bind_local_port(sc->xbd_dev, xenbus_get_otherend_id(sc->xbd_dev), NULL, xbd_int, sc, INTR_TYPE_BIO | INTR_MPSAFE, &sc->xen_intr_handle); if (error) { xenbus_dev_fatal(sc->xbd_dev, error, "xen_intr_alloc_and_bind_local_port failed"); return (error); } return (0); } static void xbd_free_ring(struct xbd_softc *sc) { int i; if (sc->xbd_ring.sring == NULL) return; for (i = 0; i < sc->xbd_ring_pages; i++) { if (sc->xbd_ring_ref[i] != GRANT_REF_INVALID) { 
gnttab_end_foreign_access_ref(sc->xbd_ring_ref[i]); sc->xbd_ring_ref[i] = GRANT_REF_INVALID; } } free(sc->xbd_ring.sring, M_XENBLOCKFRONT); sc->xbd_ring.sring = NULL; } /*-------------------------- Initialization/Teardown -------------------------*/ static int xbd_feature_string(struct xbd_softc *sc, char *features, size_t len) { struct sbuf sb; int feature_cnt; sbuf_new(&sb, features, len, SBUF_FIXEDLEN); feature_cnt = 0; if ((sc->xbd_flags & XBDF_FLUSH) != 0) { sbuf_printf(&sb, "flush"); feature_cnt++; } if ((sc->xbd_flags & XBDF_BARRIER) != 0) { if (feature_cnt != 0) sbuf_printf(&sb, ", "); sbuf_printf(&sb, "write_barrier"); feature_cnt++; } if ((sc->xbd_flags & XBDF_DISCARD) != 0) { if (feature_cnt != 0) sbuf_printf(&sb, ", "); sbuf_printf(&sb, "discard"); feature_cnt++; } if ((sc->xbd_flags & XBDF_PERSISTENT) != 0) { if (feature_cnt != 0) sbuf_printf(&sb, ", "); sbuf_printf(&sb, "persistent_grants"); feature_cnt++; } (void) sbuf_finish(&sb); return (sbuf_len(&sb)); } static int xbd_sysctl_features(SYSCTL_HANDLER_ARGS) { char features[80]; struct xbd_softc *sc = arg1; int error; int len; error = sysctl_wire_old_buffer(req, 0); if (error != 0) return (error); len = xbd_feature_string(sc, features, sizeof(features)); /* len is -1 on error, which will make the SYSCTL_OUT a no-op. */ return (SYSCTL_OUT(req, features, len + 1/*NUL*/)); } static void xbd_setup_sysctl(struct xbd_softc *xbd) { struct sysctl_ctx_list *sysctl_ctx = NULL; struct sysctl_oid *sysctl_tree = NULL; struct sysctl_oid_list *children; sysctl_ctx = device_get_sysctl_ctx(xbd->xbd_dev); if (sysctl_ctx == NULL) return; sysctl_tree = device_get_sysctl_tree(xbd->xbd_dev); if (sysctl_tree == NULL) return; children = SYSCTL_CHILDREN(sysctl_tree); SYSCTL_ADD_UINT(sysctl_ctx, children, OID_AUTO, "max_requests", CTLFLAG_RD, &xbd->xbd_max_requests, -1, "maximum outstanding requests (negotiated)"); SYSCTL_ADD_UINT(sysctl_ctx, children, OID_AUTO, "max_request_segments", CTLFLAG_RD, &xbd->xbd_max_request_segments, 0, "maximum number of pages per requests (negotiated)"); SYSCTL_ADD_UINT(sysctl_ctx, children, OID_AUTO, "max_request_size", CTLFLAG_RD, &xbd->xbd_max_request_size, 0, "maximum size in bytes of a request (negotiated)"); SYSCTL_ADD_UINT(sysctl_ctx, children, OID_AUTO, "ring_pages", CTLFLAG_RD, &xbd->xbd_ring_pages, 0, "communication channel pages (negotiated)"); SYSCTL_ADD_PROC(sysctl_ctx, children, OID_AUTO, "features", CTLTYPE_STRING|CTLFLAG_RD, xbd, 0, xbd_sysctl_features, "A", "protocol features (negotiated)"); } /* * Translate Linux major/minor to an appropriate name and unit * number. For HVM guests, this allows us to use the same drive names * with blkfront as the emulated drives, easing transition slightly. 
*/ static void xbd_vdevice_to_unit(uint32_t vdevice, int *unit, const char **name) { static struct vdev_info { int major; int shift; int base; const char *name; } info[] = { {3, 6, 0, "ada"}, /* ide0 */ {22, 6, 2, "ada"}, /* ide1 */ {33, 6, 4, "ada"}, /* ide2 */ {34, 6, 6, "ada"}, /* ide3 */ {56, 6, 8, "ada"}, /* ide4 */ {57, 6, 10, "ada"}, /* ide5 */ {88, 6, 12, "ada"}, /* ide6 */ {89, 6, 14, "ada"}, /* ide7 */ {90, 6, 16, "ada"}, /* ide8 */ {91, 6, 18, "ada"}, /* ide9 */ {8, 4, 0, "da"}, /* scsi disk0 */ {65, 4, 16, "da"}, /* scsi disk1 */ {66, 4, 32, "da"}, /* scsi disk2 */ {67, 4, 48, "da"}, /* scsi disk3 */ {68, 4, 64, "da"}, /* scsi disk4 */ {69, 4, 80, "da"}, /* scsi disk5 */ {70, 4, 96, "da"}, /* scsi disk6 */ {71, 4, 112, "da"}, /* scsi disk7 */ {128, 4, 128, "da"}, /* scsi disk8 */ {129, 4, 144, "da"}, /* scsi disk9 */ {130, 4, 160, "da"}, /* scsi disk10 */ {131, 4, 176, "da"}, /* scsi disk11 */ {132, 4, 192, "da"}, /* scsi disk12 */ {133, 4, 208, "da"}, /* scsi disk13 */ {134, 4, 224, "da"}, /* scsi disk14 */ {135, 4, 240, "da"}, /* scsi disk15 */ {202, 4, 0, "xbd"}, /* xbd */ {0, 0, 0, NULL}, }; int major = vdevice >> 8; int minor = vdevice & 0xff; int i; if (vdevice & (1 << 28)) { *unit = (vdevice & ((1 << 28) - 1)) >> 8; *name = "xbd"; return; } for (i = 0; info[i].major; i++) { if (info[i].major == major) { *unit = info[i].base + (minor >> info[i].shift); *name = info[i].name; return; } } *unit = minor >> 4; *name = "xbd"; } int xbd_instance_create(struct xbd_softc *sc, blkif_sector_t sectors, int vdevice, uint16_t vdisk_info, unsigned long sector_size, unsigned long phys_sector_size) { char features[80]; int unit, error = 0; const char *name; xbd_vdevice_to_unit(vdevice, &unit, &name); sc->xbd_unit = unit; if (strcmp(name, "xbd") != 0) device_printf(sc->xbd_dev, "attaching as %s%d\n", name, unit); if (xbd_feature_string(sc, features, sizeof(features)) > 0) { device_printf(sc->xbd_dev, "features: %s\n", features); } sc->xbd_disk = disk_alloc(); sc->xbd_disk->d_unit = sc->xbd_unit; sc->xbd_disk->d_open = xbd_open; sc->xbd_disk->d_close = xbd_close; sc->xbd_disk->d_ioctl = xbd_ioctl; sc->xbd_disk->d_strategy = xbd_strategy; sc->xbd_disk->d_dump = xbd_dump; sc->xbd_disk->d_name = name; sc->xbd_disk->d_drv1 = sc; sc->xbd_disk->d_sectorsize = sector_size; sc->xbd_disk->d_stripesize = phys_sector_size; sc->xbd_disk->d_stripeoffset = 0; sc->xbd_disk->d_mediasize = sectors * sector_size; sc->xbd_disk->d_maxsize = sc->xbd_max_request_size; sc->xbd_disk->d_flags = DISKFLAG_UNMAPPED_BIO; if ((sc->xbd_flags & (XBDF_FLUSH|XBDF_BARRIER)) != 0) { sc->xbd_disk->d_flags |= DISKFLAG_CANFLUSHCACHE; device_printf(sc->xbd_dev, "synchronize cache commands enabled.\n"); } disk_create(sc->xbd_disk, DISK_VERSION); return error; } static void xbd_free(struct xbd_softc *sc) { int i; /* Prevent new requests being issued until we fix things up. */ mtx_lock(&sc->xbd_io_lock); sc->xbd_state = XBD_STATE_DISCONNECTED; mtx_unlock(&sc->xbd_io_lock); /* Free resources associated with old device channel. 
*/ xbd_free_ring(sc); if (sc->xbd_shadow) { for (i = 0; i < sc->xbd_max_requests; i++) { struct xbd_command *cm; cm = &sc->xbd_shadow[i]; if (cm->cm_sg_refs != NULL) { free(cm->cm_sg_refs, M_XENBLOCKFRONT); cm->cm_sg_refs = NULL; } if (cm->cm_indirectionpages != NULL) { gnttab_end_foreign_access_references( sc->xbd_max_request_indirectpages, &cm->cm_indirectionrefs[0]); contigfree(cm->cm_indirectionpages, PAGE_SIZE * sc->xbd_max_request_indirectpages, M_XENBLOCKFRONT); cm->cm_indirectionpages = NULL; } bus_dmamap_destroy(sc->xbd_io_dmat, cm->cm_map); } free(sc->xbd_shadow, M_XENBLOCKFRONT); sc->xbd_shadow = NULL; bus_dma_tag_destroy(sc->xbd_io_dmat); xbd_initq_cm(sc, XBD_Q_FREE); xbd_initq_cm(sc, XBD_Q_READY); xbd_initq_cm(sc, XBD_Q_COMPLETE); } xen_intr_unbind(&sc->xen_intr_handle); } /*--------------------------- State Change Handlers --------------------------*/ static void xbd_initialize(struct xbd_softc *sc) { const char *otherend_path; const char *node_path; uint32_t max_ring_page_order; int error; if (xenbus_get_state(sc->xbd_dev) != XenbusStateInitialising) { /* Initialization has already been performed. */ return; } /* * Protocol defaults valid even if negotiation for a * setting fails. */ max_ring_page_order = 0; sc->xbd_ring_pages = 1; /* * Protocol negotiation. * * \note xs_gather() returns on the first encountered error, so * we must use independent calls in order to guarantee * we don't miss information in a sparsly populated back-end * tree. * * \note xs_scanf() does not update variables for unmatched * fields. */ otherend_path = xenbus_get_otherend_path(sc->xbd_dev); node_path = xenbus_get_node(sc->xbd_dev); /* Support both backend schemes for relaying ring page limits. */ (void)xs_scanf(XST_NIL, otherend_path, "max-ring-page-order", NULL, "%" PRIu32, &max_ring_page_order); sc->xbd_ring_pages = 1 << max_ring_page_order; (void)xs_scanf(XST_NIL, otherend_path, "max-ring-pages", NULL, "%" PRIu32, &sc->xbd_ring_pages); if (sc->xbd_ring_pages < 1) sc->xbd_ring_pages = 1; if (sc->xbd_ring_pages > XBD_MAX_RING_PAGES) { device_printf(sc->xbd_dev, "Back-end specified ring-pages of %u " "limited to front-end limit of %u.\n", sc->xbd_ring_pages, XBD_MAX_RING_PAGES); sc->xbd_ring_pages = XBD_MAX_RING_PAGES; } if (powerof2(sc->xbd_ring_pages) == 0) { uint32_t new_page_limit; new_page_limit = 0x01 << (fls(sc->xbd_ring_pages) - 1); device_printf(sc->xbd_dev, "Back-end specified ring-pages of %u " "is not a power of 2. Limited to %u.\n", sc->xbd_ring_pages, new_page_limit); sc->xbd_ring_pages = new_page_limit; } sc->xbd_max_requests = BLKIF_MAX_RING_REQUESTS(sc->xbd_ring_pages * PAGE_SIZE); if (sc->xbd_max_requests > XBD_MAX_REQUESTS) { device_printf(sc->xbd_dev, "Back-end specified max_requests of %u " "limited to front-end limit of %zu.\n", sc->xbd_max_requests, XBD_MAX_REQUESTS); sc->xbd_max_requests = XBD_MAX_REQUESTS; } if (xbd_alloc_ring(sc) != 0) return; /* Support both backend schemes for relaying ring page limits. 
*/ if (sc->xbd_ring_pages > 1) { error = xs_printf(XST_NIL, node_path, "num-ring-pages","%u", sc->xbd_ring_pages); if (error) { xenbus_dev_fatal(sc->xbd_dev, error, "writing %s/num-ring-pages", node_path); return; } error = xs_printf(XST_NIL, node_path, "ring-page-order", "%u", fls(sc->xbd_ring_pages) - 1); if (error) { xenbus_dev_fatal(sc->xbd_dev, error, "writing %s/ring-page-order", node_path); return; } } error = xs_printf(XST_NIL, node_path, "event-channel", "%u", xen_intr_port(sc->xen_intr_handle)); if (error) { xenbus_dev_fatal(sc->xbd_dev, error, "writing %s/event-channel", node_path); return; } error = xs_printf(XST_NIL, node_path, "protocol", "%s", XEN_IO_PROTO_ABI_NATIVE); if (error) { xenbus_dev_fatal(sc->xbd_dev, error, "writing %s/protocol", node_path); return; } xenbus_set_state(sc->xbd_dev, XenbusStateInitialised); } /* * Invoked when the backend is finally 'ready' (and has published * the details about the physical device - #sectors, size, etc). */ static void xbd_connect(struct xbd_softc *sc) { device_t dev = sc->xbd_dev; unsigned long sectors, sector_size, phys_sector_size; unsigned int binfo; int err, feature_barrier, feature_flush; int i, j; if (sc->xbd_state == XBD_STATE_CONNECTED || sc->xbd_state == XBD_STATE_SUSPENDED) return; DPRINTK("blkfront.c:connect:%s.\n", xenbus_get_otherend_path(dev)); err = xs_gather(XST_NIL, xenbus_get_otherend_path(dev), "sectors", "%lu", §ors, "info", "%u", &binfo, "sector-size", "%lu", §or_size, NULL); if (err) { xenbus_dev_fatal(dev, err, "reading backend fields at %s", xenbus_get_otherend_path(dev)); return; } if ((sectors == 0) || (sector_size == 0)) { xenbus_dev_fatal(dev, 0, "invalid parameters from %s:" " sectors = %lu, sector_size = %lu", xenbus_get_otherend_path(dev), sectors, sector_size); return; } err = xs_gather(XST_NIL, xenbus_get_otherend_path(dev), "physical-sector-size", "%lu", &phys_sector_size, NULL); if (err || phys_sector_size <= sector_size) phys_sector_size = 0; err = xs_gather(XST_NIL, xenbus_get_otherend_path(dev), "feature-barrier", "%d", &feature_barrier, NULL); if (err == 0 && feature_barrier != 0) sc->xbd_flags |= XBDF_BARRIER; err = xs_gather(XST_NIL, xenbus_get_otherend_path(dev), "feature-flush-cache", "%d", &feature_flush, NULL); if (err == 0 && feature_flush != 0) sc->xbd_flags |= XBDF_FLUSH; err = xs_gather(XST_NIL, xenbus_get_otherend_path(dev), "feature-max-indirect-segments", "%" PRIu32, &sc->xbd_max_request_segments, NULL); if ((err != 0) || (xbd_enable_indirect == 0)) sc->xbd_max_request_segments = 0; if (sc->xbd_max_request_segments > XBD_MAX_INDIRECT_SEGMENTS) sc->xbd_max_request_segments = XBD_MAX_INDIRECT_SEGMENTS; if (sc->xbd_max_request_segments > XBD_SIZE_TO_SEGS(MAXPHYS)) sc->xbd_max_request_segments = XBD_SIZE_TO_SEGS(MAXPHYS); sc->xbd_max_request_indirectpages = XBD_INDIRECT_SEGS_TO_PAGES(sc->xbd_max_request_segments); if (sc->xbd_max_request_segments < BLKIF_MAX_SEGMENTS_PER_REQUEST) sc->xbd_max_request_segments = BLKIF_MAX_SEGMENTS_PER_REQUEST; sc->xbd_max_request_size = XBD_SEGS_TO_SIZE(sc->xbd_max_request_segments); /* Allocate datastructures based on negotiated values. 
*/ err = bus_dma_tag_create( bus_get_dma_tag(sc->xbd_dev), /* parent */ 512, PAGE_SIZE, /* algnmnt, boundary */ BUS_SPACE_MAXADDR, /* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filter, filterarg */ sc->xbd_max_request_size, sc->xbd_max_request_segments, PAGE_SIZE, /* maxsegsize */ BUS_DMA_ALLOCNOW, /* flags */ busdma_lock_mutex, /* lockfunc */ &sc->xbd_io_lock, /* lockarg */ &sc->xbd_io_dmat); if (err != 0) { xenbus_dev_fatal(sc->xbd_dev, err, "Cannot allocate parent DMA tag\n"); return; } /* Per-transaction data allocation. */ - sc->xbd_shadow = malloc(sizeof(*sc->xbd_shadow) * sc->xbd_max_requests, - M_XENBLOCKFRONT, M_NOWAIT|M_ZERO); + sc->xbd_shadow = mallocarray(sc->xbd_max_requests, + sizeof(*sc->xbd_shadow), M_XENBLOCKFRONT, M_NOWAIT|M_ZERO); if (sc->xbd_shadow == NULL) { bus_dma_tag_destroy(sc->xbd_io_dmat); xenbus_dev_fatal(sc->xbd_dev, ENOMEM, "Cannot allocate request structures\n"); return; } for (i = 0; i < sc->xbd_max_requests; i++) { struct xbd_command *cm; void * indirectpages; cm = &sc->xbd_shadow[i]; - cm->cm_sg_refs = malloc( - sizeof(grant_ref_t) * sc->xbd_max_request_segments, - M_XENBLOCKFRONT, M_NOWAIT); + cm->cm_sg_refs = mallocarray(sc->xbd_max_request_segments, + sizeof(grant_ref_t), M_XENBLOCKFRONT, M_NOWAIT); if (cm->cm_sg_refs == NULL) break; cm->cm_id = i; cm->cm_flags = XBDCF_INITIALIZER; cm->cm_sc = sc; if (bus_dmamap_create(sc->xbd_io_dmat, 0, &cm->cm_map) != 0) break; if (sc->xbd_max_request_indirectpages > 0) { indirectpages = contigmalloc( PAGE_SIZE * sc->xbd_max_request_indirectpages, M_XENBLOCKFRONT, M_ZERO, 0, ~0, PAGE_SIZE, 0); } else { indirectpages = NULL; } for (j = 0; j < sc->xbd_max_request_indirectpages; j++) { if (gnttab_grant_foreign_access( xenbus_get_otherend_id(sc->xbd_dev), (vtophys(indirectpages) >> PAGE_SHIFT) + j, 1 /* grant read-only access */, &cm->cm_indirectionrefs[j])) break; } if (j < sc->xbd_max_request_indirectpages) break; cm->cm_indirectionpages = indirectpages; xbd_free_command(cm); } if (sc->xbd_disk == NULL) { device_printf(dev, "%juMB <%s> at %s", (uintmax_t) sectors / (1048576 / sector_size), device_get_desc(dev), xenbus_get_node(dev)); bus_print_child_footer(device_get_parent(dev), dev); xbd_instance_create(sc, sectors, sc->xbd_vdevice, binfo, sector_size, phys_sector_size); } (void)xenbus_set_state(dev, XenbusStateConnected); /* Kick pending requests. */ mtx_lock(&sc->xbd_io_lock); sc->xbd_state = XBD_STATE_CONNECTED; xbd_startio(sc); sc->xbd_flags |= XBDF_READY; mtx_unlock(&sc->xbd_io_lock); } /** * Handle the change of state of the backend to Closing. We must delete our * device-layer structures now, to ensure that writes are flushed through to * the backend. Once this is done, we can switch to Closed in * acknowledgement. */ static void xbd_closing(device_t dev) { struct xbd_softc *sc = device_get_softc(dev); xenbus_set_state(dev, XenbusStateClosing); DPRINTK("xbd_closing: %s removed\n", xenbus_get_node(dev)); if (sc->xbd_disk != NULL) { disk_destroy(sc->xbd_disk); sc->xbd_disk = NULL; } xenbus_set_state(dev, XenbusStateClosed); } /*---------------------------- NewBus Entrypoints ----------------------------*/ static int xbd_probe(device_t dev) { if (strcmp(xenbus_get_type(dev), "vbd") != 0) return (ENXIO); if (xen_hvm_domain() && xen_disable_pv_disks != 0) return (ENXIO); if (xen_hvm_domain()) { int error; char *type; /* * When running in an HVM domain, IDE disk emulation is * disabled early in boot so that native drivers will * not see emulated hardware. 
However, CDROM device * emulation cannot be disabled. * * Through use of FreeBSD's vm_guest and xen_hvm_domain() * APIs, we could modify the native CDROM driver to fail its * probe when running under Xen. Unfortunatlely, the PV * CDROM support in XenServer (up through at least version * 6.2) isn't functional, so we instead rely on the emulated * CDROM instance, and fail to attach the PV one here in * the blkfront driver. */ error = xs_read(XST_NIL, xenbus_get_node(dev), "device-type", NULL, (void **) &type); if (error) return (ENXIO); if (strncmp(type, "cdrom", 5) == 0) { free(type, M_XENSTORE); return (ENXIO); } free(type, M_XENSTORE); } device_set_desc(dev, "Virtual Block Device"); device_quiet(dev); return (0); } /* * Setup supplies the backend dir, virtual device. We place an event * channel and shared frame entries. We watch backend to wait if it's * ok. */ static int xbd_attach(device_t dev) { struct xbd_softc *sc; const char *name; uint32_t vdevice; int error; int i; int unit; /* FIXME: Use dynamic device id if this is not set. */ error = xs_scanf(XST_NIL, xenbus_get_node(dev), "virtual-device", NULL, "%" PRIu32, &vdevice); if (error) error = xs_scanf(XST_NIL, xenbus_get_node(dev), "virtual-device-ext", NULL, "%" PRIu32, &vdevice); if (error) { xenbus_dev_fatal(dev, error, "reading virtual-device"); device_printf(dev, "Couldn't determine virtual device.\n"); return (error); } xbd_vdevice_to_unit(vdevice, &unit, &name); if (!strcmp(name, "xbd")) device_set_unit(dev, unit); sc = device_get_softc(dev); mtx_init(&sc->xbd_io_lock, "blkfront i/o lock", NULL, MTX_DEF); xbd_initqs(sc); for (i = 0; i < XBD_MAX_RING_PAGES; i++) sc->xbd_ring_ref[i] = GRANT_REF_INVALID; sc->xbd_dev = dev; sc->xbd_vdevice = vdevice; sc->xbd_state = XBD_STATE_DISCONNECTED; xbd_setup_sysctl(sc); /* Wait for backend device to publish its protocol capabilities. */ xenbus_set_state(dev, XenbusStateInitialising); return (0); } static int xbd_detach(device_t dev) { struct xbd_softc *sc = device_get_softc(dev); DPRINTK("%s: %s removed\n", __func__, xenbus_get_node(dev)); xbd_free(sc); mtx_destroy(&sc->xbd_io_lock); return 0; } static int xbd_suspend(device_t dev) { struct xbd_softc *sc = device_get_softc(dev); int retval; int saved_state; /* Prevent new requests being issued until we fix things up. */ mtx_lock(&sc->xbd_io_lock); saved_state = sc->xbd_state; sc->xbd_state = XBD_STATE_SUSPENDED; /* Wait for outstanding I/O to drain. */ retval = 0; while (xbd_queue_length(sc, XBD_Q_BUSY) != 0) { if (msleep(&sc->xbd_cm_q[XBD_Q_BUSY], &sc->xbd_io_lock, PRIBIO, "blkf_susp", 30 * hz) == EWOULDBLOCK) { retval = EBUSY; break; } } mtx_unlock(&sc->xbd_io_lock); if (retval != 0) sc->xbd_state = saved_state; return (retval); } static int xbd_resume(device_t dev) { struct xbd_softc *sc = device_get_softc(dev); if (xen_suspend_cancelled) { sc->xbd_state = XBD_STATE_CONNECTED; return (0); } DPRINTK("xbd_resume: %s\n", xenbus_get_node(dev)); xbd_free(sc); xbd_initialize(sc); return (0); } /** * Callback received when the backend's state changes. 
*/ static void xbd_backend_changed(device_t dev, XenbusState backend_state) { struct xbd_softc *sc = device_get_softc(dev); DPRINTK("backend_state=%d\n", backend_state); switch (backend_state) { case XenbusStateUnknown: case XenbusStateInitialising: case XenbusStateReconfigured: case XenbusStateReconfiguring: case XenbusStateClosed: break; case XenbusStateInitWait: case XenbusStateInitialised: xbd_initialize(sc); break; case XenbusStateConnected: xbd_initialize(sc); xbd_connect(sc); break; case XenbusStateClosing: if (sc->xbd_users > 0) { device_printf(dev, "detaching with pending users\n"); KASSERT(sc->xbd_disk != NULL, ("NULL disk with pending users\n")); disk_gone(sc->xbd_disk); } else { xbd_closing(dev); } break; } } /*---------------------------- NewBus Registration ---------------------------*/ static device_method_t xbd_methods[] = { /* Device interface */ DEVMETHOD(device_probe, xbd_probe), DEVMETHOD(device_attach, xbd_attach), DEVMETHOD(device_detach, xbd_detach), DEVMETHOD(device_shutdown, bus_generic_shutdown), DEVMETHOD(device_suspend, xbd_suspend), DEVMETHOD(device_resume, xbd_resume), /* Xenbus interface */ DEVMETHOD(xenbus_otherend_changed, xbd_backend_changed), { 0, 0 } }; static driver_t xbd_driver = { "xbd", xbd_methods, sizeof(struct xbd_softc), }; devclass_t xbd_devclass; DRIVER_MODULE(xbd, xenbusb_front, xbd_driver, xbd_devclass, 0, 0);
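A few illustrative notes on the hunks above follow. The substance of this revision is the conversion of the blkback and blkfront allocation sites from malloc(nmemb * size, ...) to mallocarray(nmemb, size, ...), which performs the element-count multiplication with an overflow check inside the allocator instead of trusting the caller's arithmetic. The sketch below is a minimal userland illustration of that guard, with checked_mallocarray as a hypothetical stand-in for the kernel's mallocarray(9); it is not the kernel implementation itself.

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/*
 * Hypothetical userland stand-in for the overflow guard that
 * mallocarray(9) applies before falling through to malloc(9):
 * refuse the allocation when nmemb * size would wrap past SIZE_MAX.
 */
static void *
checked_mallocarray(size_t nmemb, size_t size)
{
	if (size != 0 && nmemb > SIZE_MAX / size)
		return (NULL);	/* multiplication would overflow */
	return (malloc(nmemb * size));
}

int
main(void)
{
	struct req { uint64_t id; void *cookie; };
	size_t max_requests = 256;	/* e.g. a negotiated ring depth */
	struct req *requests;

	requests = checked_mallocarray(max_requests, sizeof(*requests));
	if (requests == NULL)
		return (1);
	memset(requests, 0, max_requests * sizeof(*requests));
	printf("allocated %zu request slots\n", max_requests);
	free(requests);
	return (0);
}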
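For reference, the segment arithmetic used by xbd_mksegarray() in the blkfront hunk can be exercised on its own. This sketch assumes 4 KiB pages and 512-byte sectors (an XBD_SECTOR_SHFT of 9); segment_to_sectors is an illustrative helper name, and the code only mirrors the first_sect/last_sect computation shown in the diff, not the grant-reference handling around it.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define SECTOR_SHIFT	9		/* 512-byte sectors, as XBD_SECTOR_SHFT */
#define PAGE_SIZE_4K	4096u
#define PAGE_MASK_4K	(PAGE_SIZE_4K - 1)

/*
 * Mirror of the arithmetic in xbd_mksegarray(): a sector-aligned bus_dma
 * segment confined to one page maps to a (first_sect, last_sect) pair
 * within that page's 8 sectors.
 */
static void
segment_to_sectors(uint64_t buffer_ma, uint64_t len,
    uint8_t *first_sect, uint8_t *last_sect)
{
	assert((buffer_ma & ((1 << SECTOR_SHIFT) - 1)) == 0);
	assert((len & ((1 << SECTOR_SHIFT) - 1)) == 0);

	*first_sect = (buffer_ma & PAGE_MASK_4K) >> SECTOR_SHIFT;
	*last_sect = *first_sect + (len >> SECTOR_SHIFT) - 1;
	assert(*last_sect <= 7);	/* segment may not cross the page */
}

int
main(void)
{
	uint8_t fsect, lsect;

	/* 1 KiB segment starting 1 KiB into a page: sectors 2..3. */
	segment_to_sectors(0x12340400ULL, 1024, &fsect, &lsect);
	printf("first_sect=%u last_sect=%u\n", (unsigned)fsect, (unsigned)lsect);
	return (0);
}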
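Similarly, the ring-size negotiation in xbd_initialize() clamps the backend-advertised page count to the front end's compile-time limit and rounds a non-power-of-two value down before sizing the shared ring. The standalone sketch below reproduces only that arithmetic; FRONTEND_MAX_RING_PAGES, fls32 and negotiate_ring_pages are illustrative names rather than driver symbols.

#include <stdint.h>
#include <stdio.h>

#define FRONTEND_MAX_RING_PAGES	32	/* stand-in for XBD_MAX_RING_PAGES */

/* Portable fls(): index of the highest set bit, 1-based, 0 for 0. */
static int
fls32(uint32_t v)
{
	int bit;

	for (bit = 0; v != 0; bit++)
		v >>= 1;
	return (bit);
}

/*
 * Mirror of xbd_initialize()'s handling of the backend-advertised ring
 * size: enforce a minimum of one page, clamp to the front end's limit,
 * and round a non-power-of-two count down to a power of two.
 */
static uint32_t
negotiate_ring_pages(uint32_t backend_pages)
{
	uint32_t pages = backend_pages;

	if (pages < 1)
		pages = 1;
	if (pages > FRONTEND_MAX_RING_PAGES)
		pages = FRONTEND_MAX_RING_PAGES;
	if ((pages & (pages - 1)) != 0)		/* not a power of two */
		pages = 1u << (fls32(pages) - 1);
	return (pages);
}

int
main(void)
{
	printf("%u -> %u\n", 24u, negotiate_ring_pages(24));	/* 16 */
	printf("%u -> %u\n", 64u, negotiate_ring_pages(64));	/* 32 */
	return (0);
}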
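Finally, xbd_vdevice_to_unit() maps a Xen virtual-device ID onto a familiar FreeBSD disk name and unit number so that HVM guests see consistent names with the emulated drives. The condensed sketch below keeps only a few rows of the driver's lookup table, enough to show both the extended-ID path (bit 28 set) and the legacy major/minor path; it is a trimmed illustration, not the full table.

#include <stdint.h>
#include <stdio.h>

/*
 * Condensed version of the xbd_vdevice_to_unit() lookup: translate a Xen
 * virtual-device ID into a driver name and unit number.  Only a few of
 * the legacy majors from the driver's table are included here.
 */
struct vdev_info {
	int major;
	int shift;
	int base;
	const char *name;
};

static const struct vdev_info info[] = {
	{3,   6, 0, "ada"},	/* ide0 */
	{8,   4, 0, "da"},	/* scsi disk0 */
	{202, 4, 0, "xbd"},	/* xbd */
	{0,   0, 0, NULL},
};

static void
vdevice_to_unit(uint32_t vdevice, int *unit, const char **name)
{
	int major = vdevice >> 8;
	int minor = vdevice & 0xff;
	int i;

	if (vdevice & (1u << 28)) {
		/* Extended ID scheme: the unit is encoded directly. */
		*unit = (vdevice & ((1u << 28) - 1)) >> 8;
		*name = "xbd";
		return;
	}
	for (i = 0; info[i].major != 0; i++) {
		if (info[i].major == major) {
			*unit = info[i].base + (minor >> info[i].shift);
			*name = info[i].name;
			return;
		}
	}
	*unit = minor >> 4;
	*name = "xbd";
}

int
main(void)
{
	const char *name;
	int unit;

	vdevice_to_unit(0x810, &unit, &name);		/* major 8, minor 16 */
	printf("%s%d\n", name, unit);			/* da1 */
	vdevice_to_unit((1u << 28) | (5 << 8), &unit, &name);
	printf("%s%d\n", name, unit);			/* xbd5 */
	return (0);
}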