diff --git a/sys/arm/ti/twl/twl.c b/sys/arm/ti/twl/twl.c index 3a7a0ddcb0b1..ee54b0ea1f2c 100644 --- a/sys/arm/ti/twl/twl.c +++ b/sys/arm/ti/twl/twl.c @@ -1,456 +1,456 @@ /*- * SPDX-License-Identifier: BSD-2-Clause * * Copyright (c) 2011 * Ben Gray . * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include /* * Texas Instruments TWL4030/TWL5030/TWL60x0/TPS659x0 Power Management and * Audio CODEC devices. * * This code is based on the Linux TWL multifunctional device driver, which is * copyright (C) 2005-2006 Texas Instruments, Inc. * * These chips are typically used as support ICs for the OMAP range of embedded * ARM processes/SOC from Texas Instruments. They are typically used to control * on board voltages, however some variants have other features like audio * codecs, USB OTG transceivers, RTC, PWM, etc. * * This driver acts as a bus for more specific companion devices. * */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "arm/ti/twl/twl.h" /* TWL device IDs */ #define TWL_DEVICE_UNKNOWN 0xffff #define TWL_DEVICE_4030 0x4030 #define TWL_DEVICE_6025 0x6025 #define TWL_DEVICE_6030 0x6030 /* Each TWL device typically has more than one I2C address */ #define TWL_MAX_SUBADDRS 4 /* The maximum number of bytes that can be written in one call */ #define TWL_MAX_IIC_DATA_SIZE 63 /* The TWL devices typically use 4 I2C address for the different internal * register sets, plus one SmartReflex I2C address. 
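 */

/*
 * Illustrative sketch (not part of the driver): a sub-module index 0..3
 * maps onto the consecutive bus addresses 0x48..0x4B defined below
 * (TWL_CHIP_ID0..TWL_CHIP_ID3).  This hypothetical helper shows just the
 * address arithmetic; the real driver probes the bus and caches results
 * in sc_subaddr_map instead.
 */
static inline uint8_t
twl_subaddr_sketch(unsigned int nsub)
{

	/* 0x48 + nsub for the four register banks; 0xff means "not present". */
	return (nsub < 4 ? 0x48 + nsub : 0xff);
}

/*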
 */
#define TWL_CHIP_ID0		0x48
#define TWL_CHIP_ID1		0x49
#define TWL_CHIP_ID2		0x4A
#define TWL_CHIP_ID3		0x4B
#define TWL_SMARTREFLEX_CHIP_ID	0x12
#define TWL_INVALID_CHIP_ID	0xff

struct twl_softc {
	device_t		sc_dev;
	struct mtx		sc_mtx;
	unsigned int		sc_type;
	uint8_t			sc_subaddr_map[TWL_MAX_SUBADDRS];
	struct intr_config_hook	sc_scan_hook;
	device_t		sc_vreg;
	device_t		sc_clks;
};

/**
 * Macros for driver mutex locking
 */
#define TWL_LOCK(_sc)		mtx_lock(&(_sc)->sc_mtx)
#define TWL_UNLOCK(_sc)		mtx_unlock(&(_sc)->sc_mtx)
#define TWL_LOCK_INIT(_sc) \
	mtx_init(&_sc->sc_mtx, device_get_nameunit(_sc->sc_dev), \
	    "twl", MTX_DEF)
#define TWL_LOCK_DESTROY(_sc)	mtx_destroy(&_sc->sc_mtx);
#define TWL_ASSERT_LOCKED(_sc)	mtx_assert(&_sc->sc_mtx, MA_OWNED);
#define TWL_ASSERT_UNLOCKED(_sc) mtx_assert(&_sc->sc_mtx, MA_NOTOWNED);

/**
 * twl_is_4030 - returns true if the device is TWL4030
 * twl_is_6025 - returns true if the device is TWL6025
 * twl_is_6030 - returns true if the device is TWL6030
 * @sc: device soft context
 *
 * RETURNS:
 * Returns a non-zero value if the device matches, otherwise zero.
 */
int
twl_is_4030(device_t dev)
{
	struct twl_softc *sc = device_get_softc(dev);
	return (sc->sc_type == TWL_DEVICE_4030);
}

int
twl_is_6025(device_t dev)
{
	struct twl_softc *sc = device_get_softc(dev);
	return (sc->sc_type == TWL_DEVICE_6025);
}

int
twl_is_6030(device_t dev)
{
	struct twl_softc *sc = device_get_softc(dev);
	return (sc->sc_type == TWL_DEVICE_6030);
}

/**
 * twl_read - read one or more registers from the TWL device
 * @sc: device soft context
 * @nsub: the sub-module to read from
 * @reg: the register offset within the module to read
 * @buf: buffer to store the bytes in
 * @cnt: the number of bytes to read
 *
 * Reads one or more registers and stores the result in the supplied buffer.
 *
 * RETURNS:
 * Zero on success or an error code on failure.
 */
int
twl_read(device_t dev, uint8_t nsub, uint8_t reg, uint8_t *buf, uint16_t cnt)
{
	struct twl_softc *sc;
	struct iic_msg msg[2];
	uint8_t addr;
	int rc;

	sc = device_get_softc(dev);

	TWL_LOCK(sc);
	addr = sc->sc_subaddr_map[nsub];
	TWL_UNLOCK(sc);
	if (addr == TWL_INVALID_CHIP_ID)
		return (EIO);

	/* Set the address to read from */
	msg[0].slave = addr;
	msg[0].flags = IIC_M_WR | IIC_M_NOSTOP;
	msg[0].len = 1;
	msg[0].buf = &reg;
	/* Read the data back */
	msg[1].slave = addr;
	msg[1].flags = IIC_M_RD;
	msg[1].len = cnt;
	msg[1].buf = buf;

	rc = iicbus_transfer(dev, msg, 2);
	if (rc != 0) {
		device_printf(dev,
		    "iicbus read failed (adr:0x%02x, reg:0x%02x)\n",
		    addr, reg);
		return (EIO);
	}

	return (0);
}

/**
 * twl_write - writes one or more registers to the TWL device
 * @sc: device soft context
 * @nsub: the sub-module to write to
 * @reg: the register offset within the module to write to
 * @buf: data to write
 * @cnt: the number of bytes to write
 *
 * Writes one or more registers.
 *
 * RETURNS:
 * Zero on success or an error code on failure.
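 */

/*
 * Usage sketch (hypothetical, illustration only): a companion child driver
 * such as twl_vreg can read a register through its parent's accessor.  The
 * sub-module number (1) and register offset (0x00) below are made-up
 * values, not real TWL register assignments.
 */
static int
twl_child_read_sketch(device_t child, uint8_t *val)
{

	/* The twl device is the parent bus of its companion children. */
	return (twl_read(device_get_parent(child), 1, 0x00, val, 1));
}

/*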
*/ int twl_write(device_t dev, uint8_t nsub, uint8_t reg, uint8_t *buf, uint16_t cnt) { struct twl_softc *sc; struct iic_msg msg; uint8_t addr; uint8_t tmp_buf[TWL_MAX_IIC_DATA_SIZE + 1]; int rc; if (cnt > TWL_MAX_IIC_DATA_SIZE) return (ENOMEM); /* Set the register address as the first byte */ tmp_buf[0] = reg; memcpy(&tmp_buf[1], buf, cnt); sc = device_get_softc(dev); TWL_LOCK(sc); addr = sc->sc_subaddr_map[nsub]; TWL_UNLOCK(sc); if (addr == TWL_INVALID_CHIP_ID) return (EIO); /* Setup the transfer and execute it */ msg.slave = addr; msg.flags = IIC_M_WR; msg.len = cnt + 1; msg.buf = tmp_buf; rc = iicbus_transfer(dev, &msg, 1); if (rc != 0) { device_printf(sc->sc_dev, "iicbus write failed (adr:0x%02x, reg:0x%02x)\n", addr, reg); return (EIO); } return (0); } /** * twl_test_present - checks if a device with given address is present * @sc: device soft context * @addr: the address of the device to scan for * * Sends just the address byte and checks for an ACK. If no ACK then device * is assumed to not be present. * * RETURNS: * EIO if device is not present, otherwise 0 is returned. */ static int twl_test_present(struct twl_softc *sc, uint8_t addr) { struct iic_msg msg; uint8_t tmp; /* Set the address to read from */ msg.slave = addr; msg.flags = IIC_M_RD; msg.len = 1; msg.buf = &tmp; if (iicbus_transfer(sc->sc_dev, &msg, 1) != 0) return (EIO); return (0); } /** * twl_scan - scans the i2c bus for sub modules * @dev: the twl device * * TWL devices don't just have one i2c slave address, rather they have up to * 5 other addresses, each is for separate modules within the device. This * function scans the bus for 4 possible sub-devices and stores the info * internally. * */ static void twl_scan(void *dev) { struct twl_softc *sc; unsigned i; uint8_t devs[TWL_MAX_SUBADDRS]; uint8_t base = TWL_CHIP_ID0; sc = device_get_softc((device_t)dev); memset(devs, TWL_INVALID_CHIP_ID, TWL_MAX_SUBADDRS); /* Try each of the addresses (0x48, 0x49, 0x4a & 0x4b) to determine which * sub modules we have. */ for (i = 0; i < TWL_MAX_SUBADDRS; i++) { if (twl_test_present(sc, (base + i)) == 0) { devs[i] = (base + i); device_printf(sc->sc_dev, "Found (sub)device at 0x%02x\n", (base + i)); } } TWL_LOCK(sc); memcpy(sc->sc_subaddr_map, devs, TWL_MAX_SUBADDRS); TWL_UNLOCK(sc); /* Finished with the interrupt hook */ config_intrhook_disestablish(&sc->sc_scan_hook); } /** * twl_probe - * @dev: the twl device * * Scans the FDT for a match for the device, possible compatible device * strings are; "ti,twl6030", "ti,twl6025", "ti,twl4030". * * The FDT compat string also determines the type of device (it is currently * not possible to dynamically determine the device type). * */ static int twl_probe(device_t dev) { phandle_t node; const char *compat; int len, l; struct twl_softc *sc; if ((compat = ofw_bus_get_compat(dev)) == NULL) return (ENXIO); if ((node = ofw_bus_get_node(dev)) == 0) return (ENXIO); /* Get total 'compatible' prop len */ if ((len = OF_getproplen(node, "compatible")) <= 0) return (ENXIO); sc = device_get_softc(dev); sc->sc_dev = dev; sc->sc_type = TWL_DEVICE_UNKNOWN; while (len > 0) { if (strncasecmp(compat, "ti,twl6030", 10) == 0) sc->sc_type = TWL_DEVICE_6030; else if (strncasecmp(compat, "ti,twl6025", 10) == 0) sc->sc_type = TWL_DEVICE_6025; else if (strncasecmp(compat, "ti,twl4030", 10) == 0) sc->sc_type = TWL_DEVICE_4030; if (sc->sc_type != TWL_DEVICE_UNKNOWN) break; /* Slide to the next sub-string. 
 */
		l = strlen(compat) + 1;
		compat += l;
		len -= l;
	}

	switch (sc->sc_type) {
	case TWL_DEVICE_4030:
		device_set_desc(dev, "TI TWL4030/TPS659x0 Companion IC");
		break;
	case TWL_DEVICE_6025:
		device_set_desc(dev, "TI TWL6025 Companion IC");
		break;
	case TWL_DEVICE_6030:
		device_set_desc(dev, "TI TWL6030 Companion IC");
		break;
	case TWL_DEVICE_UNKNOWN:
	default:
		return (ENXIO);
	}

	return (0);
}

static int
twl_attach(device_t dev)
{
	struct twl_softc *sc;

	sc = device_get_softc(dev);
	sc->sc_dev = dev;

	TWL_LOCK_INIT(sc);

	/* We have to wait until interrupts are enabled.  I2C read and write
	 * only work if the interrupts are available.
	 */
	sc->sc_scan_hook.ich_func = twl_scan;
	sc->sc_scan_hook.ich_arg = dev;

	if (config_intrhook_establish(&sc->sc_scan_hook) != 0)
		return (ENOMEM);

	/* FIXME: should be in DTS file */
	if ((sc->sc_vreg = device_add_child(dev, "twl_vreg", -1)) == NULL)
		device_printf(dev, "could not allocate twl_vreg instance\n");
	if ((sc->sc_clks = device_add_child(dev, "twl_clks", -1)) == NULL)
		device_printf(dev, "could not allocate twl_clks instance\n");

	bus_attach_children(dev);
	return (0);
}

static int
twl_detach(device_t dev)
{
	struct twl_softc *sc;
+	int error;

	sc = device_get_softc(dev);

-	if (sc->sc_vreg)
-		device_delete_child(dev, sc->sc_vreg);
-	if (sc->sc_clks)
-		device_delete_child(dev, sc->sc_clks);
+	error = bus_generic_detach(dev);
+	if (error != 0)
+		return (error);

	TWL_LOCK_DESTROY(sc);
	return (0);
}

static device_method_t twl_methods[] = {
	DEVMETHOD(device_probe,		twl_probe),
	DEVMETHOD(device_attach,	twl_attach),
	DEVMETHOD(device_detach,	twl_detach),

	{0, 0},
};

static driver_t twl_driver = {
	"twl",
	twl_methods,
	sizeof(struct twl_softc),
};

DRIVER_MODULE(twl, iicbus, twl_driver, 0, 0);
MODULE_VERSION(twl, 1);

diff --git a/sys/dev/aac/aac.c b/sys/dev/aac/aac.c
index 12f5eb50426b..912b3dc903e3 100644
--- a/sys/dev/aac/aac.c
+++ b/sys/dev/aac/aac.c
@@ -1,3811 +1,3809 @@
/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2000 Michael Smith
 * Copyright (c) 2001 Scott Long
 * Copyright (c) 2000 BSDi
 * Copyright (c) 2001 Adaptec, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include

/*
 * Driver for the Adaptec 'FSA' family of PCI/SCSI RAID adapters.
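 */

/*
 * The detach-path change in this revision follows one pattern in both
 * drivers: a single bus_generic_detach() call detaches and deletes every
 * child up front (failing the detach if any child refuses), after which
 * driver-private teardown is safe.  A minimal sketch, assuming a
 * hypothetical foo driver with its own mutex:
 */
struct foo_softc {
	struct mtx	sc_mtx;		/* hypothetical driver state */
};

static int
foo_detach_sketch(device_t dev)
{
	struct foo_softc *sc = device_get_softc(dev);
	int error;

	/* Detaches and deletes all children; bail out if one refuses. */
	error = bus_generic_detach(dev);
	if (error != 0)
		return (error);

	/* No children remain, so private state can go away. */
	mtx_destroy(&sc->sc_mtx);
	return (0);
}

/*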
*/ #define AAC_DRIVERNAME "aac" #include "opt_aac.h" /* #include */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include static void aac_startup(void *arg); static void aac_add_container(struct aac_softc *sc, struct aac_mntinforesp *mir, int f); static void aac_get_bus_info(struct aac_softc *sc); static void aac_daemon(void *arg); /* Command Processing */ static void aac_timeout(struct aac_softc *sc); static void aac_complete(void *context, int pending); static int aac_bio_command(struct aac_softc *sc, struct aac_command **cmp); static void aac_bio_complete(struct aac_command *cm); static int aac_wait_command(struct aac_command *cm); static void aac_command_thread(struct aac_softc *sc); /* Command Buffer Management */ static void aac_map_command_sg(void *arg, bus_dma_segment_t *segs, int nseg, int error); static void aac_map_command_helper(void *arg, bus_dma_segment_t *segs, int nseg, int error); static int aac_alloc_commands(struct aac_softc *sc); static void aac_free_commands(struct aac_softc *sc); static void aac_unmap_command(struct aac_command *cm); /* Hardware Interface */ static int aac_alloc(struct aac_softc *sc); static void aac_common_map(void *arg, bus_dma_segment_t *segs, int nseg, int error); static int aac_check_firmware(struct aac_softc *sc); static int aac_init(struct aac_softc *sc); static int aac_sync_command(struct aac_softc *sc, u_int32_t command, u_int32_t arg0, u_int32_t arg1, u_int32_t arg2, u_int32_t arg3, u_int32_t *sp); static int aac_setup_intr(struct aac_softc *sc); static int aac_enqueue_fib(struct aac_softc *sc, int queue, struct aac_command *cm); static int aac_dequeue_fib(struct aac_softc *sc, int queue, u_int32_t *fib_size, struct aac_fib **fib_addr); static int aac_enqueue_response(struct aac_softc *sc, int queue, struct aac_fib *fib); /* StrongARM interface */ static int aac_sa_get_fwstatus(struct aac_softc *sc); static void aac_sa_qnotify(struct aac_softc *sc, int qbit); static int aac_sa_get_istatus(struct aac_softc *sc); static void aac_sa_clear_istatus(struct aac_softc *sc, int mask); static void aac_sa_set_mailbox(struct aac_softc *sc, u_int32_t command, u_int32_t arg0, u_int32_t arg1, u_int32_t arg2, u_int32_t arg3); static int aac_sa_get_mailbox(struct aac_softc *sc, int mb); static void aac_sa_set_interrupts(struct aac_softc *sc, int enable); const struct aac_interface aac_sa_interface = { aac_sa_get_fwstatus, aac_sa_qnotify, aac_sa_get_istatus, aac_sa_clear_istatus, aac_sa_set_mailbox, aac_sa_get_mailbox, aac_sa_set_interrupts, NULL, NULL, NULL }; /* i960Rx interface */ static int aac_rx_get_fwstatus(struct aac_softc *sc); static void aac_rx_qnotify(struct aac_softc *sc, int qbit); static int aac_rx_get_istatus(struct aac_softc *sc); static void aac_rx_clear_istatus(struct aac_softc *sc, int mask); static void aac_rx_set_mailbox(struct aac_softc *sc, u_int32_t command, u_int32_t arg0, u_int32_t arg1, u_int32_t arg2, u_int32_t arg3); static int aac_rx_get_mailbox(struct aac_softc *sc, int mb); static void aac_rx_set_interrupts(struct aac_softc *sc, int enable); static int aac_rx_send_command(struct aac_softc *sc, struct aac_command *cm); static int aac_rx_get_outb_queue(struct aac_softc *sc); static void aac_rx_set_outb_queue(struct aac_softc *sc, int index); const struct aac_interface aac_rx_interface = { aac_rx_get_fwstatus, aac_rx_qnotify, aac_rx_get_istatus, aac_rx_clear_istatus, 
aac_rx_set_mailbox, aac_rx_get_mailbox, aac_rx_set_interrupts, aac_rx_send_command, aac_rx_get_outb_queue, aac_rx_set_outb_queue }; /* Rocket/MIPS interface */ static int aac_rkt_get_fwstatus(struct aac_softc *sc); static void aac_rkt_qnotify(struct aac_softc *sc, int qbit); static int aac_rkt_get_istatus(struct aac_softc *sc); static void aac_rkt_clear_istatus(struct aac_softc *sc, int mask); static void aac_rkt_set_mailbox(struct aac_softc *sc, u_int32_t command, u_int32_t arg0, u_int32_t arg1, u_int32_t arg2, u_int32_t arg3); static int aac_rkt_get_mailbox(struct aac_softc *sc, int mb); static void aac_rkt_set_interrupts(struct aac_softc *sc, int enable); static int aac_rkt_send_command(struct aac_softc *sc, struct aac_command *cm); static int aac_rkt_get_outb_queue(struct aac_softc *sc); static void aac_rkt_set_outb_queue(struct aac_softc *sc, int index); const struct aac_interface aac_rkt_interface = { aac_rkt_get_fwstatus, aac_rkt_qnotify, aac_rkt_get_istatus, aac_rkt_clear_istatus, aac_rkt_set_mailbox, aac_rkt_get_mailbox, aac_rkt_set_interrupts, aac_rkt_send_command, aac_rkt_get_outb_queue, aac_rkt_set_outb_queue }; /* Debugging and Diagnostics */ static void aac_describe_controller(struct aac_softc *sc); static const char *aac_describe_code(const struct aac_code_lookup *table, u_int32_t code); /* Management Interface */ static d_open_t aac_open; static d_ioctl_t aac_ioctl; static d_poll_t aac_poll; static void aac_cdevpriv_dtor(void *arg); static int aac_ioctl_sendfib(struct aac_softc *sc, caddr_t ufib); static int aac_ioctl_send_raw_srb(struct aac_softc *sc, caddr_t arg); static void aac_handle_aif(struct aac_softc *sc, struct aac_fib *fib); static int aac_rev_check(struct aac_softc *sc, caddr_t udata); static int aac_open_aif(struct aac_softc *sc, caddr_t arg); static int aac_close_aif(struct aac_softc *sc, caddr_t arg); static int aac_getnext_aif(struct aac_softc *sc, caddr_t arg); static int aac_return_aif(struct aac_softc *sc, struct aac_fib_context *ctx, caddr_t uptr); static int aac_query_disk(struct aac_softc *sc, caddr_t uptr); static int aac_get_pci_info(struct aac_softc *sc, caddr_t uptr); static int aac_supported_features(struct aac_softc *sc, caddr_t uptr); static void aac_ioctl_event(struct aac_softc *sc, struct aac_event *event, void *arg); static struct aac_mntinforesp * aac_get_container_info(struct aac_softc *sc, struct aac_fib *fib, int cid); static struct cdevsw aac_cdevsw = { .d_version = D_VERSION, .d_flags = 0, .d_open = aac_open, .d_ioctl = aac_ioctl, .d_poll = aac_poll, .d_name = "aac", }; static MALLOC_DEFINE(M_AACBUF, "aacbuf", "Buffers for the AAC driver"); /* sysctl node */ SYSCTL_NODE(_hw, OID_AUTO, aac, CTLFLAG_RD | CTLFLAG_MPSAFE, 0, "AAC driver parameters"); /* * Device Interface */ /* * Initialize the controller and softc */ int aac_attach(struct aac_softc *sc) { int error, unit; fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); /* * Initialize per-controller queues. */ aac_initq_free(sc); aac_initq_ready(sc); aac_initq_busy(sc); aac_initq_bio(sc); /* * Initialize command-completion task. */ TASK_INIT(&sc->aac_task_complete, 0, aac_complete, sc); /* mark controller as suspended until we get ourselves organised */ sc->aac_state |= AAC_STATE_SUSPEND; /* * Check that the firmware on the card is supported. 
*/ if ((error = aac_check_firmware(sc)) != 0) return(error); /* * Initialize locks */ mtx_init(&sc->aac_aifq_lock, "AAC AIF lock", NULL, MTX_DEF); mtx_init(&sc->aac_io_lock, "AAC I/O lock", NULL, MTX_DEF); mtx_init(&sc->aac_container_lock, "AAC container lock", NULL, MTX_DEF); TAILQ_INIT(&sc->aac_container_tqh); TAILQ_INIT(&sc->aac_ev_cmfree); /* Initialize the clock daemon callout. */ callout_init_mtx(&sc->aac_daemontime, &sc->aac_io_lock, 0); /* * Initialize the adapter. */ if ((error = aac_alloc(sc)) != 0) return(error); if ((error = aac_init(sc)) != 0) return(error); /* * Allocate and connect our interrupt. */ if ((error = aac_setup_intr(sc)) != 0) return(error); /* * Print a little information about the controller. */ aac_describe_controller(sc); /* * Add sysctls. */ SYSCTL_ADD_INT(device_get_sysctl_ctx(sc->aac_dev), SYSCTL_CHILDREN(device_get_sysctl_tree(sc->aac_dev)), OID_AUTO, "firmware_build", CTLFLAG_RD, &sc->aac_revision.buildNumber, 0, "firmware build number"); /* * Register to probe our containers later. */ sc->aac_ich.ich_func = aac_startup; sc->aac_ich.ich_arg = sc; if (config_intrhook_establish(&sc->aac_ich) != 0) { device_printf(sc->aac_dev, "can't establish configuration hook\n"); return(ENXIO); } /* * Make the control device. */ unit = device_get_unit(sc->aac_dev); sc->aac_dev_t = make_dev(&aac_cdevsw, unit, UID_ROOT, GID_OPERATOR, 0640, "aac%d", unit); (void)make_dev_alias(sc->aac_dev_t, "afa%d", unit); (void)make_dev_alias(sc->aac_dev_t, "hpn%d", unit); sc->aac_dev_t->si_drv1 = sc; /* Create the AIF thread */ if (kproc_create((void(*)(void *))aac_command_thread, sc, &sc->aifthread, 0, 0, "aac%daif", unit)) panic("Could not create AIF thread"); /* Register the shutdown method to only be called post-dump */ if ((sc->eh = EVENTHANDLER_REGISTER(shutdown_final, aac_shutdown, sc->aac_dev, SHUTDOWN_PRI_DEFAULT)) == NULL) device_printf(sc->aac_dev, "shutdown event registration failed\n"); /* Register with CAM for the non-DASD devices */ if ((sc->flags & AAC_FLAGS_ENABLE_CAM) != 0) { TAILQ_INIT(&sc->aac_sim_tqh); aac_get_bus_info(sc); } mtx_lock(&sc->aac_io_lock); callout_reset(&sc->aac_daemontime, 60 * hz, aac_daemon, sc); mtx_unlock(&sc->aac_io_lock); return(0); } static void aac_daemon(void *arg) { struct timeval tv; struct aac_softc *sc; struct aac_fib *fib; sc = arg; mtx_assert(&sc->aac_io_lock, MA_OWNED); if (callout_pending(&sc->aac_daemontime) || callout_active(&sc->aac_daemontime) == 0) return; getmicrotime(&tv); aac_alloc_sync_fib(sc, &fib); *(uint32_t *)fib->data = tv.tv_sec; aac_sync_fib(sc, SendHostTime, 0, fib, sizeof(uint32_t)); aac_release_sync_fib(sc); callout_schedule(&sc->aac_daemontime, 30 * 60 * hz); } void aac_add_event(struct aac_softc *sc, struct aac_event *event) { switch (event->ev_type & AAC_EVENT_MASK) { case AAC_EVENT_CMFREE: TAILQ_INSERT_TAIL(&sc->aac_ev_cmfree, event, ev_links); break; default: device_printf(sc->aac_dev, "aac_add event: unknown event %d\n", event->ev_type); break; } } /* * Request information of container #cid */ static struct aac_mntinforesp * aac_get_container_info(struct aac_softc *sc, struct aac_fib *fib, int cid) { struct aac_mntinfo *mi; mi = (struct aac_mntinfo *)&fib->data[0]; /* use 64-bit LBA if enabled */ mi->Command = (sc->flags & AAC_FLAGS_LBA_64BIT) ? 
VM_NameServe64 : VM_NameServe; mi->MntType = FT_FILESYS; mi->MntCount = cid; if (aac_sync_fib(sc, ContainerCommand, 0, fib, sizeof(struct aac_mntinfo))) { device_printf(sc->aac_dev, "Error probing container %d\n", cid); return (NULL); } return ((struct aac_mntinforesp *)&fib->data[0]); } /* * Probe for containers, create disks. */ static void aac_startup(void *arg) { struct aac_softc *sc; struct aac_fib *fib; struct aac_mntinforesp *mir; int count = 0, i = 0; sc = (struct aac_softc *)arg; fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); mtx_lock(&sc->aac_io_lock); aac_alloc_sync_fib(sc, &fib); /* loop over possible containers */ do { if ((mir = aac_get_container_info(sc, fib, i)) == NULL) continue; if (i == 0) count = mir->MntRespCount; aac_add_container(sc, mir, 0); i++; } while ((i < count) && (i < AAC_MAX_CONTAINERS)); aac_release_sync_fib(sc); mtx_unlock(&sc->aac_io_lock); /* mark the controller up */ sc->aac_state &= ~AAC_STATE_SUSPEND; /* poke the bus to actually attach the child devices */ bus_attach_children(sc->aac_dev); /* disconnect ourselves from the intrhook chain */ config_intrhook_disestablish(&sc->aac_ich); /* enable interrupts now */ AAC_UNMASK_INTERRUPTS(sc); } /* * Create a device to represent a new container */ static void aac_add_container(struct aac_softc *sc, struct aac_mntinforesp *mir, int f) { struct aac_container *co; device_t child; /* * Check container volume type for validity. Note that many of * the possible types may never show up. */ if ((mir->Status == ST_OK) && (mir->MntTable[0].VolType != CT_NONE)) { co = (struct aac_container *)malloc(sizeof *co, M_AACBUF, M_NOWAIT | M_ZERO); if (co == NULL) panic("Out of memory?!"); fwprintf(sc, HBA_FLAGS_DBG_INIT_B, "id %x name '%.16s' size %u type %d", mir->MntTable[0].ObjectId, mir->MntTable[0].FileSystemName, mir->MntTable[0].Capacity, mir->MntTable[0].VolType); if ((child = device_add_child(sc->aac_dev, "aacd", -1)) == NULL) device_printf(sc->aac_dev, "device_add_child failed\n"); else device_set_ivars(child, co); device_set_desc(child, aac_describe_code(aac_container_types, mir->MntTable[0].VolType)); co->co_disk = child; co->co_found = f; bcopy(&mir->MntTable[0], &co->co_mntobj, sizeof(struct aac_mntobj)); mtx_lock(&sc->aac_container_lock); TAILQ_INSERT_TAIL(&sc->aac_container_tqh, co, co_link); mtx_unlock(&sc->aac_container_lock); } } /* * Allocate resources associated with (sc) */ static int aac_alloc(struct aac_softc *sc) { fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); /* * Create DMA tag for mapping buffers into controller-addressable space. */ if (bus_dma_tag_create(sc->aac_parent_dmat, /* parent */ 1, 0, /* algnmnt, boundary */ (sc->flags & AAC_FLAGS_SG_64BIT) ? BUS_SPACE_MAXADDR : BUS_SPACE_MAXADDR_32BIT, /* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filter, filterarg */ sc->aac_max_sectors << 9, /* maxsize */ sc->aac_sg_tablesize, /* nsegments */ BUS_SPACE_MAXSIZE_32BIT, /* maxsegsize */ BUS_DMA_ALLOCNOW, /* flags */ busdma_lock_mutex, /* lockfunc */ &sc->aac_io_lock, /* lockfuncarg */ &sc->aac_buffer_dmat)) { device_printf(sc->aac_dev, "can't allocate buffer DMA tag\n"); return (ENOMEM); } /* * Create DMA tag for mapping FIBs into controller-addressable space.. */ if (bus_dma_tag_create(sc->aac_parent_dmat, /* parent */ 1, 0, /* algnmnt, boundary */ (sc->flags & AAC_FLAGS_4GB_WINDOW) ? 
BUS_SPACE_MAXADDR_32BIT : 0x7fffffff, /* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filter, filterarg */ sc->aac_max_fibs_alloc * sc->aac_max_fib_size, /* maxsize */ 1, /* nsegments */ sc->aac_max_fibs_alloc * sc->aac_max_fib_size, /* maxsize */ 0, /* flags */ NULL, NULL, /* No locking needed */ &sc->aac_fib_dmat)) { device_printf(sc->aac_dev, "can't allocate FIB DMA tag\n"); return (ENOMEM); } /* * Create DMA tag for the common structure and allocate it. */ if (bus_dma_tag_create(sc->aac_parent_dmat, /* parent */ 1, 0, /* algnmnt, boundary */ (sc->flags & AAC_FLAGS_4GB_WINDOW) ? BUS_SPACE_MAXADDR_32BIT : 0x7fffffff, /* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filter, filterarg */ 8192 + sizeof(struct aac_common), /* maxsize */ 1, /* nsegments */ BUS_SPACE_MAXSIZE_32BIT, /* maxsegsize */ 0, /* flags */ NULL, NULL, /* No locking needed */ &sc->aac_common_dmat)) { device_printf(sc->aac_dev, "can't allocate common structure DMA tag\n"); return (ENOMEM); } if (bus_dmamem_alloc(sc->aac_common_dmat, (void **)&sc->aac_common, BUS_DMA_NOWAIT, &sc->aac_common_dmamap)) { device_printf(sc->aac_dev, "can't allocate common structure\n"); return (ENOMEM); } /* * Work around a bug in the 2120 and 2200 that cannot DMA commands * below address 8192 in physical memory. * XXX If the padding is not needed, can it be put to use instead * of ignored? */ (void)bus_dmamap_load(sc->aac_common_dmat, sc->aac_common_dmamap, sc->aac_common, 8192 + sizeof(*sc->aac_common), aac_common_map, sc, 0); if (sc->aac_common_busaddr < 8192) { sc->aac_common = (struct aac_common *) ((uint8_t *)sc->aac_common + 8192); sc->aac_common_busaddr += 8192; } bzero(sc->aac_common, sizeof(*sc->aac_common)); /* Allocate some FIBs and associated command structs */ TAILQ_INIT(&sc->aac_fibmap_tqh); sc->aac_commands = malloc(sc->aac_max_fibs * sizeof(struct aac_command), M_AACBUF, M_WAITOK|M_ZERO); while (sc->total_fibs < sc->aac_max_fibs) { if (aac_alloc_commands(sc) != 0) break; } if (sc->total_fibs == 0) return (ENOMEM); return (0); } /* * Free all of the resources associated with (sc) * * Should not be called if the controller is active. 
 */
void
aac_free(struct aac_softc *sc)
{

	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");

	/* remove the control device */
	if (sc->aac_dev_t != NULL)
		destroy_dev(sc->aac_dev_t);

	/* throw away any FIB buffers, discard the FIB DMA tag */
	aac_free_commands(sc);
	if (sc->aac_fib_dmat)
		bus_dma_tag_destroy(sc->aac_fib_dmat);

	free(sc->aac_commands, M_AACBUF);

	/* destroy the common area */
	if (sc->aac_common) {
		bus_dmamap_unload(sc->aac_common_dmat, sc->aac_common_dmamap);
		bus_dmamem_free(sc->aac_common_dmat, sc->aac_common,
		    sc->aac_common_dmamap);
	}
	if (sc->aac_common_dmat)
		bus_dma_tag_destroy(sc->aac_common_dmat);

	/* disconnect the interrupt handler */
	if (sc->aac_intr)
		bus_teardown_intr(sc->aac_dev, sc->aac_irq, sc->aac_intr);
	if (sc->aac_irq != NULL) {
		bus_release_resource(sc->aac_dev, SYS_RES_IRQ,
		    rman_get_rid(sc->aac_irq), sc->aac_irq);
		pci_release_msi(sc->aac_dev);
	}

	/* destroy data-transfer DMA tag */
	if (sc->aac_buffer_dmat)
		bus_dma_tag_destroy(sc->aac_buffer_dmat);

	/* destroy the parent DMA tag */
	if (sc->aac_parent_dmat)
		bus_dma_tag_destroy(sc->aac_parent_dmat);

	/* release the register window mapping */
	if (sc->aac_regs_res0 != NULL)
		bus_release_resource(sc->aac_dev, SYS_RES_MEMORY,
		    rman_get_rid(sc->aac_regs_res0), sc->aac_regs_res0);
	if (sc->aac_hwif == AAC_HWIF_NARK && sc->aac_regs_res1 != NULL)
		bus_release_resource(sc->aac_dev, SYS_RES_MEMORY,
		    rman_get_rid(sc->aac_regs_res1), sc->aac_regs_res1);
}

/*
 * Disconnect from the controller completely, in preparation for unload.
 */
int
aac_detach(device_t dev)
{
	struct aac_softc *sc;
	struct aac_container *co;
	struct aac_sim	*sim;
	int error;

	sc = device_get_softc(dev);
	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");

+	error = bus_generic_detach(dev);
+	if (error != 0)
+		return (error);
+
	callout_drain(&sc->aac_daemontime);

	mtx_lock(&sc->aac_io_lock);
	while (sc->aifflags & AAC_AIFFLAGS_RUNNING) {
		sc->aifflags |= AAC_AIFFLAGS_EXIT;
		wakeup(sc->aifthread);
		msleep(sc->aac_dev, &sc->aac_io_lock, PUSER, "aacdch", 0);
	}
	mtx_unlock(&sc->aac_io_lock);
	KASSERT((sc->aifflags & AAC_AIFFLAGS_RUNNING) == 0,
	    ("%s: invalid detach state", __func__));

	/* Remove the child containers */
	while ((co = TAILQ_FIRST(&sc->aac_container_tqh)) != NULL) {
-		error = device_delete_child(dev, co->co_disk);
-		if (error)
-			return (error);
		TAILQ_REMOVE(&sc->aac_container_tqh, co, co_link);
		free(co, M_AACBUF);
	}

	/* Remove the CAM SIMs */
	while ((sim = TAILQ_FIRST(&sc->aac_sim_tqh)) != NULL) {
		TAILQ_REMOVE(&sc->aac_sim_tqh, sim, sim_link);
-		error = device_delete_child(dev, sim->sim_dev);
-		if (error)
-			return (error);
		free(sim, M_AACBUF);
	}

	if ((error = aac_shutdown(dev)))
		return(error);

	EVENTHANDLER_DEREGISTER(shutdown_final, sc->eh);

	aac_free(sc);

	mtx_destroy(&sc->aac_aifq_lock);
	mtx_destroy(&sc->aac_io_lock);
	mtx_destroy(&sc->aac_container_lock);

	return(0);
}

/*
 * Bring the controller down to a dormant state and detach all child devices.
 *
 * This function is called before detach or system shutdown.
 *
 * Note that we can assume that the bioq on the controller is empty, as we
 * won't allow shutdown if any device is open.
 */
int
aac_shutdown(device_t dev)
{
	struct aac_softc *sc;
	struct aac_fib *fib;
	struct aac_close_command *cc;

	sc = device_get_softc(dev);
	fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "");

	sc->aac_state |= AAC_STATE_SUSPEND;

	/*
	 * Send a Container shutdown followed by a HostShutdown FIB to the
	 * controller to convince it that we don't want to talk to it anymore.
* We've been closed and all I/O completed already */ device_printf(sc->aac_dev, "shutting down controller..."); mtx_lock(&sc->aac_io_lock); aac_alloc_sync_fib(sc, &fib); cc = (struct aac_close_command *)&fib->data[0]; bzero(cc, sizeof(struct aac_close_command)); cc->Command = VM_CloseAll; cc->ContainerId = 0xffffffff; if (aac_sync_fib(sc, ContainerCommand, 0, fib, sizeof(struct aac_close_command))) printf("FAILED.\n"); else printf("done\n"); #if 0 else { fib->data[0] = 0; /* * XXX Issuing this command to the controller makes it shut down * but also keeps it from coming back up without a reset of the * PCI bus. This is not desirable if you are just unloading the * driver module with the intent to reload it later. */ if (aac_sync_fib(sc, FsaHostShutdown, AAC_FIBSTATE_SHUTDOWN, fib, 1)) { printf("FAILED.\n"); } else { printf("done.\n"); } } #endif AAC_MASK_INTERRUPTS(sc); aac_release_sync_fib(sc); mtx_unlock(&sc->aac_io_lock); return(0); } /* * Bring the controller to a quiescent state, ready for system suspend. */ int aac_suspend(device_t dev) { struct aac_softc *sc; sc = device_get_softc(dev); fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); sc->aac_state |= AAC_STATE_SUSPEND; AAC_MASK_INTERRUPTS(sc); return(0); } /* * Bring the controller back to a state ready for operation. */ int aac_resume(device_t dev) { struct aac_softc *sc; sc = device_get_softc(dev); fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); sc->aac_state &= ~AAC_STATE_SUSPEND; AAC_UNMASK_INTERRUPTS(sc); return(0); } /* * Interrupt handler for NEW_COMM interface. */ void aac_new_intr(void *arg) { struct aac_softc *sc; u_int32_t index, fast; struct aac_command *cm; struct aac_fib *fib; int i; sc = (struct aac_softc *)arg; fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); mtx_lock(&sc->aac_io_lock); while (1) { index = AAC_GET_OUTB_QUEUE(sc); if (index == 0xffffffff) index = AAC_GET_OUTB_QUEUE(sc); if (index == 0xffffffff) break; if (index & 2) { if (index == 0xfffffffe) { /* XXX This means that the controller wants * more work. Ignore it for now. */ continue; } /* AIF */ fib = (struct aac_fib *)malloc(sizeof *fib, M_AACBUF, M_NOWAIT | M_ZERO); if (fib == NULL) { /* If we're really this short on memory, * hopefully breaking out of the handler will * allow something to get freed. This * actually sucks a whole lot. */ break; } index &= ~2; for (i = 0; i < sizeof(struct aac_fib)/4; ++i) ((u_int32_t *)fib)[i] = AAC_MEM1_GETREG4(sc, index + i*4); aac_handle_aif(sc, fib); free(fib, M_AACBUF); /* * AIF memory is owned by the adapter, so let it * know that we are done with it. */ AAC_SET_OUTB_QUEUE(sc, index); AAC_CLEAR_ISTATUS(sc, AAC_DB_RESPONSE_READY); } else { fast = index & 1; cm = sc->aac_commands + (index >> 2); fib = cm->cm_fib; if (fast) { fib->Header.XferState |= AAC_FIBSTATE_DONEADAP; *((u_int32_t *)(fib->data)) = AAC_ERROR_NORMAL; } aac_remove_busy(cm); aac_unmap_command(cm); cm->cm_flags |= AAC_CMD_COMPLETED; /* is there a completion handler? */ if (cm->cm_complete != NULL) { cm->cm_complete(cm); } else { /* assume that someone is sleeping on this * command */ wakeup(cm); } sc->flags &= ~AAC_QUEUE_FRZN; } } /* see if we can start some more I/O */ if ((sc->flags & AAC_QUEUE_FRZN) == 0) aac_startio(sc); mtx_unlock(&sc->aac_io_lock); } /* * Interrupt filter for !NEW_COMM interface. */ int aac_filter(void *arg) { struct aac_softc *sc; u_int16_t reason; sc = (struct aac_softc *)arg; fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); /* * Read the status register directly. 
This is faster than taking the * driver lock and reading the queues directly. It also saves having * to turn parts of the driver lock into a spin mutex, which would be * ugly. */ reason = AAC_GET_ISTATUS(sc); AAC_CLEAR_ISTATUS(sc, reason); /* handle completion processing */ if (reason & AAC_DB_RESPONSE_READY) taskqueue_enqueue(taskqueue_fast, &sc->aac_task_complete); /* controller wants to talk to us */ if (reason & (AAC_DB_PRINTF | AAC_DB_COMMAND_READY)) { /* * XXX Make sure that we don't get fooled by strange messages * that start with a NULL. */ if ((reason & AAC_DB_PRINTF) && (sc->aac_common->ac_printf[0] == 0)) sc->aac_common->ac_printf[0] = 32; /* * This might miss doing the actual wakeup. However, the * msleep that this is waking up has a timeout, so it will * wake up eventually. AIFs and printfs are low enough * priority that they can handle hanging out for a few seconds * if needed. */ wakeup(sc->aifthread); } return (FILTER_HANDLED); } /* * Command Processing */ /* * Start as much queued I/O as possible on the controller */ void aac_startio(struct aac_softc *sc) { struct aac_command *cm; int error; fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); for (;;) { /* * This flag might be set if the card is out of resources. * Checking it here prevents an infinite loop of deferrals. */ if (sc->flags & AAC_QUEUE_FRZN) break; /* * Try to get a command that's been put off for lack of * resources */ cm = aac_dequeue_ready(sc); /* * Try to build a command off the bio queue (ignore error * return) */ if (cm == NULL) aac_bio_command(sc, &cm); /* nothing to do? */ if (cm == NULL) break; /* don't map more than once */ if (cm->cm_flags & AAC_CMD_MAPPED) panic("aac: command %p already mapped", cm); /* * Set up the command to go to the controller. If there are no * data buffers associated with the command then it can bypass * busdma. */ if (cm->cm_datalen != 0) { if (cm->cm_flags & AAC_REQ_BIO) error = bus_dmamap_load_bio( sc->aac_buffer_dmat, cm->cm_datamap, (struct bio *)cm->cm_private, aac_map_command_sg, cm, 0); else error = bus_dmamap_load(sc->aac_buffer_dmat, cm->cm_datamap, cm->cm_data, cm->cm_datalen, aac_map_command_sg, cm, 0); if (error == EINPROGRESS) { fwprintf(sc, HBA_FLAGS_DBG_COMM_B, "freezing queue\n"); sc->flags |= AAC_QUEUE_FRZN; } else if (error != 0) panic("aac_startio: unexpected error %d from " "busdma", error); } else aac_map_command_sg(cm, NULL, 0, 0); } } /* * Handle notification of one or more FIBs coming from the controller. */ static void aac_command_thread(struct aac_softc *sc) { struct aac_fib *fib; u_int32_t fib_size; int size, retval; fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); mtx_lock(&sc->aac_io_lock); sc->aifflags = AAC_AIFFLAGS_RUNNING; while ((sc->aifflags & AAC_AIFFLAGS_EXIT) == 0) { retval = 0; if ((sc->aifflags & AAC_AIFFLAGS_PENDING) == 0) retval = msleep(sc->aifthread, &sc->aac_io_lock, PRIBIO, "aifthd", AAC_PERIODIC_INTERVAL * hz); /* * First see if any FIBs need to be allocated. This needs * to be called without the driver lock because contigmalloc * can sleep. */ if ((sc->aifflags & AAC_AIFFLAGS_ALLOCFIBS) != 0) { mtx_unlock(&sc->aac_io_lock); aac_alloc_commands(sc); mtx_lock(&sc->aac_io_lock); sc->aifflags &= ~AAC_AIFFLAGS_ALLOCFIBS; aac_startio(sc); } /* * While we're here, check to see if any commands are stuck. * This is pretty low-priority, so it's ok if it doesn't * always fire. 
*/ if (retval == EWOULDBLOCK) aac_timeout(sc); /* Check the hardware printf message buffer */ if (sc->aac_common->ac_printf[0] != 0) aac_print_printf(sc); /* Also check to see if the adapter has a command for us. */ if (sc->flags & AAC_FLAGS_NEW_COMM) continue; for (;;) { if (aac_dequeue_fib(sc, AAC_HOST_NORM_CMD_QUEUE, &fib_size, &fib)) break; AAC_PRINT_FIB(sc, fib); switch (fib->Header.Command) { case AifRequest: aac_handle_aif(sc, fib); break; default: device_printf(sc->aac_dev, "unknown command " "from controller\n"); break; } if ((fib->Header.XferState == 0) || (fib->Header.StructType != AAC_FIBTYPE_TFIB)) { break; } /* Return the AIF to the controller. */ if (fib->Header.XferState & AAC_FIBSTATE_FROMADAP) { fib->Header.XferState |= AAC_FIBSTATE_DONEHOST; *(AAC_FSAStatus*)fib->data = ST_OK; /* XXX Compute the Size field? */ size = fib->Header.Size; if (size > sizeof(struct aac_fib)) { size = sizeof(struct aac_fib); fib->Header.Size = size; } /* * Since we did not generate this command, it * cannot go through the normal * enqueue->startio chain. */ aac_enqueue_response(sc, AAC_ADAP_NORM_RESP_QUEUE, fib); } } } sc->aifflags &= ~AAC_AIFFLAGS_RUNNING; mtx_unlock(&sc->aac_io_lock); wakeup(sc->aac_dev); kproc_exit(0); } /* * Process completed commands. */ static void aac_complete(void *context, int pending) { struct aac_softc *sc; struct aac_command *cm; struct aac_fib *fib; u_int32_t fib_size; sc = (struct aac_softc *)context; fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); mtx_lock(&sc->aac_io_lock); /* pull completed commands off the queue */ for (;;) { /* look for completed FIBs on our queue */ if (aac_dequeue_fib(sc, AAC_HOST_NORM_RESP_QUEUE, &fib_size, &fib)) break; /* nothing to do */ /* get the command, unmap and hand off for processing */ cm = sc->aac_commands + fib->Header.SenderData; if (cm == NULL) { AAC_PRINT_FIB(sc, fib); break; } if ((cm->cm_flags & AAC_CMD_TIMEDOUT) != 0) device_printf(sc->aac_dev, "COMMAND %p COMPLETED AFTER %d SECONDS\n", cm, (int)(time_uptime-cm->cm_timestamp)); aac_remove_busy(cm); aac_unmap_command(cm); cm->cm_flags |= AAC_CMD_COMPLETED; /* is there a completion handler? */ if (cm->cm_complete != NULL) { cm->cm_complete(cm); } else { /* assume that someone is sleeping on this command */ wakeup(cm); } } /* see if we can start some more I/O */ sc->flags &= ~AAC_QUEUE_FRZN; aac_startio(sc); mtx_unlock(&sc->aac_io_lock); } /* * Handle a bio submitted from a disk device. */ void aac_submit_bio(struct bio *bp) { struct aac_disk *ad; struct aac_softc *sc; ad = (struct aac_disk *)bp->bio_disk->d_drv1; sc = ad->ad_controller; fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); /* queue the BIO and try to get some work done */ aac_enqueue_bio(sc, bp); aac_startio(sc); } /* * Get a bio and build a command to go with it. 
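 */

/*
 * Usage sketch (hypothetical): the aacd disk driver's strategy routine
 * simply forwards bios here; aac_submit_bio() recovers the controller via
 * the aac_disk pointer that the disk driver stashed in d_drv1 when the
 * disk was created.
 */
static void
aacd_strategy_sketch(struct bio *bp)
{

	/* bp->bio_disk->d_drv1 carries the struct aac_disk (see above). */
	aac_submit_bio(bp);
}

/*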
*/ static int aac_bio_command(struct aac_softc *sc, struct aac_command **cmp) { struct aac_command *cm; struct aac_fib *fib; struct aac_disk *ad; struct bio *bp; fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); /* get the resources we will need */ cm = NULL; bp = NULL; if (aac_alloc_command(sc, &cm)) /* get a command */ goto fail; if ((bp = aac_dequeue_bio(sc)) == NULL) goto fail; /* fill out the command */ cm->cm_datalen = bp->bio_bcount; cm->cm_complete = aac_bio_complete; cm->cm_flags = AAC_REQ_BIO; cm->cm_private = bp; cm->cm_timestamp = time_uptime; /* build the FIB */ fib = cm->cm_fib; fib->Header.Size = sizeof(struct aac_fib_header); fib->Header.XferState = AAC_FIBSTATE_HOSTOWNED | AAC_FIBSTATE_INITIALISED | AAC_FIBSTATE_EMPTY | AAC_FIBSTATE_FROMHOST | AAC_FIBSTATE_REXPECTED | AAC_FIBSTATE_NORM | AAC_FIBSTATE_ASYNC | AAC_FIBSTATE_FAST_RESPONSE; /* build the read/write request */ ad = (struct aac_disk *)bp->bio_disk->d_drv1; if (sc->flags & AAC_FLAGS_RAW_IO) { struct aac_raw_io *raw; raw = (struct aac_raw_io *)&fib->data[0]; fib->Header.Command = RawIo; raw->BlockNumber = (u_int64_t)bp->bio_pblkno; raw->ByteCount = bp->bio_bcount; raw->ContainerId = ad->ad_container->co_mntobj.ObjectId; raw->BpTotal = 0; raw->BpComplete = 0; fib->Header.Size += sizeof(struct aac_raw_io); cm->cm_sgtable = (struct aac_sg_table *)&raw->SgMapRaw; if (bp->bio_cmd == BIO_READ) { raw->Flags = 1; cm->cm_flags |= AAC_CMD_DATAIN; } else { raw->Flags = 0; cm->cm_flags |= AAC_CMD_DATAOUT; } } else if ((sc->flags & AAC_FLAGS_SG_64BIT) == 0) { fib->Header.Command = ContainerCommand; if (bp->bio_cmd == BIO_READ) { struct aac_blockread *br; br = (struct aac_blockread *)&fib->data[0]; br->Command = VM_CtBlockRead; br->ContainerId = ad->ad_container->co_mntobj.ObjectId; br->BlockNumber = bp->bio_pblkno; br->ByteCount = bp->bio_bcount; fib->Header.Size += sizeof(struct aac_blockread); cm->cm_sgtable = &br->SgMap; cm->cm_flags |= AAC_CMD_DATAIN; } else { struct aac_blockwrite *bw; bw = (struct aac_blockwrite *)&fib->data[0]; bw->Command = VM_CtBlockWrite; bw->ContainerId = ad->ad_container->co_mntobj.ObjectId; bw->BlockNumber = bp->bio_pblkno; bw->ByteCount = bp->bio_bcount; bw->Stable = CUNSTABLE; fib->Header.Size += sizeof(struct aac_blockwrite); cm->cm_flags |= AAC_CMD_DATAOUT; cm->cm_sgtable = &bw->SgMap; } } else { fib->Header.Command = ContainerCommand64; if (bp->bio_cmd == BIO_READ) { struct aac_blockread64 *br; br = (struct aac_blockread64 *)&fib->data[0]; br->Command = VM_CtHostRead64; br->ContainerId = ad->ad_container->co_mntobj.ObjectId; br->SectorCount = bp->bio_bcount / AAC_BLOCK_SIZE; br->BlockNumber = bp->bio_pblkno; br->Pad = 0; br->Flags = 0; fib->Header.Size += sizeof(struct aac_blockread64); cm->cm_flags |= AAC_CMD_DATAIN; cm->cm_sgtable = (struct aac_sg_table *)&br->SgMap64; } else { struct aac_blockwrite64 *bw; bw = (struct aac_blockwrite64 *)&fib->data[0]; bw->Command = VM_CtHostWrite64; bw->ContainerId = ad->ad_container->co_mntobj.ObjectId; bw->SectorCount = bp->bio_bcount / AAC_BLOCK_SIZE; bw->BlockNumber = bp->bio_pblkno; bw->Pad = 0; bw->Flags = 0; fib->Header.Size += sizeof(struct aac_blockwrite64); cm->cm_flags |= AAC_CMD_DATAOUT; cm->cm_sgtable = (struct aac_sg_table *)&bw->SgMap64; } } *cmp = cm; return(0); fail: if (bp != NULL) aac_enqueue_bio(sc, bp); if (cm != NULL) aac_release_command(cm); return(ENOMEM); } /* * Handle a bio-instigated command that has been completed. 
*/ static void aac_bio_complete(struct aac_command *cm) { struct aac_blockread_response *brr; struct aac_blockwrite_response *bwr; struct bio *bp; AAC_FSAStatus status; /* fetch relevant status and then release the command */ bp = (struct bio *)cm->cm_private; if (bp->bio_cmd == BIO_READ) { brr = (struct aac_blockread_response *)&cm->cm_fib->data[0]; status = brr->Status; } else { bwr = (struct aac_blockwrite_response *)&cm->cm_fib->data[0]; status = bwr->Status; } aac_release_command(cm); /* fix up the bio based on status */ if (status == ST_OK) { bp->bio_resid = 0; } else { bp->bio_error = EIO; bp->bio_flags |= BIO_ERROR; } aac_biodone(bp); } /* * Submit a command to the controller, return when it completes. * XXX This is very dangerous! If the card has gone out to lunch, we could * be stuck here forever. At the same time, signals are not caught * because there is a risk that a signal could wakeup the sleep before * the card has a chance to complete the command. Since there is no way * to cancel a command that is in progress, we can't protect against the * card completing a command late and spamming the command and data * memory. So, we are held hostage until the command completes. */ static int aac_wait_command(struct aac_command *cm) { struct aac_softc *sc; int error; sc = cm->cm_sc; fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); /* Put the command on the ready queue and get things going */ aac_enqueue_ready(cm); aac_startio(sc); error = msleep(cm, &sc->aac_io_lock, PRIBIO, "aacwait", 0); return(error); } /* *Command Buffer Management */ /* * Allocate a command. */ int aac_alloc_command(struct aac_softc *sc, struct aac_command **cmp) { struct aac_command *cm; fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); if ((cm = aac_dequeue_free(sc)) == NULL) { if (sc->total_fibs < sc->aac_max_fibs) { mtx_lock(&sc->aac_io_lock); sc->aifflags |= AAC_AIFFLAGS_ALLOCFIBS; mtx_unlock(&sc->aac_io_lock); wakeup(sc->aifthread); } return (EBUSY); } *cmp = cm; return(0); } /* * Release a command back to the freelist. */ void aac_release_command(struct aac_command *cm) { struct aac_event *event; struct aac_softc *sc; sc = cm->cm_sc; fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); /* (re)initialize the command/FIB */ cm->cm_datalen = 0; cm->cm_sgtable = NULL; cm->cm_flags = 0; cm->cm_complete = NULL; cm->cm_private = NULL; cm->cm_queue = AAC_ADAP_NORM_CMD_QUEUE; cm->cm_fib->Header.XferState = AAC_FIBSTATE_EMPTY; cm->cm_fib->Header.StructType = AAC_FIBTYPE_TFIB; cm->cm_fib->Header.Flags = 0; cm->cm_fib->Header.SenderSize = cm->cm_sc->aac_max_fib_size; /* * These are duplicated in aac_start to cover the case where an * intermediate stage may have destroyed them. They're left * initialized here for debugging purposes only. */ cm->cm_fib->Header.ReceiverFibAddress = (u_int32_t)cm->cm_fibphys; cm->cm_fib->Header.SenderData = 0; aac_enqueue_free(cm); if ((event = TAILQ_FIRST(&sc->aac_ev_cmfree)) != NULL) { TAILQ_REMOVE(&sc->aac_ev_cmfree, event, ev_links); event->ev_callback(sc, event, event->ev_arg); } } /* * Map helper for command/FIB allocation. */ static void aac_map_command_helper(void *arg, bus_dma_segment_t *segs, int nseg, int error) { uint64_t *fibphys; fibphys = (uint64_t *)arg; *fibphys = segs[0].ds_addr; } /* * Allocate and initialize commands/FIBs for this adapter. 
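 *
 * Worked example (illustrative numbers): if the adapter reports a
 * preferred aac_max_fib_size of 512 bytes, then aac_max_fibs_alloc is
 * PAGE_SIZE / 512 = 8 on a 4 KB-page machine, so each DMAable block
 * allocated below carves out 8 FIBs, with FIB i at
 * fm->aac_fibs + i * 512 and bus address fibphys + i * 512.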
*/ static int aac_alloc_commands(struct aac_softc *sc) { struct aac_command *cm; struct aac_fibmap *fm; uint64_t fibphys; int i, error; fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); if (sc->total_fibs + sc->aac_max_fibs_alloc > sc->aac_max_fibs) return (ENOMEM); fm = malloc(sizeof(struct aac_fibmap), M_AACBUF, M_NOWAIT|M_ZERO); if (fm == NULL) return (ENOMEM); /* allocate the FIBs in DMAable memory and load them */ if (bus_dmamem_alloc(sc->aac_fib_dmat, (void **)&fm->aac_fibs, BUS_DMA_NOWAIT, &fm->aac_fibmap)) { device_printf(sc->aac_dev, "Not enough contiguous memory available.\n"); free(fm, M_AACBUF); return (ENOMEM); } /* Ignore errors since this doesn't bounce */ (void)bus_dmamap_load(sc->aac_fib_dmat, fm->aac_fibmap, fm->aac_fibs, sc->aac_max_fibs_alloc * sc->aac_max_fib_size, aac_map_command_helper, &fibphys, 0); /* initialize constant fields in the command structure */ bzero(fm->aac_fibs, sc->aac_max_fibs_alloc * sc->aac_max_fib_size); for (i = 0; i < sc->aac_max_fibs_alloc; i++) { cm = sc->aac_commands + sc->total_fibs; fm->aac_commands = cm; cm->cm_sc = sc; cm->cm_fib = (struct aac_fib *) ((u_int8_t *)fm->aac_fibs + i*sc->aac_max_fib_size); cm->cm_fibphys = fibphys + i*sc->aac_max_fib_size; cm->cm_index = sc->total_fibs; if ((error = bus_dmamap_create(sc->aac_buffer_dmat, 0, &cm->cm_datamap)) != 0) break; mtx_lock(&sc->aac_io_lock); aac_release_command(cm); sc->total_fibs++; mtx_unlock(&sc->aac_io_lock); } if (i > 0) { mtx_lock(&sc->aac_io_lock); TAILQ_INSERT_TAIL(&sc->aac_fibmap_tqh, fm, fm_link); fwprintf(sc, HBA_FLAGS_DBG_COMM_B, "total_fibs= %d\n", sc->total_fibs); mtx_unlock(&sc->aac_io_lock); return (0); } bus_dmamap_unload(sc->aac_fib_dmat, fm->aac_fibmap); bus_dmamem_free(sc->aac_fib_dmat, fm->aac_fibs, fm->aac_fibmap); free(fm, M_AACBUF); return (ENOMEM); } /* * Free FIBs owned by this adapter. */ static void aac_free_commands(struct aac_softc *sc) { struct aac_fibmap *fm; struct aac_command *cm; int i; fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); while ((fm = TAILQ_FIRST(&sc->aac_fibmap_tqh)) != NULL) { TAILQ_REMOVE(&sc->aac_fibmap_tqh, fm, fm_link); /* * We check against total_fibs to handle partially * allocated blocks. */ for (i = 0; i < sc->aac_max_fibs_alloc && sc->total_fibs--; i++) { cm = fm->aac_commands + i; bus_dmamap_destroy(sc->aac_buffer_dmat, cm->cm_datamap); } bus_dmamap_unload(sc->aac_fib_dmat, fm->aac_fibmap); bus_dmamem_free(sc->aac_fib_dmat, fm->aac_fibs, fm->aac_fibmap); free(fm, M_AACBUF); } } /* * Command-mapping helper function - populate this command's s/g table. 
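 *
 * Example (illustrative): a 12 KB transfer that busdma split into two
 * segments of 8 KB and 4 KB produces SgCount = 2 with
 * SgEntry[0] = { addr0, 8192 } and SgEntry[1] = { addr1, 4096 }, and
 * grows fib->Header.Size by 2 * sizeof(struct aac_sg_entry).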
*/ static void aac_map_command_sg(void *arg, bus_dma_segment_t *segs, int nseg, int error) { struct aac_softc *sc; struct aac_command *cm; struct aac_fib *fib; int i; cm = (struct aac_command *)arg; sc = cm->cm_sc; fib = cm->cm_fib; fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); /* copy into the FIB */ if (cm->cm_sgtable != NULL) { if (fib->Header.Command == RawIo) { struct aac_sg_tableraw *sg; sg = (struct aac_sg_tableraw *)cm->cm_sgtable; sg->SgCount = nseg; for (i = 0; i < nseg; i++) { sg->SgEntryRaw[i].SgAddress = segs[i].ds_addr; sg->SgEntryRaw[i].SgByteCount = segs[i].ds_len; sg->SgEntryRaw[i].Next = 0; sg->SgEntryRaw[i].Prev = 0; sg->SgEntryRaw[i].Flags = 0; } /* update the FIB size for the s/g count */ fib->Header.Size += nseg*sizeof(struct aac_sg_entryraw); } else if ((cm->cm_sc->flags & AAC_FLAGS_SG_64BIT) == 0) { struct aac_sg_table *sg; sg = cm->cm_sgtable; sg->SgCount = nseg; for (i = 0; i < nseg; i++) { sg->SgEntry[i].SgAddress = segs[i].ds_addr; sg->SgEntry[i].SgByteCount = segs[i].ds_len; } /* update the FIB size for the s/g count */ fib->Header.Size += nseg*sizeof(struct aac_sg_entry); } else { struct aac_sg_table64 *sg; sg = (struct aac_sg_table64 *)cm->cm_sgtable; sg->SgCount = nseg; for (i = 0; i < nseg; i++) { sg->SgEntry64[i].SgAddress = segs[i].ds_addr; sg->SgEntry64[i].SgByteCount = segs[i].ds_len; } /* update the FIB size for the s/g count */ fib->Header.Size += nseg*sizeof(struct aac_sg_entry64); } } /* Fix up the address values in the FIB. Use the command array index * instead of a pointer since these fields are only 32 bits. Shift * the SenderFibAddress over to make room for the fast response bit * and for the AIF bit */ cm->cm_fib->Header.SenderFibAddress = (cm->cm_index << 2); cm->cm_fib->Header.ReceiverFibAddress = (u_int32_t)cm->cm_fibphys; /* save a pointer to the command for speedy reverse-lookup */ cm->cm_fib->Header.SenderData = cm->cm_index; if (cm->cm_flags & AAC_CMD_DATAIN) bus_dmamap_sync(sc->aac_buffer_dmat, cm->cm_datamap, BUS_DMASYNC_PREREAD); if (cm->cm_flags & AAC_CMD_DATAOUT) bus_dmamap_sync(sc->aac_buffer_dmat, cm->cm_datamap, BUS_DMASYNC_PREWRITE); cm->cm_flags |= AAC_CMD_MAPPED; if (sc->flags & AAC_FLAGS_NEW_COMM) { int count = 10000000L; while (AAC_SEND_COMMAND(sc, cm) != 0) { if (--count == 0) { aac_unmap_command(cm); sc->flags |= AAC_QUEUE_FRZN; aac_requeue_ready(cm); } DELAY(5); /* wait 5 usec. */ } } else { /* Put the FIB on the outbound queue */ if (aac_enqueue_fib(sc, cm->cm_queue, cm) == EBUSY) { aac_unmap_command(cm); sc->flags |= AAC_QUEUE_FRZN; aac_requeue_ready(cm); } } } /* * Unmap a command from controller-visible space. */ static void aac_unmap_command(struct aac_command *cm) { struct aac_softc *sc; sc = cm->cm_sc; fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); if (!(cm->cm_flags & AAC_CMD_MAPPED)) return; if (cm->cm_datalen != 0) { if (cm->cm_flags & AAC_CMD_DATAIN) bus_dmamap_sync(sc->aac_buffer_dmat, cm->cm_datamap, BUS_DMASYNC_POSTREAD); if (cm->cm_flags & AAC_CMD_DATAOUT) bus_dmamap_sync(sc->aac_buffer_dmat, cm->cm_datamap, BUS_DMASYNC_POSTWRITE); bus_dmamap_unload(sc->aac_buffer_dmat, cm->cm_datamap); } cm->cm_flags &= ~AAC_CMD_MAPPED; } /* * Hardware Interface */ /* * Initialize the adapter. 
*/ static void aac_common_map(void *arg, bus_dma_segment_t *segs, int nseg, int error) { struct aac_softc *sc; sc = (struct aac_softc *)arg; fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); sc->aac_common_busaddr = segs[0].ds_addr; } static int aac_check_firmware(struct aac_softc *sc) { u_int32_t code, major, minor, options = 0, atu_size = 0; int rid, status; time_t then; fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); /* * Wait for the adapter to come ready. */ then = time_uptime; do { code = AAC_GET_FWSTATUS(sc); if (code & AAC_SELF_TEST_FAILED) { device_printf(sc->aac_dev, "FATAL: selftest failed\n"); return(ENXIO); } if (code & AAC_KERNEL_PANIC) { device_printf(sc->aac_dev, "FATAL: controller kernel panic"); return(ENXIO); } if (time_uptime > (then + AAC_BOOT_TIMEOUT)) { device_printf(sc->aac_dev, "FATAL: controller not coming ready, " "status %x\n", code); return(ENXIO); } } while (!(code & AAC_UP_AND_RUNNING)); /* * Retrieve the firmware version numbers. Dell PERC2/QC cards with * firmware version 1.x are not compatible with this driver. */ if (sc->flags & AAC_FLAGS_PERC2QC) { if (aac_sync_command(sc, AAC_MONKER_GETKERNVER, 0, 0, 0, 0, NULL)) { device_printf(sc->aac_dev, "Error reading firmware version\n"); return (EIO); } /* These numbers are stored as ASCII! */ major = (AAC_GET_MAILBOX(sc, 1) & 0xff) - 0x30; minor = (AAC_GET_MAILBOX(sc, 2) & 0xff) - 0x30; if (major == 1) { device_printf(sc->aac_dev, "Firmware version %d.%d is not supported.\n", major, minor); return (EINVAL); } } /* * Retrieve the capabilities/supported options word so we know what * work-arounds to enable. Some firmware revs don't support this * command. */ if (aac_sync_command(sc, AAC_MONKER_GETINFO, 0, 0, 0, 0, &status)) { if (status != AAC_SRB_STS_INVALID_REQUEST) { device_printf(sc->aac_dev, "RequestAdapterInfo failed\n"); return (EIO); } } else { options = AAC_GET_MAILBOX(sc, 1); atu_size = AAC_GET_MAILBOX(sc, 2); sc->supported_options = options; if ((options & AAC_SUPPORTED_4GB_WINDOW) != 0 && (sc->flags & AAC_FLAGS_NO4GB) == 0) sc->flags |= AAC_FLAGS_4GB_WINDOW; if (options & AAC_SUPPORTED_NONDASD) sc->flags |= AAC_FLAGS_ENABLE_CAM; if ((options & AAC_SUPPORTED_SGMAP_HOST64) != 0 && (sizeof(bus_addr_t) > 4)) { device_printf(sc->aac_dev, "Enabling 64-bit address support\n"); sc->flags |= AAC_FLAGS_SG_64BIT; } if ((options & AAC_SUPPORTED_NEW_COMM) && sc->aac_if->aif_send_command) sc->flags |= AAC_FLAGS_NEW_COMM; if (options & AAC_SUPPORTED_64BIT_ARRAYSIZE) sc->flags |= AAC_FLAGS_ARRAY_64BIT; } /* Check for broken hardware that does a lower number of commands */ sc->aac_max_fibs = (sc->flags & AAC_FLAGS_256FIBS ? 256:512); /* Remap mem. 
resource, if required */ if ((sc->flags & AAC_FLAGS_NEW_COMM) && atu_size > rman_get_size(sc->aac_regs_res1)) { rid = rman_get_rid(sc->aac_regs_res1); bus_release_resource(sc->aac_dev, SYS_RES_MEMORY, rid, sc->aac_regs_res1); sc->aac_regs_res1 = bus_alloc_resource_anywhere(sc->aac_dev, SYS_RES_MEMORY, &rid, atu_size, RF_ACTIVE); if (sc->aac_regs_res1 == NULL) { sc->aac_regs_res1 = bus_alloc_resource_any( sc->aac_dev, SYS_RES_MEMORY, &rid, RF_ACTIVE); if (sc->aac_regs_res1 == NULL) { device_printf(sc->aac_dev, "couldn't allocate register window\n"); return (ENXIO); } sc->flags &= ~AAC_FLAGS_NEW_COMM; } sc->aac_btag1 = rman_get_bustag(sc->aac_regs_res1); sc->aac_bhandle1 = rman_get_bushandle(sc->aac_regs_res1); if (sc->aac_hwif == AAC_HWIF_NARK) { sc->aac_regs_res0 = sc->aac_regs_res1; sc->aac_btag0 = sc->aac_btag1; sc->aac_bhandle0 = sc->aac_bhandle1; } } /* Read preferred settings */ sc->aac_max_fib_size = sizeof(struct aac_fib); sc->aac_max_sectors = 128; /* 64KB */ if (sc->flags & AAC_FLAGS_SG_64BIT) sc->aac_sg_tablesize = (AAC_FIB_DATASIZE - sizeof(struct aac_blockwrite64)) / sizeof(struct aac_sg_entry64); else sc->aac_sg_tablesize = (AAC_FIB_DATASIZE - sizeof(struct aac_blockwrite)) / sizeof(struct aac_sg_entry); if (!aac_sync_command(sc, AAC_MONKER_GETCOMMPREF, 0, 0, 0, 0, NULL)) { options = AAC_GET_MAILBOX(sc, 1); sc->aac_max_fib_size = (options & 0xFFFF); sc->aac_max_sectors = (options >> 16) << 1; options = AAC_GET_MAILBOX(sc, 2); sc->aac_sg_tablesize = (options >> 16); options = AAC_GET_MAILBOX(sc, 3); sc->aac_max_fibs = (options & 0xFFFF); } if (sc->aac_max_fib_size > PAGE_SIZE) sc->aac_max_fib_size = PAGE_SIZE; sc->aac_max_fibs_alloc = PAGE_SIZE / sc->aac_max_fib_size; if (sc->aac_max_fib_size > sizeof(struct aac_fib)) { sc->flags |= AAC_FLAGS_RAW_IO; device_printf(sc->aac_dev, "Enable Raw I/O\n"); } if ((sc->flags & AAC_FLAGS_RAW_IO) && (sc->flags & AAC_FLAGS_ARRAY_64BIT)) { sc->flags |= AAC_FLAGS_LBA_64BIT; device_printf(sc->aac_dev, "Enable 64-bit array\n"); } return (0); } static int aac_init(struct aac_softc *sc) { struct aac_adapter_init *ip; u_int32_t qoffset; int error; fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); /* * Fill in the init structure. This tells the adapter about the * physical location of various important shared data structures. */ ip = &sc->aac_common->ac_init; ip->InitStructRevision = AAC_INIT_STRUCT_REVISION; if (sc->aac_max_fib_size > sizeof(struct aac_fib)) { ip->InitStructRevision = AAC_INIT_STRUCT_REVISION_4; sc->flags |= AAC_FLAGS_RAW_IO; } ip->MiniPortRevision = AAC_INIT_STRUCT_MINIPORT_REVISION; ip->AdapterFibsPhysicalAddress = sc->aac_common_busaddr + offsetof(struct aac_common, ac_fibs); ip->AdapterFibsVirtualAddress = 0; ip->AdapterFibsSize = AAC_ADAPTER_FIBS * sizeof(struct aac_fib); ip->AdapterFibAlign = sizeof(struct aac_fib); ip->PrintfBufferAddress = sc->aac_common_busaddr + offsetof(struct aac_common, ac_printf); ip->PrintfBufferSize = AAC_PRINTF_BUFSIZE; /* * The adapter assumes that pages are 4K in size, except on some * broken firmware versions that do the page->byte conversion twice, * therefore 'assuming' that this value is in 16MB units (2^24). * Round up since the granularity is so high. 
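 *
 * Worked example (illustrative): with 512 MB of RAM, ctob(physmem) is
 * 2^29 bytes, so HostPhysMemPages below is 2^29 / AAC_PAGE_SIZE = 131072.
 * For AAC_FLAGS_BROKEN_MEMMAP firmware the value is divided by
 * AAC_PAGE_SIZE once more (rounded up), effectively reporting memory in
 * 2^24-byte (16 MB) units.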
*/ ip->HostPhysMemPages = ctob(physmem) / AAC_PAGE_SIZE; if (sc->flags & AAC_FLAGS_BROKEN_MEMMAP) { ip->HostPhysMemPages = (ip->HostPhysMemPages + AAC_PAGE_SIZE) / AAC_PAGE_SIZE; } ip->HostElapsedSeconds = time_uptime; /* reset later if invalid */ ip->InitFlags = 0; if (sc->flags & AAC_FLAGS_NEW_COMM) { ip->InitFlags |= AAC_INITFLAGS_NEW_COMM_SUPPORTED; device_printf(sc->aac_dev, "New comm. interface enabled\n"); } ip->MaxIoCommands = sc->aac_max_fibs; ip->MaxIoSize = sc->aac_max_sectors << 9; ip->MaxFibSize = sc->aac_max_fib_size; /* * Initialize FIB queues. Note that it appears that the layout of the * indexes and the segmentation of the entries may be mandated by the * adapter, which is only told about the base of the queue index fields. * * The initial values of the indices are assumed to inform the adapter * of the sizes of the respective queues, and theoretically it could * work out the entire layout of the queue structures from this. We * take the easy route and just lay this area out like everyone else * does. * * The Linux driver uses a much more complex scheme whereby several * header records are kept for each queue. We use a couple of generic * list manipulation functions which 'know' the size of each list by * virtue of a table. */ qoffset = offsetof(struct aac_common, ac_qbuf) + AAC_QUEUE_ALIGN; qoffset &= ~(AAC_QUEUE_ALIGN - 1); sc->aac_queues = (struct aac_queue_table *)((uintptr_t)sc->aac_common + qoffset); ip->CommHeaderAddress = sc->aac_common_busaddr + qoffset; sc->aac_queues->qt_qindex[AAC_HOST_NORM_CMD_QUEUE][AAC_PRODUCER_INDEX] = AAC_HOST_NORM_CMD_ENTRIES; sc->aac_queues->qt_qindex[AAC_HOST_NORM_CMD_QUEUE][AAC_CONSUMER_INDEX] = AAC_HOST_NORM_CMD_ENTRIES; sc->aac_queues->qt_qindex[AAC_HOST_HIGH_CMD_QUEUE][AAC_PRODUCER_INDEX] = AAC_HOST_HIGH_CMD_ENTRIES; sc->aac_queues->qt_qindex[AAC_HOST_HIGH_CMD_QUEUE][AAC_CONSUMER_INDEX] = AAC_HOST_HIGH_CMD_ENTRIES; sc->aac_queues->qt_qindex[AAC_ADAP_NORM_CMD_QUEUE][AAC_PRODUCER_INDEX] = AAC_ADAP_NORM_CMD_ENTRIES; sc->aac_queues->qt_qindex[AAC_ADAP_NORM_CMD_QUEUE][AAC_CONSUMER_INDEX] = AAC_ADAP_NORM_CMD_ENTRIES; sc->aac_queues->qt_qindex[AAC_ADAP_HIGH_CMD_QUEUE][AAC_PRODUCER_INDEX] = AAC_ADAP_HIGH_CMD_ENTRIES; sc->aac_queues->qt_qindex[AAC_ADAP_HIGH_CMD_QUEUE][AAC_CONSUMER_INDEX] = AAC_ADAP_HIGH_CMD_ENTRIES; sc->aac_queues->qt_qindex[AAC_HOST_NORM_RESP_QUEUE][AAC_PRODUCER_INDEX]= AAC_HOST_NORM_RESP_ENTRIES; sc->aac_queues->qt_qindex[AAC_HOST_NORM_RESP_QUEUE][AAC_CONSUMER_INDEX]= AAC_HOST_NORM_RESP_ENTRIES; sc->aac_queues->qt_qindex[AAC_HOST_HIGH_RESP_QUEUE][AAC_PRODUCER_INDEX]= AAC_HOST_HIGH_RESP_ENTRIES; sc->aac_queues->qt_qindex[AAC_HOST_HIGH_RESP_QUEUE][AAC_CONSUMER_INDEX]= AAC_HOST_HIGH_RESP_ENTRIES; sc->aac_queues->qt_qindex[AAC_ADAP_NORM_RESP_QUEUE][AAC_PRODUCER_INDEX]= AAC_ADAP_NORM_RESP_ENTRIES; sc->aac_queues->qt_qindex[AAC_ADAP_NORM_RESP_QUEUE][AAC_CONSUMER_INDEX]= AAC_ADAP_NORM_RESP_ENTRIES; sc->aac_queues->qt_qindex[AAC_ADAP_HIGH_RESP_QUEUE][AAC_PRODUCER_INDEX]= AAC_ADAP_HIGH_RESP_ENTRIES; sc->aac_queues->qt_qindex[AAC_ADAP_HIGH_RESP_QUEUE][AAC_CONSUMER_INDEX]= AAC_ADAP_HIGH_RESP_ENTRIES; sc->aac_qentries[AAC_HOST_NORM_CMD_QUEUE] = &sc->aac_queues->qt_HostNormCmdQueue[0]; sc->aac_qentries[AAC_HOST_HIGH_CMD_QUEUE] = &sc->aac_queues->qt_HostHighCmdQueue[0]; sc->aac_qentries[AAC_ADAP_NORM_CMD_QUEUE] = &sc->aac_queues->qt_AdapNormCmdQueue[0]; sc->aac_qentries[AAC_ADAP_HIGH_CMD_QUEUE] = &sc->aac_queues->qt_AdapHighCmdQueue[0]; sc->aac_qentries[AAC_HOST_NORM_RESP_QUEUE] = &sc->aac_queues->qt_HostNormRespQueue[0]; 
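/* * The aac_qentries[] table caches a pointer to the first entry of each FIB * queue so the queue manipulation routines further down can locate a queue's * entry array directly from its queue number, e.g. * (sc->aac_qentries[queue] + pi)->aq_fib_addr in aac_enqueue_fib(). */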
sc->aac_qentries[AAC_HOST_HIGH_RESP_QUEUE] = &sc->aac_queues->qt_HostHighRespQueue[0]; sc->aac_qentries[AAC_ADAP_NORM_RESP_QUEUE] = &sc->aac_queues->qt_AdapNormRespQueue[0]; sc->aac_qentries[AAC_ADAP_HIGH_RESP_QUEUE] = &sc->aac_queues->qt_AdapHighRespQueue[0]; /* * Do controller-type-specific initialisation */ switch (sc->aac_hwif) { case AAC_HWIF_I960RX: AAC_MEM0_SETREG4(sc, AAC_RX_ODBR, ~0); break; case AAC_HWIF_RKT: AAC_MEM0_SETREG4(sc, AAC_RKT_ODBR, ~0); break; default: break; } /* * Give the init structure to the controller. */ if (aac_sync_command(sc, AAC_MONKER_INITSTRUCT, sc->aac_common_busaddr + offsetof(struct aac_common, ac_init), 0, 0, 0, NULL)) { device_printf(sc->aac_dev, "error establishing init structure\n"); error = EIO; goto out; } error = 0; out: return(error); } static int aac_setup_intr(struct aac_softc *sc) { if (sc->flags & AAC_FLAGS_NEW_COMM) { if (bus_setup_intr(sc->aac_dev, sc->aac_irq, INTR_MPSAFE|INTR_TYPE_BIO, NULL, aac_new_intr, sc, &sc->aac_intr)) { device_printf(sc->aac_dev, "can't set up interrupt\n"); return (EINVAL); } } else { if (bus_setup_intr(sc->aac_dev, sc->aac_irq, INTR_TYPE_BIO, aac_filter, NULL, sc, &sc->aac_intr)) { device_printf(sc->aac_dev, "can't set up interrupt filter\n"); return (EINVAL); } } return (0); } /* * Send a synchronous command to the controller and wait for a result. * Indicate if the controller completed the command with an error status. */ static int aac_sync_command(struct aac_softc *sc, u_int32_t command, u_int32_t arg0, u_int32_t arg1, u_int32_t arg2, u_int32_t arg3, u_int32_t *sp) { time_t then; u_int32_t status; fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); /* populate the mailbox */ AAC_SET_MAILBOX(sc, command, arg0, arg1, arg2, arg3); /* ensure the sync command doorbell flag is cleared */ AAC_CLEAR_ISTATUS(sc, AAC_DB_SYNC_COMMAND); /* then set it to signal the adapter */ AAC_QNOTIFY(sc, AAC_DB_SYNC_COMMAND); /* spin waiting for the command to complete */ then = time_uptime; do { if (time_uptime > (then + AAC_IMMEDIATE_TIMEOUT)) { fwprintf(sc, HBA_FLAGS_DBG_ERROR_B, "timed out"); return(EIO); } } while (!(AAC_GET_ISTATUS(sc) & AAC_DB_SYNC_COMMAND)); /* clear the completion flag */ AAC_CLEAR_ISTATUS(sc, AAC_DB_SYNC_COMMAND); /* get the command status */ status = AAC_GET_MAILBOX(sc, 0); if (sp != NULL) *sp = status; if (status != AAC_SRB_STS_SUCCESS) return (-1); return(0); } int aac_sync_fib(struct aac_softc *sc, u_int32_t command, u_int32_t xferstate, struct aac_fib *fib, u_int16_t datasize) { fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); mtx_assert(&sc->aac_io_lock, MA_OWNED); if (datasize > AAC_FIB_DATASIZE) return(EINVAL); /* * Set up the sync FIB */ fib->Header.XferState = AAC_FIBSTATE_HOSTOWNED | AAC_FIBSTATE_INITIALISED | AAC_FIBSTATE_EMPTY; fib->Header.XferState |= xferstate; fib->Header.Command = command; fib->Header.StructType = AAC_FIBTYPE_TFIB; fib->Header.Size = sizeof(struct aac_fib_header) + datasize; fib->Header.SenderSize = sizeof(struct aac_fib); fib->Header.SenderFibAddress = 0; /* Not needed */ fib->Header.ReceiverFibAddress = sc->aac_common_busaddr + offsetof(struct aac_common, ac_sync_fib); /* * Give the FIB to the controller, wait for a response. */ if (aac_sync_command(sc, AAC_MONKER_SYNCFIB, fib->Header.ReceiverFibAddress, 0, 0, 0, NULL)) { fwprintf(sc, HBA_FLAGS_DBG_ERROR_B, "IO error"); return(EIO); } return (0); } /* * Adapter-space FIB queue manipulation * * Note that the queue implementation here is a little funky; neither the PI nor * CI will ever be zero.
This behaviour is a controller feature. */ static const struct { int size; int notify; } aac_qinfo[] = { {AAC_HOST_NORM_CMD_ENTRIES, AAC_DB_COMMAND_NOT_FULL}, {AAC_HOST_HIGH_CMD_ENTRIES, 0}, {AAC_ADAP_NORM_CMD_ENTRIES, AAC_DB_COMMAND_READY}, {AAC_ADAP_HIGH_CMD_ENTRIES, 0}, {AAC_HOST_NORM_RESP_ENTRIES, AAC_DB_RESPONSE_NOT_FULL}, {AAC_HOST_HIGH_RESP_ENTRIES, 0}, {AAC_ADAP_NORM_RESP_ENTRIES, AAC_DB_RESPONSE_READY}, {AAC_ADAP_HIGH_RESP_ENTRIES, 0} }; /* * Atomically insert an entry into the nominated queue, returns 0 on success or * EBUSY if the queue is full. * * Note: it would be more efficient to defer notifying the controller in * the case where we may be inserting several entries in rapid succession, * but implementing this usefully may be difficult (it would involve a * separate queue/notify interface). */ static int aac_enqueue_fib(struct aac_softc *sc, int queue, struct aac_command *cm) { u_int32_t pi, ci; int error; u_int32_t fib_size; u_int32_t fib_addr; fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); fib_size = cm->cm_fib->Header.Size; fib_addr = cm->cm_fib->Header.ReceiverFibAddress; /* get the producer/consumer indices */ pi = sc->aac_queues->qt_qindex[queue][AAC_PRODUCER_INDEX]; ci = sc->aac_queues->qt_qindex[queue][AAC_CONSUMER_INDEX]; /* wrap the queue? */ if (pi >= aac_qinfo[queue].size) pi = 0; /* check for queue full */ if ((pi + 1) == ci) { error = EBUSY; goto out; } /* * To avoid a race with its completion interrupt, place this command on * the busy queue prior to advertising it to the controller. */ aac_enqueue_busy(cm); /* populate queue entry */ (sc->aac_qentries[queue] + pi)->aq_fib_size = fib_size; (sc->aac_qentries[queue] + pi)->aq_fib_addr = fib_addr; /* update producer index */ sc->aac_queues->qt_qindex[queue][AAC_PRODUCER_INDEX] = pi + 1; /* notify the adapter if we know how */ if (aac_qinfo[queue].notify != 0) AAC_QNOTIFY(sc, aac_qinfo[queue].notify); error = 0; out: return(error); } /* * Atomically remove one entry from the nominated queue, returns 0 on * success or ENOENT if the queue is empty. */ static int aac_dequeue_fib(struct aac_softc *sc, int queue, u_int32_t *fib_size, struct aac_fib **fib_addr) { u_int32_t pi, ci; u_int32_t fib_index; int error; int notify; fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); /* get the producer/consumer indices */ pi = sc->aac_queues->qt_qindex[queue][AAC_PRODUCER_INDEX]; ci = sc->aac_queues->qt_qindex[queue][AAC_CONSUMER_INDEX]; /* check for queue empty */ if (ci == pi) { error = ENOENT; goto out; } /* wrap the pi so the following test works */ if (pi >= aac_qinfo[queue].size) pi = 0; notify = 0; if (ci == pi + 1) notify++; /* wrap the queue? */ if (ci >= aac_qinfo[queue].size) ci = 0; /* fetch the entry */ *fib_size = (sc->aac_qentries[queue] + ci)->aq_fib_size; switch (queue) { case AAC_HOST_NORM_CMD_QUEUE: case AAC_HOST_HIGH_CMD_QUEUE: /* * The aq_fib_addr is only 32 bits wide so it can't be counted * on to hold an address. For AIF's, the adapter assumes * that it's giving us an address into the array of AIF fibs. * Therefore, we have to convert it to an index. */ fib_index = (sc->aac_qentries[queue] + ci)->aq_fib_addr / sizeof(struct aac_fib); *fib_addr = &sc->aac_common->ac_fibs[fib_index]; break; case AAC_HOST_NORM_RESP_QUEUE: case AAC_HOST_HIGH_RESP_QUEUE: { struct aac_command *cm; /* * As above, an index is used instead of an actual address. * Gotta shift the index to account for the fast response * bit. 
No other correction is needed since this value was * originally provided by the driver via the SenderFibAddress * field. */ fib_index = (sc->aac_qentries[queue] + ci)->aq_fib_addr; cm = sc->aac_commands + (fib_index >> 2); *fib_addr = cm->cm_fib; /* * Is this a fast response? If it is, update the fib fields in * local memory since the whole fib isn't DMA'd back up. */ if (fib_index & 0x01) { (*fib_addr)->Header.XferState |= AAC_FIBSTATE_DONEADAP; *((u_int32_t*)((*fib_addr)->data)) = AAC_ERROR_NORMAL; } break; } default: panic("Invalid queue in aac_dequeue_fib()"); break; } /* update consumer index */ sc->aac_queues->qt_qindex[queue][AAC_CONSUMER_INDEX] = ci + 1; /* if we have made the queue un-full, notify the adapter */ if (notify && (aac_qinfo[queue].notify != 0)) AAC_QNOTIFY(sc, aac_qinfo[queue].notify); error = 0; out: return(error); } /* * Put our response to an Adapter Initiated Fib on the response queue */ static int aac_enqueue_response(struct aac_softc *sc, int queue, struct aac_fib *fib) { u_int32_t pi, ci; int error; u_int32_t fib_size; u_int32_t fib_addr; fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); /* Tell the adapter where the FIB is */ fib_size = fib->Header.Size; fib_addr = fib->Header.SenderFibAddress; fib->Header.ReceiverFibAddress = fib_addr; /* get the producer/consumer indices */ pi = sc->aac_queues->qt_qindex[queue][AAC_PRODUCER_INDEX]; ci = sc->aac_queues->qt_qindex[queue][AAC_CONSUMER_INDEX]; /* wrap the queue? */ if (pi >= aac_qinfo[queue].size) pi = 0; /* check for queue full */ if ((pi + 1) == ci) { error = EBUSY; goto out; } /* populate queue entry */ (sc->aac_qentries[queue] + pi)->aq_fib_size = fib_size; (sc->aac_qentries[queue] + pi)->aq_fib_addr = fib_addr; /* update producer index */ sc->aac_queues->qt_qindex[queue][AAC_PRODUCER_INDEX] = pi + 1; /* notify the adapter if we know how */ if (aac_qinfo[queue].notify != 0) AAC_QNOTIFY(sc, aac_qinfo[queue].notify); error = 0; out: return(error); } /* * Check for commands that have been outstanding for a suspiciously long time, * and complain about them. */ static void aac_timeout(struct aac_softc *sc) { struct aac_command *cm; time_t deadline; int timedout, code; /* * Traverse the busy command list, complain about late commands once * only. */ timedout = 0; deadline = time_uptime - AAC_CMD_TIMEOUT; TAILQ_FOREACH(cm, &sc->aac_busy, cm_link) { if ((cm->cm_timestamp < deadline) && !(cm->cm_flags & AAC_CMD_TIMEDOUT)) { cm->cm_flags |= AAC_CMD_TIMEDOUT; device_printf(sc->aac_dev, "COMMAND %p (TYPE %d) TIMEOUT AFTER %d SECONDS\n", cm, cm->cm_fib->Header.Command, (int)(time_uptime-cm->cm_timestamp)); AAC_PRINT_FIB(sc, cm->cm_fib); timedout++; } } if (timedout) { code = AAC_GET_FWSTATUS(sc); if (code != AAC_UP_AND_RUNNING) { device_printf(sc->aac_dev, "WARNING! Controller is no " "longer running! code= 0x%x\n", code); } } } /* * Interface Function Vectors */ /* * Read the current firmware status word. */ static int aac_sa_get_fwstatus(struct aac_softc *sc) { fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); return(AAC_MEM0_GETREG4(sc, AAC_SA_FWSTATUS)); } static int aac_rx_get_fwstatus(struct aac_softc *sc) { fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); return(AAC_MEM0_GETREG4(sc, sc->flags & AAC_FLAGS_NEW_COMM ? AAC_RX_OMR0 : AAC_RX_FWSTATUS)); } static int aac_rkt_get_fwstatus(struct aac_softc *sc) { fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); return(AAC_MEM0_GETREG4(sc, sc->flags & AAC_FLAGS_NEW_COMM ?
AAC_RKT_OMR0 : AAC_RKT_FWSTATUS)); } /* * Notify the controller of a change in a given queue */ static void aac_sa_qnotify(struct aac_softc *sc, int qbit) { fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); AAC_MEM0_SETREG2(sc, AAC_SA_DOORBELL1_SET, qbit); } static void aac_rx_qnotify(struct aac_softc *sc, int qbit) { fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); AAC_MEM0_SETREG4(sc, AAC_RX_IDBR, qbit); } static void aac_rkt_qnotify(struct aac_softc *sc, int qbit) { fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); AAC_MEM0_SETREG4(sc, AAC_RKT_IDBR, qbit); } /* * Get the interrupt reason bits */ static int aac_sa_get_istatus(struct aac_softc *sc) { fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); return(AAC_MEM0_GETREG2(sc, AAC_SA_DOORBELL0)); } static int aac_rx_get_istatus(struct aac_softc *sc) { fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); return(AAC_MEM0_GETREG4(sc, AAC_RX_ODBR)); } static int aac_rkt_get_istatus(struct aac_softc *sc) { fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); return(AAC_MEM0_GETREG4(sc, AAC_RKT_ODBR)); } /* * Clear some interrupt reason bits */ static void aac_sa_clear_istatus(struct aac_softc *sc, int mask) { fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); AAC_MEM0_SETREG2(sc, AAC_SA_DOORBELL0_CLEAR, mask); } static void aac_rx_clear_istatus(struct aac_softc *sc, int mask) { fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); AAC_MEM0_SETREG4(sc, AAC_RX_ODBR, mask); } static void aac_rkt_clear_istatus(struct aac_softc *sc, int mask) { fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); AAC_MEM0_SETREG4(sc, AAC_RKT_ODBR, mask); } /* * Populate the mailbox and set the command word */ static void aac_sa_set_mailbox(struct aac_softc *sc, u_int32_t command, u_int32_t arg0, u_int32_t arg1, u_int32_t arg2, u_int32_t arg3) { fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); AAC_MEM1_SETREG4(sc, AAC_SA_MAILBOX, command); AAC_MEM1_SETREG4(sc, AAC_SA_MAILBOX + 4, arg0); AAC_MEM1_SETREG4(sc, AAC_SA_MAILBOX + 8, arg1); AAC_MEM1_SETREG4(sc, AAC_SA_MAILBOX + 12, arg2); AAC_MEM1_SETREG4(sc, AAC_SA_MAILBOX + 16, arg3); } static void aac_rx_set_mailbox(struct aac_softc *sc, u_int32_t command, u_int32_t arg0, u_int32_t arg1, u_int32_t arg2, u_int32_t arg3) { fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); AAC_MEM1_SETREG4(sc, AAC_RX_MAILBOX, command); AAC_MEM1_SETREG4(sc, AAC_RX_MAILBOX + 4, arg0); AAC_MEM1_SETREG4(sc, AAC_RX_MAILBOX + 8, arg1); AAC_MEM1_SETREG4(sc, AAC_RX_MAILBOX + 12, arg2); AAC_MEM1_SETREG4(sc, AAC_RX_MAILBOX + 16, arg3); } static void aac_rkt_set_mailbox(struct aac_softc *sc, u_int32_t command, u_int32_t arg0, u_int32_t arg1, u_int32_t arg2, u_int32_t arg3) { fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); AAC_MEM1_SETREG4(sc, AAC_RKT_MAILBOX, command); AAC_MEM1_SETREG4(sc, AAC_RKT_MAILBOX + 4, arg0); AAC_MEM1_SETREG4(sc, AAC_RKT_MAILBOX + 8, arg1); AAC_MEM1_SETREG4(sc, AAC_RKT_MAILBOX + 12, arg2); AAC_MEM1_SETREG4(sc, AAC_RKT_MAILBOX + 16, arg3); } /* * Fetch the immediate command status word */ static int aac_sa_get_mailbox(struct aac_softc *sc, int mb) { fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); return(AAC_MEM1_GETREG4(sc, AAC_SA_MAILBOX + (mb * 4))); } static int aac_rx_get_mailbox(struct aac_softc *sc, int mb) { fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); return(AAC_MEM1_GETREG4(sc, AAC_RX_MAILBOX + (mb * 4))); } static int aac_rkt_get_mailbox(struct aac_softc *sc, int mb) { fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); return(AAC_MEM1_GETREG4(sc, AAC_RKT_MAILBOX + (mb * 4))); } /* * Set/clear interrupt masks */ static 
void aac_sa_set_interrupts(struct aac_softc *sc, int enable) { fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "%sable interrupts", enable ? "en" : "dis"); if (enable) { AAC_MEM0_SETREG2((sc), AAC_SA_MASK0_CLEAR, AAC_DB_INTERRUPTS); } else { AAC_MEM0_SETREG2((sc), AAC_SA_MASK0_SET, ~0); } } static void aac_rx_set_interrupts(struct aac_softc *sc, int enable) { fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "%sable interrupts", enable ? "en" : "dis"); if (enable) { if (sc->flags & AAC_FLAGS_NEW_COMM) AAC_MEM0_SETREG4(sc, AAC_RX_OIMR, ~AAC_DB_INT_NEW_COMM); else AAC_MEM0_SETREG4(sc, AAC_RX_OIMR, ~AAC_DB_INTERRUPTS); } else { AAC_MEM0_SETREG4(sc, AAC_RX_OIMR, ~0); } } static void aac_rkt_set_interrupts(struct aac_softc *sc, int enable) { fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "%sable interrupts", enable ? "en" : "dis"); if (enable) { if (sc->flags & AAC_FLAGS_NEW_COMM) AAC_MEM0_SETREG4(sc, AAC_RKT_OIMR, ~AAC_DB_INT_NEW_COMM); else AAC_MEM0_SETREG4(sc, AAC_RKT_OIMR, ~AAC_DB_INTERRUPTS); } else { AAC_MEM0_SETREG4(sc, AAC_RKT_OIMR, ~0); } } /* * New comm. interface: Send command functions */ static int aac_rx_send_command(struct aac_softc *sc, struct aac_command *cm) { u_int32_t index, device; fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "send command (new comm.)"); index = AAC_MEM0_GETREG4(sc, AAC_RX_IQUE); if (index == 0xffffffffL) index = AAC_MEM0_GETREG4(sc, AAC_RX_IQUE); if (index == 0xffffffffL) return index; aac_enqueue_busy(cm); device = index; AAC_MEM1_SETREG4(sc, device, (u_int32_t)(cm->cm_fibphys & 0xffffffffUL)); device += 4; AAC_MEM1_SETREG4(sc, device, (u_int32_t)(cm->cm_fibphys >> 32)); device += 4; AAC_MEM1_SETREG4(sc, device, cm->cm_fib->Header.Size); AAC_MEM0_SETREG4(sc, AAC_RX_IQUE, index); return 0; } static int aac_rkt_send_command(struct aac_softc *sc, struct aac_command *cm) { u_int32_t index, device; fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "send command (new comm.)"); index = AAC_MEM0_GETREG4(sc, AAC_RKT_IQUE); if (index == 0xffffffffL) index = AAC_MEM0_GETREG4(sc, AAC_RKT_IQUE); if (index == 0xffffffffL) return index; aac_enqueue_busy(cm); device = index; AAC_MEM1_SETREG4(sc, device, (u_int32_t)(cm->cm_fibphys & 0xffffffffUL)); device += 4; AAC_MEM1_SETREG4(sc, device, (u_int32_t)(cm->cm_fibphys >> 32)); device += 4; AAC_MEM1_SETREG4(sc, device, cm->cm_fib->Header.Size); AAC_MEM0_SETREG4(sc, AAC_RKT_IQUE, index); return 0; } /* * New comm. interface: get, set outbound queue index */ static int aac_rx_get_outb_queue(struct aac_softc *sc) { fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); return(AAC_MEM0_GETREG4(sc, AAC_RX_OQUE)); } static int aac_rkt_get_outb_queue(struct aac_softc *sc) { fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); return(AAC_MEM0_GETREG4(sc, AAC_RKT_OQUE)); } static void aac_rx_set_outb_queue(struct aac_softc *sc, int index) { fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); AAC_MEM0_SETREG4(sc, AAC_RX_OQUE, index); } static void aac_rkt_set_outb_queue(struct aac_softc *sc, int index) { fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); AAC_MEM0_SETREG4(sc, AAC_RKT_OQUE, index); } /* * Debugging and Diagnostics */ /* * Print some information about the controller. 
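* * This issues a RequestAdapterInfo sync FIB and caches the returned * KernelRevision in the softc; under bootverbose it also prints the CPU * variant, memory sizes, battery platform and supported options word, and it * queries RequestSupplementAdapterInfo for the adapter type string when the * firmware supports it.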
*/ static void aac_describe_controller(struct aac_softc *sc) { struct aac_fib *fib; struct aac_adapter_info *info; char *adapter_type = "Adaptec RAID controller"; fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); mtx_lock(&sc->aac_io_lock); aac_alloc_sync_fib(sc, &fib); fib->data[0] = 0; if (aac_sync_fib(sc, RequestAdapterInfo, 0, fib, 1)) { device_printf(sc->aac_dev, "RequestAdapterInfo failed\n"); aac_release_sync_fib(sc); mtx_unlock(&sc->aac_io_lock); return; } /* save the kernel revision structure for later use */ info = (struct aac_adapter_info *)&fib->data[0]; sc->aac_revision = info->KernelRevision; if (bootverbose) { device_printf(sc->aac_dev, "%s %dMHz, %dMB memory " "(%dMB cache, %dMB execution), %s\n", aac_describe_code(aac_cpu_variant, info->CpuVariant), info->ClockSpeed, info->TotalMem / (1024 * 1024), info->BufferMem / (1024 * 1024), info->ExecutionMem / (1024 * 1024), aac_describe_code(aac_battery_platform, info->batteryPlatform)); device_printf(sc->aac_dev, "Kernel %d.%d-%d, Build %d, S/N %6X\n", info->KernelRevision.external.comp.major, info->KernelRevision.external.comp.minor, info->KernelRevision.external.comp.dash, info->KernelRevision.buildNumber, (u_int32_t)(info->SerialNumber & 0xffffff)); device_printf(sc->aac_dev, "Supported Options=%b\n", sc->supported_options, "\20" "\1SNAPSHOT" "\2CLUSTERS" "\3WCACHE" "\4DATA64" "\5HOSTTIME" "\6RAID50" "\7WINDOW4GB" "\10SCSIUPGD" "\11SOFTERR" "\12NORECOND" "\13SGMAP64" "\14ALARM" "\15NONDASD" "\16SCSIMGT" "\17RAIDSCSI" "\21ADPTINFO" "\22NEWCOMM" "\23ARRAY64BIT" "\24HEATSENSOR"); } if (sc->supported_options & AAC_SUPPORTED_SUPPLEMENT_ADAPTER_INFO) { fib->data[0] = 0; if (aac_sync_fib(sc, RequestSupplementAdapterInfo, 0, fib, 1)) device_printf(sc->aac_dev, "RequestSupplementAdapterInfo failed\n"); else adapter_type = ((struct aac_supplement_adapter_info *) &fib->data[0])->AdapterTypeText; } device_printf(sc->aac_dev, "%s, aac driver %d.%d.%d-%d\n", adapter_type, AAC_DRIVER_MAJOR_VERSION, AAC_DRIVER_MINOR_VERSION, AAC_DRIVER_BUGFIX_LEVEL, AAC_DRIVER_BUILD); aac_release_sync_fib(sc); mtx_unlock(&sc->aac_io_lock); } /* * Look up a text description of a numeric error code and return a pointer to * same. 
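* * The lookup tables are terminated by a { NULL, 0 } sentinel followed by one * final default entry; when no code matches, the loop stops at the sentinel * and table[i + 1].string yields that default (e.g. "Unknown processor" in * aac_cpu_variant).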
*/ static const char * aac_describe_code(const struct aac_code_lookup *table, u_int32_t code) { int i; for (i = 0; table[i].string != NULL; i++) if (table[i].code == code) return(table[i].string); return(table[i + 1].string); } /* * Management Interface */ static int aac_open(struct cdev *dev, int flags, int fmt, struct thread *td) { struct aac_softc *sc; sc = dev->si_drv1; fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); device_busy(sc->aac_dev); devfs_set_cdevpriv(sc, aac_cdevpriv_dtor); return 0; } static int aac_ioctl(struct cdev *dev, u_long cmd, caddr_t arg, int flag, struct thread *td) { union aac_statrequest *as; struct aac_softc *sc; int error = 0; as = (union aac_statrequest *)arg; sc = dev->si_drv1; fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); switch (cmd) { case AACIO_STATS: switch (as->as_item) { case AACQ_FREE: case AACQ_BIO: case AACQ_READY: case AACQ_BUSY: bcopy(&sc->aac_qstat[as->as_item], &as->as_qstat, sizeof(struct aac_qstat)); break; default: error = ENOENT; break; } break; case FSACTL_SENDFIB: case FSACTL_SEND_LARGE_FIB: arg = *(caddr_t*)arg; case FSACTL_LNX_SENDFIB: case FSACTL_LNX_SEND_LARGE_FIB: fwprintf(sc, HBA_FLAGS_DBG_IOCTL_COMMANDS_B, "FSACTL_SENDFIB"); error = aac_ioctl_sendfib(sc, arg); break; case FSACTL_SEND_RAW_SRB: arg = *(caddr_t*)arg; case FSACTL_LNX_SEND_RAW_SRB: fwprintf(sc, HBA_FLAGS_DBG_IOCTL_COMMANDS_B, "FSACTL_SEND_RAW_SRB"); error = aac_ioctl_send_raw_srb(sc, arg); break; case FSACTL_AIF_THREAD: case FSACTL_LNX_AIF_THREAD: fwprintf(sc, HBA_FLAGS_DBG_IOCTL_COMMANDS_B, "FSACTL_AIF_THREAD"); error = EINVAL; break; case FSACTL_OPEN_GET_ADAPTER_FIB: arg = *(caddr_t*)arg; case FSACTL_LNX_OPEN_GET_ADAPTER_FIB: fwprintf(sc, HBA_FLAGS_DBG_IOCTL_COMMANDS_B, "FSACTL_OPEN_GET_ADAPTER_FIB"); error = aac_open_aif(sc, arg); break; case FSACTL_GET_NEXT_ADAPTER_FIB: arg = *(caddr_t*)arg; case FSACTL_LNX_GET_NEXT_ADAPTER_FIB: fwprintf(sc, HBA_FLAGS_DBG_IOCTL_COMMANDS_B, "FSACTL_GET_NEXT_ADAPTER_FIB"); error = aac_getnext_aif(sc, arg); break; case FSACTL_CLOSE_GET_ADAPTER_FIB: arg = *(caddr_t*)arg; case FSACTL_LNX_CLOSE_GET_ADAPTER_FIB: fwprintf(sc, HBA_FLAGS_DBG_IOCTL_COMMANDS_B, "FSACTL_CLOSE_GET_ADAPTER_FIB"); error = aac_close_aif(sc, arg); break; case FSACTL_MINIPORT_REV_CHECK: arg = *(caddr_t*)arg; case FSACTL_LNX_MINIPORT_REV_CHECK: fwprintf(sc, HBA_FLAGS_DBG_IOCTL_COMMANDS_B, "FSACTL_MINIPORT_REV_CHECK"); error = aac_rev_check(sc, arg); break; case FSACTL_QUERY_DISK: arg = *(caddr_t*)arg; case FSACTL_LNX_QUERY_DISK: fwprintf(sc, HBA_FLAGS_DBG_IOCTL_COMMANDS_B, "FSACTL_QUERY_DISK"); error = aac_query_disk(sc, arg); break; case FSACTL_DELETE_DISK: case FSACTL_LNX_DELETE_DISK: /* * We don't trust the underland to tell us when to delete a * container, rather we rely on an AIF coming from the * controller */ error = 0; break; case FSACTL_GET_PCI_INFO: arg = *(caddr_t*)arg; case FSACTL_LNX_GET_PCI_INFO: fwprintf(sc, HBA_FLAGS_DBG_IOCTL_COMMANDS_B, "FSACTL_GET_PCI_INFO"); error = aac_get_pci_info(sc, arg); break; case FSACTL_GET_FEATURES: arg = *(caddr_t*)arg; case FSACTL_LNX_GET_FEATURES: fwprintf(sc, HBA_FLAGS_DBG_IOCTL_COMMANDS_B, "FSACTL_GET_FEATURES"); error = aac_supported_features(sc, arg); break; default: fwprintf(sc, HBA_FLAGS_DBG_IOCTL_COMMANDS_B, "unsupported cmd 0x%lx\n", cmd); error = EINVAL; break; } return(error); } static int aac_poll(struct cdev *dev, int poll_events, struct thread *td) { struct aac_softc *sc; struct aac_fib_context *ctx; int revents; sc = dev->si_drv1; revents = 0; mtx_lock(&sc->aac_aifq_lock); if ((poll_events & 
(POLLRDNORM | POLLIN)) != 0) { for (ctx = sc->fibctx; ctx; ctx = ctx->next) { if (ctx->ctx_idx != sc->aifq_idx || ctx->ctx_wrap) { revents |= poll_events & (POLLIN | POLLRDNORM); break; } } } mtx_unlock(&sc->aac_aifq_lock); if (revents == 0) { if (poll_events & (POLLIN | POLLRDNORM)) selrecord(td, &sc->rcv_select); } return (revents); } static void aac_ioctl_event(struct aac_softc *sc, struct aac_event *event, void *arg) { switch (event->ev_type) { case AAC_EVENT_CMFREE: mtx_assert(&sc->aac_io_lock, MA_OWNED); if (aac_alloc_command(sc, (struct aac_command **)arg)) { aac_add_event(sc, event); return; } free(event, M_AACBUF); wakeup(arg); break; default: break; } } /* * Send a FIB supplied from userspace */ static int aac_ioctl_sendfib(struct aac_softc *sc, caddr_t ufib) { struct aac_command *cm; int size, error; fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); cm = NULL; /* * Get a command */ mtx_lock(&sc->aac_io_lock); if (aac_alloc_command(sc, &cm)) { struct aac_event *event; event = malloc(sizeof(struct aac_event), M_AACBUF, M_NOWAIT | M_ZERO); if (event == NULL) { error = EBUSY; mtx_unlock(&sc->aac_io_lock); goto out; } event->ev_type = AAC_EVENT_CMFREE; event->ev_callback = aac_ioctl_event; event->ev_arg = &cm; aac_add_event(sc, event); msleep(&cm, &sc->aac_io_lock, 0, "sendfib", 0); } mtx_unlock(&sc->aac_io_lock); /* * Fetch the FIB header, then re-copy to get data as well. */ if ((error = copyin(ufib, cm->cm_fib, sizeof(struct aac_fib_header))) != 0) goto out; size = cm->cm_fib->Header.Size + sizeof(struct aac_fib_header); if (size > sc->aac_max_fib_size) { device_printf(sc->aac_dev, "incoming FIB oversized (%d > %d)\n", size, sc->aac_max_fib_size); size = sc->aac_max_fib_size; } if ((error = copyin(ufib, cm->cm_fib, size)) != 0) goto out; cm->cm_fib->Header.Size = size; cm->cm_timestamp = time_uptime; /* * Pass the FIB to the controller, wait for it to complete. */ mtx_lock(&sc->aac_io_lock); error = aac_wait_command(cm); mtx_unlock(&sc->aac_io_lock); if (error != 0) { device_printf(sc->aac_dev, "aac_wait_command return %d\n", error); goto out; } /* * Copy the FIB and data back out to the caller. 
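* * As on the inbound path, the size reported in the returned FIB header is * clamped to aac_max_fib_size below so that a corrupt or oversized header * cannot make copyout() run past the command's FIB buffer.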
*/ size = cm->cm_fib->Header.Size; if (size > sc->aac_max_fib_size) { device_printf(sc->aac_dev, "outbound FIB oversized (%d > %d)\n", size, sc->aac_max_fib_size); size = sc->aac_max_fib_size; } error = copyout(cm->cm_fib, ufib, size); out: if (cm != NULL) { mtx_lock(&sc->aac_io_lock); aac_release_command(cm); mtx_unlock(&sc->aac_io_lock); } return(error); } /* * Send a passthrough FIB supplied from userspace */ static int aac_ioctl_send_raw_srb(struct aac_softc *sc, caddr_t arg) { struct aac_command *cm; struct aac_event *event; struct aac_fib *fib; struct aac_srb *srbcmd, *user_srb; struct aac_sg_entry *sge; void *srb_sg_address, *ureply; uint32_t fibsize, srb_sg_bytecount; int error, transfer_data; fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); cm = NULL; transfer_data = 0; fibsize = 0; user_srb = (struct aac_srb *)arg; mtx_lock(&sc->aac_io_lock); if (aac_alloc_command(sc, &cm)) { event = malloc(sizeof(struct aac_event), M_AACBUF, M_NOWAIT | M_ZERO); if (event == NULL) { error = EBUSY; mtx_unlock(&sc->aac_io_lock); goto out; } event->ev_type = AAC_EVENT_CMFREE; event->ev_callback = aac_ioctl_event; event->ev_arg = &cm; aac_add_event(sc, event); msleep(cm, &sc->aac_io_lock, 0, "aacraw", 0); } mtx_unlock(&sc->aac_io_lock); cm->cm_data = NULL; fib = cm->cm_fib; srbcmd = (struct aac_srb *)fib->data; error = copyin(&user_srb->data_len, &fibsize, sizeof(uint32_t)); if (error != 0) goto out; if (fibsize > (sc->aac_max_fib_size - sizeof(struct aac_fib_header))) { error = EINVAL; goto out; } error = copyin(user_srb, srbcmd, fibsize); if (error != 0) goto out; srbcmd->function = 0; srbcmd->retry_limit = 0; if (srbcmd->sg_map.SgCount > 1) { error = EINVAL; goto out; } /* Retrieve correct SG entries. */ if (fibsize == (sizeof(struct aac_srb) + srbcmd->sg_map.SgCount * sizeof(struct aac_sg_entry))) { struct aac_sg_entry sg; sge = srbcmd->sg_map.SgEntry; if ((error = copyin(sge, &sg, sizeof(sg))) != 0) goto out; srb_sg_bytecount = sg.SgByteCount; srb_sg_address = (void *)(uintptr_t)sg.SgAddress; } #ifdef __amd64__ else if (fibsize == (sizeof(struct aac_srb) + srbcmd->sg_map.SgCount * sizeof(struct aac_sg_entry64))) { struct aac_sg_entry64 *sge64; struct aac_sg_entry64 sg; sge = NULL; sge64 = (struct aac_sg_entry64 *)srbcmd->sg_map.SgEntry; if ((error = copyin(sge64, &sg, sizeof(sg))) != 0) goto out; srb_sg_bytecount = sg.SgByteCount; srb_sg_address = (void *)sg.SgAddress; if (sge64->SgAddress > 0xffffffffull && (sc->flags & AAC_FLAGS_SG_64BIT) == 0) { error = EINVAL; goto out; } } #endif else { error = EINVAL; goto out; } ureply = (char *)arg + fibsize; srbcmd->data_len = srb_sg_bytecount; if (srbcmd->sg_map.SgCount == 1) transfer_data = 1; cm->cm_sgtable = (struct aac_sg_table *)&srbcmd->sg_map; if (transfer_data) { cm->cm_datalen = srb_sg_bytecount; cm->cm_data = malloc(cm->cm_datalen, M_AACBUF, M_NOWAIT); if (cm->cm_data == NULL) { error = ENOMEM; goto out; } if (srbcmd->flags & AAC_SRB_FLAGS_DATA_IN) cm->cm_flags |= AAC_CMD_DATAIN; if (srbcmd->flags & AAC_SRB_FLAGS_DATA_OUT) { cm->cm_flags |= AAC_CMD_DATAOUT; error = copyin(srb_sg_address, cm->cm_data, cm->cm_datalen); if (error != 0) goto out; } } fib->Header.Size = sizeof(struct aac_fib_header) + sizeof(struct aac_srb); fib->Header.XferState = AAC_FIBSTATE_HOSTOWNED | AAC_FIBSTATE_INITIALISED | AAC_FIBSTATE_EMPTY | AAC_FIBSTATE_FROMHOST | AAC_FIBSTATE_REXPECTED | AAC_FIBSTATE_NORM | AAC_FIBSTATE_ASYNC | AAC_FIBSTATE_FAST_RESPONSE; fib->Header.Command = (sc->flags & AAC_FLAGS_SG_64BIT) != 0 ? 
ScsiPortCommandU64 : ScsiPortCommand; mtx_lock(&sc->aac_io_lock); aac_wait_command(cm); mtx_unlock(&sc->aac_io_lock); if (transfer_data && (srbcmd->flags & AAC_SRB_FLAGS_DATA_IN) != 0) { error = copyout(cm->cm_data, srb_sg_address, cm->cm_datalen); if (error != 0) goto out; } error = copyout(fib->data, ureply, sizeof(struct aac_srb_response)); out: if (cm != NULL) { if (cm->cm_data != NULL) free(cm->cm_data, M_AACBUF); mtx_lock(&sc->aac_io_lock); aac_release_command(cm); mtx_unlock(&sc->aac_io_lock); } return(error); } /* * cdevpriv interface private destructor. */ static void aac_cdevpriv_dtor(void *arg) { struct aac_softc *sc; sc = arg; fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); device_unbusy(sc->aac_dev); } /* * Handle an AIF sent to us by the controller; queue it for later reference. * If the queue fills up, then drop the older entries. */ static void aac_handle_aif(struct aac_softc *sc, struct aac_fib *fib) { struct aac_aif_command *aif; struct aac_container *co, *co_next; struct aac_fib_context *ctx; struct aac_mntinforesp *mir; int next, current, found; int count = 0, added = 0, i = 0; uint32_t channel; fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); aif = (struct aac_aif_command*)&fib->data[0]; aac_print_aif(sc, aif); /* Is it an event that we should care about? */ switch (aif->command) { case AifCmdEventNotify: switch (aif->data.EN.type) { case AifEnAddContainer: case AifEnDeleteContainer: /* * A container was added or deleted, but the message * doesn't tell us anything else! Re-enumerate the * containers and sort things out. */ aac_alloc_sync_fib(sc, &fib); do { /* * Ask the controller for its containers one at * a time. * XXX What if the controller's list changes * midway through this enumeration? * XXX This should be done async. */ if ((mir = aac_get_container_info(sc, fib, i)) == NULL) continue; if (i == 0) count = mir->MntRespCount; /* * Check the container against our list. * co->co_found was already set to 0 in a * previous run. */ if ((mir->Status == ST_OK) && (mir->MntTable[0].VolType != CT_NONE)) { found = 0; TAILQ_FOREACH(co, &sc->aac_container_tqh, co_link) { if (co->co_mntobj.ObjectId == mir->MntTable[0].ObjectId) { co->co_found = 1; found = 1; break; } } /* * If the container matched, continue * in the list. */ if (found) { i++; continue; } /* * This is a new container. Do all the * appropriate things to set it up. */ aac_add_container(sc, mir, 1); added = 1; } i++; } while ((i < count) && (i < AAC_MAX_CONTAINERS)); aac_release_sync_fib(sc); /* * Go through our list of containers and see which ones * were not marked 'found'. Since the controller didn't * list them they must have been deleted. Do the * appropriate steps to destroy the device. Also reset * the co->co_found field.
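* * In short this is a mark-and-sweep pass: the enumeration above marks every * container the controller still reports (co_found = 1), and the loop below * sweeps the list, detaching and freeing unmarked entries and clearing the * marks again ready for the next AIF.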
*/ co = TAILQ_FIRST(&sc->aac_container_tqh); while (co != NULL) { if (co->co_found == 0) { mtx_unlock(&sc->aac_io_lock); bus_topo_lock(); device_delete_child(sc->aac_dev, co->co_disk); bus_topo_unlock(); mtx_lock(&sc->aac_io_lock); co_next = TAILQ_NEXT(co, co_link); mtx_lock(&sc->aac_container_lock); TAILQ_REMOVE(&sc->aac_container_tqh, co, co_link); mtx_unlock(&sc->aac_container_lock); free(co, M_AACBUF); co = co_next; } else { co->co_found = 0; co = TAILQ_NEXT(co, co_link); } } /* Attach the newly created containers */ if (added) { mtx_unlock(&sc->aac_io_lock); bus_topo_lock(); bus_attach_children(sc->aac_dev); bus_topo_unlock(); mtx_lock(&sc->aac_io_lock); } break; case AifEnEnclosureManagement: switch (aif->data.EN.data.EEE.eventType) { case AIF_EM_DRIVE_INSERTION: case AIF_EM_DRIVE_REMOVAL: channel = aif->data.EN.data.EEE.unitID; if (sc->cam_rescan_cb != NULL) sc->cam_rescan_cb(sc, (channel >> 24) & 0xF, (channel & 0xFFFF)); break; } break; case AifEnAddJBOD: case AifEnDeleteJBOD: channel = aif->data.EN.data.ECE.container; if (sc->cam_rescan_cb != NULL) sc->cam_rescan_cb(sc, (channel >> 24) & 0xF, AAC_CAM_TARGET_WILDCARD); break; default: break; } default: break; } /* Copy the AIF data to the AIF queue for ioctl retrieval */ mtx_lock(&sc->aac_aifq_lock); current = sc->aifq_idx; next = (current + 1) % AAC_AIFQ_LENGTH; if (next == 0) sc->aifq_filled = 1; bcopy(fib, &sc->aac_aifq[current], sizeof(struct aac_fib)); /* modify AIF contexts */ if (sc->aifq_filled) { for (ctx = sc->fibctx; ctx; ctx = ctx->next) { if (next == ctx->ctx_idx) ctx->ctx_wrap = 1; else if (current == ctx->ctx_idx && ctx->ctx_wrap) ctx->ctx_idx = next; } } sc->aifq_idx = next; /* On the off chance that someone is sleeping for an aif... */ if (sc->aac_state & AAC_STATE_AIF_SLEEPER) wakeup(sc->aac_aifq); /* Wakeup any poll()ers */ selwakeuppri(&sc->rcv_select, PRIBIO); mtx_unlock(&sc->aac_aifq_lock); } /* * Return the Revision of the driver to userspace and check to see if the * userspace app is possibly compatible. This is extremely bogus since * our driver doesn't follow Adaptec's versioning system. Cheat by just * returning what the card reported. */ static int aac_rev_check(struct aac_softc *sc, caddr_t udata) { struct aac_rev_check rev_check; struct aac_rev_check_resp rev_check_resp; int error = 0; fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); /* * Copyin the revision struct from userspace */ if ((error = copyin(udata, (caddr_t)&rev_check, sizeof(struct aac_rev_check))) != 0) { return error; } fwprintf(sc, HBA_FLAGS_DBG_IOCTL_COMMANDS_B, "Userland revision= %d\n", rev_check.callingRevision.buildNumber); /* * Doctor up the response struct. 
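* * Note that possiblyCompatible is unconditionally set to 1 and the version * fields are filled from the driver's own AAC_DRIVER_* constants rather than * anything the adapter itself reported.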
*/ rev_check_resp.possiblyCompatible = 1; rev_check_resp.adapterSWRevision.external.comp.major = AAC_DRIVER_MAJOR_VERSION; rev_check_resp.adapterSWRevision.external.comp.minor = AAC_DRIVER_MINOR_VERSION; rev_check_resp.adapterSWRevision.external.comp.type = AAC_DRIVER_TYPE; rev_check_resp.adapterSWRevision.external.comp.dash = AAC_DRIVER_BUGFIX_LEVEL; rev_check_resp.adapterSWRevision.buildNumber = AAC_DRIVER_BUILD; return(copyout((caddr_t)&rev_check_resp, udata, sizeof(struct aac_rev_check_resp))); } /* * Pass the fib context to the caller */ static int aac_open_aif(struct aac_softc *sc, caddr_t arg) { struct aac_fib_context *fibctx, *ctx; int error = 0; fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); fibctx = malloc(sizeof(struct aac_fib_context), M_AACBUF, M_NOWAIT|M_ZERO); if (fibctx == NULL) return (ENOMEM); mtx_lock(&sc->aac_aifq_lock); /* all elements are already 0, add to queue */ if (sc->fibctx == NULL) sc->fibctx = fibctx; else { for (ctx = sc->fibctx; ctx->next; ctx = ctx->next) ; ctx->next = fibctx; fibctx->prev = ctx; } /* evaluate unique value */ fibctx->unique = (*(u_int32_t *)&fibctx & 0xffffffff); ctx = sc->fibctx; while (ctx != fibctx) { if (ctx->unique == fibctx->unique) { fibctx->unique++; ctx = sc->fibctx; } else { ctx = ctx->next; } } mtx_unlock(&sc->aac_aifq_lock); error = copyout(&fibctx->unique, (void *)arg, sizeof(u_int32_t)); if (error) aac_close_aif(sc, (caddr_t)ctx); return error; } /* * Close the caller's fib context */ static int aac_close_aif(struct aac_softc *sc, caddr_t arg) { struct aac_fib_context *ctx; fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); mtx_lock(&sc->aac_aifq_lock); for (ctx = sc->fibctx; ctx; ctx = ctx->next) { if (ctx->unique == *(uint32_t *)&arg) { if (ctx == sc->fibctx) sc->fibctx = NULL; else { ctx->prev->next = ctx->next; if (ctx->next) ctx->next->prev = ctx->prev; } break; } } mtx_unlock(&sc->aac_aifq_lock); if (ctx) free(ctx, M_AACBUF); return 0; } /* * Pass the caller the next AIF in their queue */ static int aac_getnext_aif(struct aac_softc *sc, caddr_t arg) { struct get_adapter_fib_ioctl agf; struct aac_fib_context *ctx; int error; fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); #ifdef COMPAT_FREEBSD32 if (SV_CURPROC_FLAG(SV_ILP32)) { struct get_adapter_fib_ioctl32 agf32; error = copyin(arg, &agf32, sizeof(agf32)); if (error == 0) { agf.AdapterFibContext = agf32.AdapterFibContext; agf.Wait = agf32.Wait; agf.AifFib = (caddr_t)(uintptr_t)agf32.AifFib; } } else #endif error = copyin(arg, &agf, sizeof(agf)); if (error == 0) { for (ctx = sc->fibctx; ctx; ctx = ctx->next) { if (agf.AdapterFibContext == ctx->unique) break; } if (!ctx) return (EFAULT); error = aac_return_aif(sc, ctx, agf.AifFib); if (error == EAGAIN && agf.Wait) { fwprintf(sc, HBA_FLAGS_DBG_AIF_B, "aac_getnext_aif(): waiting for AIF"); sc->aac_state |= AAC_STATE_AIF_SLEEPER; while (error == EAGAIN) { error = tsleep(sc->aac_aifq, PRIBIO | PCATCH, "aacaif", 0); if (error == 0) error = aac_return_aif(sc, ctx, agf.AifFib); } sc->aac_state &= ~AAC_STATE_AIF_SLEEPER; } } return(error); } /* * Hand the next AIF off the top of the queue out to userspace. 
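* * Each open FIB context keeps its own consumer index (ctx_idx) into the * shared circular AIF queue; a context's queue is empty when ctx_idx has * caught up with the producer index (sc->aifq_idx) and the context has not * been lapped (ctx_wrap clear), in which case EAGAIN is returned.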
*/ static int aac_return_aif(struct aac_softc *sc, struct aac_fib_context *ctx, caddr_t uptr) { int current, error; fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); mtx_lock(&sc->aac_aifq_lock); current = ctx->ctx_idx; if (current == sc->aifq_idx && !ctx->ctx_wrap) { /* empty */ mtx_unlock(&sc->aac_aifq_lock); return (EAGAIN); } error = copyout(&sc->aac_aifq[current], (void *)uptr, sizeof(struct aac_fib)); if (error) device_printf(sc->aac_dev, "aac_return_aif: copyout returned %d\n", error); else { ctx->ctx_wrap = 0; ctx->ctx_idx = (current + 1) % AAC_AIFQ_LENGTH; } mtx_unlock(&sc->aac_aifq_lock); return(error); } static int aac_get_pci_info(struct aac_softc *sc, caddr_t uptr) { struct aac_pci_info { u_int32_t bus; u_int32_t slot; } pciinf; int error; fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); pciinf.bus = pci_get_bus(sc->aac_dev); pciinf.slot = pci_get_slot(sc->aac_dev); error = copyout((caddr_t)&pciinf, uptr, sizeof(struct aac_pci_info)); return (error); } static int aac_supported_features(struct aac_softc *sc, caddr_t uptr) { struct aac_features f; int error; fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); if ((error = copyin(uptr, &f, sizeof (f))) != 0) return (error); /* * When the management driver receives the FSACTL_GET_FEATURES ioctl with * ALL zero in the featuresState, the driver will return the current * state of all the supported features; the data field will not be * valid. * When the management driver receives the FSACTL_GET_FEATURES ioctl with * a specific bit set in the featuresState, the driver will return the * current state of this specific feature and whatever data is * associated with the feature in the data field, or perform whatever * action the data field indicates. */ if (f.feat.fValue == 0) { f.feat.fBits.largeLBA = (sc->flags & AAC_FLAGS_LBA_64BIT) ? 1 : 0; /* TODO: In the future, add other features state here as well */ } else { if (f.feat.fBits.largeLBA) f.feat.fBits.largeLBA = (sc->flags & AAC_FLAGS_LBA_64BIT) ? 1 : 0; /* TODO: Add other features state and data in the future */ } error = copyout(&f, uptr, sizeof (f)); return (error); } /* * Give the userland some information about the container. The AAC arch * expects the driver to be a SCSI passthrough type driver, so it expects * the containers to have b:t:l numbers. Fake it. */ static int aac_query_disk(struct aac_softc *sc, caddr_t uptr) { struct aac_query_disk query_disk; struct aac_container *co; struct aac_disk *disk; int error, id; fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); disk = NULL; error = copyin(uptr, (caddr_t)&query_disk, sizeof(struct aac_query_disk)); if (error) return (error); id = query_disk.ContainerNumber; if (id == -1) return (EINVAL); mtx_lock(&sc->aac_container_lock); TAILQ_FOREACH(co, &sc->aac_container_tqh, co_link) { if (co->co_mntobj.ObjectId == id) break; } if (co == NULL) { query_disk.Valid = 0; query_disk.Locked = 0; query_disk.Deleted = 1; /* XXX is this right? */ } else { disk = device_get_softc(co->co_disk); query_disk.Valid = 1; query_disk.Locked = (disk->ad_flags & AAC_DISK_OPEN) ?
1 : 0; query_disk.Deleted = 0; query_disk.Bus = device_get_unit(sc->aac_dev); query_disk.Target = disk->unit; query_disk.Lun = 0; query_disk.UnMapped = 0; sprintf(&query_disk.diskDeviceName[0], "%s%d", disk->ad_disk->d_name, disk->ad_disk->d_unit); } mtx_unlock(&sc->aac_container_lock); error = copyout((caddr_t)&query_disk, uptr, sizeof(struct aac_query_disk)); return (error); } static void aac_get_bus_info(struct aac_softc *sc) { struct aac_fib *fib; struct aac_ctcfg *c_cmd; struct aac_ctcfg_resp *c_resp; struct aac_vmioctl *vmi; struct aac_vmi_businf_resp *vmi_resp; struct aac_getbusinf businfo; struct aac_sim *caminf; device_t child; int i, found, error; mtx_lock(&sc->aac_io_lock); aac_alloc_sync_fib(sc, &fib); c_cmd = (struct aac_ctcfg *)&fib->data[0]; bzero(c_cmd, sizeof(struct aac_ctcfg)); c_cmd->Command = VM_ContainerConfig; c_cmd->cmd = CT_GET_SCSI_METHOD; c_cmd->param = 0; error = aac_sync_fib(sc, ContainerCommand, 0, fib, sizeof(struct aac_ctcfg)); if (error) { device_printf(sc->aac_dev, "Error %d sending " "VM_ContainerConfig command\n", error); aac_release_sync_fib(sc); mtx_unlock(&sc->aac_io_lock); return; } c_resp = (struct aac_ctcfg_resp *)&fib->data[0]; if (c_resp->Status != ST_OK) { device_printf(sc->aac_dev, "VM_ContainerConfig returned 0x%x\n", c_resp->Status); aac_release_sync_fib(sc); mtx_unlock(&sc->aac_io_lock); return; } sc->scsi_method_id = c_resp->param; vmi = (struct aac_vmioctl *)&fib->data[0]; bzero(vmi, sizeof(struct aac_vmioctl)); vmi->Command = VM_Ioctl; vmi->ObjType = FT_DRIVE; vmi->MethId = sc->scsi_method_id; vmi->ObjId = 0; vmi->IoctlCmd = GetBusInfo; error = aac_sync_fib(sc, ContainerCommand, 0, fib, sizeof(struct aac_vmi_businf_resp)); if (error) { device_printf(sc->aac_dev, "Error %d sending VMIoctl command\n", error); aac_release_sync_fib(sc); mtx_unlock(&sc->aac_io_lock); return; } vmi_resp = (struct aac_vmi_businf_resp *)&fib->data[0]; if (vmi_resp->Status != ST_OK) { device_printf(sc->aac_dev, "VM_Ioctl returned %d\n", vmi_resp->Status); aac_release_sync_fib(sc); mtx_unlock(&sc->aac_io_lock); return; } bcopy(&vmi_resp->BusInf, &businfo, sizeof(struct aac_getbusinf)); aac_release_sync_fib(sc); mtx_unlock(&sc->aac_io_lock); found = 0; for (i = 0; i < businfo.BusCount; i++) { if (businfo.BusValid[i] != AAC_BUS_VALID) continue; caminf = (struct aac_sim *)malloc( sizeof(struct aac_sim), M_AACBUF, M_NOWAIT | M_ZERO); if (caminf == NULL) { device_printf(sc->aac_dev, "No memory to add passthrough bus %d\n", i); break; } child = device_add_child(sc->aac_dev, "aacp", DEVICE_UNIT_ANY); if (child == NULL) { device_printf(sc->aac_dev, "device_add_child failed for passthrough bus %d\n", i); free(caminf, M_AACBUF); break; } caminf->TargetsPerBus = businfo.TargetsPerBus; caminf->BusNumber = i; caminf->InitiatorBusId = businfo.InitiatorBusId[i]; caminf->aac_sc = sc; caminf->sim_dev = child; device_set_ivars(child, caminf); device_set_desc(child, "SCSI Passthrough Bus"); TAILQ_INSERT_TAIL(&sc->aac_sim_tqh, caminf, sim_link); found = 1; } if (found) bus_attach_children(sc->aac_dev); } diff --git a/sys/dev/aacraid/aacraid.c b/sys/dev/aacraid/aacraid.c index 9e9b1c602b9d..90a073d10039 100644 --- a/sys/dev/aacraid/aacraid.c +++ b/sys/dev/aacraid/aacraid.c @@ -1,3926 +1,3927 @@ /*- * SPDX-License-Identifier: BSD-2-Clause * * Copyright (c) 2000 Michael Smith * Copyright (c) 2001 Scott Long * Copyright (c) 2000 BSDi * Copyright (c) 2001-2010 Adaptec, Inc. * Copyright (c) 2010-2012 PMC-Sierra, Inc. * All rights reserved. 
* * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include /* * Driver for the Adaptec by PMC Series 6,7,8,... families of RAID controllers */ #define AAC_DRIVERNAME "aacraid" #include "opt_aacraid.h" /* #include */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #ifndef FILTER_HANDLED #define FILTER_HANDLED 0x02 #endif static void aac_add_container(struct aac_softc *sc, struct aac_mntinforesp *mir, int f, u_int32_t uid); static void aac_get_bus_info(struct aac_softc *sc); static void aac_container_bus(struct aac_softc *sc); static void aac_daemon(void *arg); static int aac_convert_sgraw2(struct aac_softc *sc, struct aac_raw_io2 *raw, int pages, int nseg, int nseg_new); /* Command Processing */ static void aac_timeout(struct aac_softc *sc); static void aac_command_thread(struct aac_softc *sc); static int aac_sync_fib(struct aac_softc *sc, u_int32_t command, u_int32_t xferstate, struct aac_fib *fib, u_int16_t datasize); /* Command Buffer Management */ static void aac_map_command_helper(void *arg, bus_dma_segment_t *segs, int nseg, int error); static int aac_alloc_commands(struct aac_softc *sc); static void aac_free_commands(struct aac_softc *sc); static void aac_unmap_command(struct aac_command *cm); /* Hardware Interface */ static int aac_alloc(struct aac_softc *sc); static void aac_common_map(void *arg, bus_dma_segment_t *segs, int nseg, int error); static int aac_check_firmware(struct aac_softc *sc); static void aac_define_int_mode(struct aac_softc *sc); static int aac_init(struct aac_softc *sc); static int aac_find_pci_capability(struct aac_softc *sc, int cap); static int aac_setup_intr(struct aac_softc *sc); static int aac_check_config(struct aac_softc *sc); /* PMC SRC interface */ static int aac_src_get_fwstatus(struct aac_softc *sc); static void aac_src_qnotify(struct aac_softc *sc, int qbit); static int aac_src_get_istatus(struct aac_softc *sc); static void aac_src_clear_istatus(struct aac_softc *sc, int mask); static void aac_src_set_mailbox(struct aac_softc *sc, u_int32_t command, u_int32_t arg0, u_int32_t arg1, u_int32_t arg2, u_int32_t arg3); static int aac_src_get_mailbox(struct aac_softc *sc, int mb); static void 
aac_src_access_devreg(struct aac_softc *sc, int mode); static int aac_src_send_command(struct aac_softc *sc, struct aac_command *cm); static int aac_src_get_outb_queue(struct aac_softc *sc); static void aac_src_set_outb_queue(struct aac_softc *sc, int index); struct aac_interface aacraid_src_interface = { aac_src_get_fwstatus, aac_src_qnotify, aac_src_get_istatus, aac_src_clear_istatus, aac_src_set_mailbox, aac_src_get_mailbox, aac_src_access_devreg, aac_src_send_command, aac_src_get_outb_queue, aac_src_set_outb_queue }; /* PMC SRCv interface */ static void aac_srcv_set_mailbox(struct aac_softc *sc, u_int32_t command, u_int32_t arg0, u_int32_t arg1, u_int32_t arg2, u_int32_t arg3); static int aac_srcv_get_mailbox(struct aac_softc *sc, int mb); struct aac_interface aacraid_srcv_interface = { aac_src_get_fwstatus, aac_src_qnotify, aac_src_get_istatus, aac_src_clear_istatus, aac_srcv_set_mailbox, aac_srcv_get_mailbox, aac_src_access_devreg, aac_src_send_command, aac_src_get_outb_queue, aac_src_set_outb_queue }; /* Debugging and Diagnostics */ static struct aac_code_lookup aac_cpu_variant[] = { {"i960JX", CPUI960_JX}, {"i960CX", CPUI960_CX}, {"i960HX", CPUI960_HX}, {"i960RX", CPUI960_RX}, {"i960 80303", CPUI960_80303}, {"StrongARM SA110", CPUARM_SA110}, {"PPC603e", CPUPPC_603e}, {"XScale 80321", CPU_XSCALE_80321}, {"MIPS 4KC", CPU_MIPS_4KC}, {"MIPS 5KC", CPU_MIPS_5KC}, {"Unknown StrongARM", CPUARM_xxx}, {"Unknown PowerPC", CPUPPC_xxx}, {NULL, 0}, {"Unknown processor", 0} }; static struct aac_code_lookup aac_battery_platform[] = { {"required battery present", PLATFORM_BAT_REQ_PRESENT}, {"REQUIRED BATTERY NOT PRESENT", PLATFORM_BAT_REQ_NOTPRESENT}, {"optional battery present", PLATFORM_BAT_OPT_PRESENT}, {"optional battery not installed", PLATFORM_BAT_OPT_NOTPRESENT}, {"no battery support", PLATFORM_BAT_NOT_SUPPORTED}, {NULL, 0}, {"unknown battery platform", 0} }; static void aac_describe_controller(struct aac_softc *sc); static char *aac_describe_code(struct aac_code_lookup *table, u_int32_t code); /* Management Interface */ static d_open_t aac_open; static d_ioctl_t aac_ioctl; static d_poll_t aac_poll; static void aac_cdevpriv_dtor(void *arg); static int aac_ioctl_sendfib(struct aac_softc *sc, caddr_t ufib); static int aac_ioctl_send_raw_srb(struct aac_softc *sc, caddr_t arg); static void aac_handle_aif(struct aac_softc *sc, struct aac_fib *fib); static void aac_request_aif(struct aac_softc *sc); static int aac_rev_check(struct aac_softc *sc, caddr_t udata); static int aac_open_aif(struct aac_softc *sc, caddr_t arg); static int aac_close_aif(struct aac_softc *sc, caddr_t arg); static int aac_getnext_aif(struct aac_softc *sc, caddr_t arg); static int aac_return_aif(struct aac_softc *sc, struct aac_fib_context *ctx, caddr_t uptr); static int aac_query_disk(struct aac_softc *sc, caddr_t uptr); static int aac_get_pci_info(struct aac_softc *sc, caddr_t uptr); static int aac_supported_features(struct aac_softc *sc, caddr_t uptr); static void aac_ioctl_event(struct aac_softc *sc, struct aac_event *event, void *arg); static int aac_reset_adapter(struct aac_softc *sc); static int aac_get_container_info(struct aac_softc *sc, struct aac_fib *fib, int cid, struct aac_mntinforesp *mir, u_int32_t *uid); static u_int32_t aac_check_adapter_health(struct aac_softc *sc, u_int8_t *bled); static struct cdevsw aacraid_cdevsw = { .d_version = D_VERSION, .d_flags = 0, .d_open = aac_open, .d_ioctl = aac_ioctl, .d_poll = aac_poll, .d_name = "aacraid", }; MALLOC_DEFINE(M_AACRAIDBUF, "aacraid_buf", "Buffers for the 
AACRAID driver"); /* sysctl node */ SYSCTL_NODE(_hw, OID_AUTO, aacraid, CTLFLAG_RD | CTLFLAG_MPSAFE, 0, "AACRAID driver parameters"); /* * Device Interface */ /* * Initialize the controller and softc */ int aacraid_attach(struct aac_softc *sc) { int error, unit; struct aac_fib *fib; struct aac_mntinforesp mir; int count = 0, i = 0; u_int32_t uid; fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); sc->hint_flags = device_get_flags(sc->aac_dev); /* * Initialize per-controller queues. */ aac_initq_free(sc); aac_initq_ready(sc); aac_initq_busy(sc); /* mark controller as suspended until we get ourselves organised */ sc->aac_state |= AAC_STATE_SUSPEND; /* * Check that the firmware on the card is supported. */ sc->msi_enabled = sc->msi_tupelo = FALSE; if ((error = aac_check_firmware(sc)) != 0) return(error); /* * Initialize locks */ mtx_init(&sc->aac_io_lock, "AACRAID I/O lock", NULL, MTX_DEF); TAILQ_INIT(&sc->aac_container_tqh); TAILQ_INIT(&sc->aac_ev_cmfree); /* Initialize the clock daemon callout. */ callout_init_mtx(&sc->aac_daemontime, &sc->aac_io_lock, 0); /* * Initialize the adapter. */ if ((error = aac_alloc(sc)) != 0) return(error); aac_define_int_mode(sc); if (!(sc->flags & AAC_FLAGS_SYNC_MODE)) { if ((error = aac_init(sc)) != 0) return(error); } /* * Allocate and connect our interrupt. */ if ((error = aac_setup_intr(sc)) != 0) return(error); /* * Print a little information about the controller. */ aac_describe_controller(sc); /* * Make the control device. */ unit = device_get_unit(sc->aac_dev); sc->aac_dev_t = make_dev(&aacraid_cdevsw, unit, UID_ROOT, GID_OPERATOR, 0640, "aacraid%d", unit); sc->aac_dev_t->si_drv1 = sc; /* Create the AIF thread */ if (aac_kthread_create((void(*)(void *))aac_command_thread, sc, &sc->aifthread, 0, 0, "aacraid%daif", unit)) panic("Could not create AIF thread"); /* Register the shutdown method to only be called post-dump */ if ((sc->eh = EVENTHANDLER_REGISTER(shutdown_final, aacraid_shutdown, sc->aac_dev, SHUTDOWN_PRI_DEFAULT)) == NULL) device_printf(sc->aac_dev, "shutdown event registration failed\n"); /* Find containers */ mtx_lock(&sc->aac_io_lock); aac_alloc_sync_fib(sc, &fib); /* loop over possible containers */ do { if ((aac_get_container_info(sc, fib, i, &mir, &uid)) != 0) continue; if (i == 0) count = mir.MntRespCount; aac_add_container(sc, &mir, 0, uid); i++; } while ((i < count) && (i < AAC_MAX_CONTAINERS)); aac_release_sync_fib(sc); mtx_unlock(&sc->aac_io_lock); /* Register with CAM for the containers */ TAILQ_INIT(&sc->aac_sim_tqh); aac_container_bus(sc); /* Register with CAM for the non-DASD devices */ if ((sc->flags & AAC_FLAGS_ENABLE_CAM) != 0) aac_get_bus_info(sc); /* poke the bus to actually attach the child devices */ bus_attach_children(sc->aac_dev); /* mark the controller up */ sc->aac_state &= ~AAC_STATE_SUSPEND; /* enable interrupts now */ AAC_ACCESS_DEVREG(sc, AAC_ENABLE_INTERRUPT); mtx_lock(&sc->aac_io_lock); callout_reset(&sc->aac_daemontime, 60 * hz, aac_daemon, sc); mtx_unlock(&sc->aac_io_lock); return(0); } static void aac_daemon(void *arg) { struct aac_softc *sc; struct timeval tv; struct aac_command *cm; struct aac_fib *fib; sc = arg; fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); mtx_assert(&sc->aac_io_lock, MA_OWNED); if (callout_pending(&sc->aac_daemontime) || callout_active(&sc->aac_daemontime) == 0) return; getmicrotime(&tv); if (!aacraid_alloc_command(sc, &cm)) { fib = cm->cm_fib; cm->cm_timestamp = time_uptime; cm->cm_datalen = 0; cm->cm_flags |= AAC_CMD_WAIT; fib->Header.Size = sizeof(struct aac_fib_header) + 
sizeof(u_int32_t); fib->Header.XferState = AAC_FIBSTATE_HOSTOWNED | AAC_FIBSTATE_INITIALISED | AAC_FIBSTATE_EMPTY | AAC_FIBSTATE_FROMHOST | AAC_FIBSTATE_REXPECTED | AAC_FIBSTATE_NORM | AAC_FIBSTATE_ASYNC | AAC_FIBSTATE_FAST_RESPONSE; fib->Header.Command = SendHostTime; *(uint32_t *)fib->data = htole32(tv.tv_sec); aacraid_map_command_sg(cm, NULL, 0, 0); aacraid_release_command(cm); } callout_schedule(&sc->aac_daemontime, 30 * 60 * hz); } void aacraid_add_event(struct aac_softc *sc, struct aac_event *event) { switch (event->ev_type & AAC_EVENT_MASK) { case AAC_EVENT_CMFREE: TAILQ_INSERT_TAIL(&sc->aac_ev_cmfree, event, ev_links); break; default: device_printf(sc->aac_dev, "aac_add event: unknown event %d\n", event->ev_type); break; } return; } /* * Request information of container #cid */ static int aac_get_container_info(struct aac_softc *sc, struct aac_fib *sync_fib, int cid, struct aac_mntinforesp *mir, u_int32_t *uid) { struct aac_command *cm; struct aac_fib *fib; struct aac_mntinfo *mi; struct aac_cnt_config *ccfg; int rval; if (sync_fib == NULL) { if (aacraid_alloc_command(sc, &cm)) { device_printf(sc->aac_dev, "Warning, no free command available\n"); return (-1); } fib = cm->cm_fib; } else { fib = sync_fib; } mi = (struct aac_mntinfo *)&fib->data[0]; /* 4KB support?, 64-bit LBA? */ if (sc->aac_support_opt2 & AAC_SUPPORTED_VARIABLE_BLOCK_SIZE) mi->Command = VM_NameServeAllBlk; else if (sc->flags & AAC_FLAGS_LBA_64BIT) mi->Command = VM_NameServe64; else mi->Command = VM_NameServe; mi->MntType = FT_FILESYS; mi->MntCount = cid; aac_mntinfo_tole(mi); if (sync_fib) { if (aac_sync_fib(sc, ContainerCommand, 0, fib, sizeof(struct aac_mntinfo))) { device_printf(sc->aac_dev, "Error probing container %d\n", cid); return (-1); } } else { cm->cm_timestamp = time_uptime; cm->cm_datalen = 0; fib->Header.Size = sizeof(struct aac_fib_header) + sizeof(struct aac_mntinfo); fib->Header.XferState = AAC_FIBSTATE_HOSTOWNED | AAC_FIBSTATE_INITIALISED | AAC_FIBSTATE_EMPTY | AAC_FIBSTATE_FROMHOST | AAC_FIBSTATE_REXPECTED | AAC_FIBSTATE_NORM | AAC_FIBSTATE_ASYNC | AAC_FIBSTATE_FAST_RESPONSE; fib->Header.Command = ContainerCommand; if (aacraid_wait_command(cm) != 0) { device_printf(sc->aac_dev, "Error probing container %d\n", cid); aacraid_release_command(cm); return (-1); } } bcopy(&fib->data[0], mir, sizeof(struct aac_mntinforesp)); aac_mntinforesp_toh(mir); /* UID */ *uid = cid; if (mir->MntTable[0].VolType != CT_NONE && !(mir->MntTable[0].ContentState & AAC_FSCS_HIDDEN)) { if (!(sc->aac_support_opt2 & AAC_SUPPORTED_VARIABLE_BLOCK_SIZE)) { mir->MntTable[0].ObjExtension.BlockDevice.BlockSize = 0x200; mir->MntTable[0].ObjExtension.BlockDevice.bdLgclPhysMap = 0; } ccfg = (struct aac_cnt_config *)&fib->data[0]; bzero(ccfg, sizeof (*ccfg) - CT_PACKET_SIZE); ccfg->Command = VM_ContainerConfig; ccfg->CTCommand.command = CT_CID_TO_32BITS_UID; ccfg->CTCommand.param[0] = cid; aac_cnt_config_tole(ccfg); if (sync_fib) { rval = aac_sync_fib(sc, ContainerCommand, 0, fib, sizeof(struct aac_cnt_config)); aac_cnt_config_toh(ccfg); if (rval == 0 && ccfg->Command == ST_OK && ccfg->CTCommand.param[0] == CT_OK && mir->MntTable[0].VolType != CT_PASSTHRU) *uid = ccfg->CTCommand.param[1]; } else { fib->Header.Size = sizeof(struct aac_fib_header) + sizeof(struct aac_cnt_config); fib->Header.XferState = AAC_FIBSTATE_HOSTOWNED | AAC_FIBSTATE_INITIALISED | AAC_FIBSTATE_EMPTY | AAC_FIBSTATE_FROMHOST | AAC_FIBSTATE_REXPECTED | AAC_FIBSTATE_NORM | AAC_FIBSTATE_ASYNC | AAC_FIBSTATE_FAST_RESPONSE; fib->Header.Command = ContainerCommand; rval = 
aacraid_wait_command(cm); aac_cnt_config_toh(ccfg); if (rval == 0 && ccfg->Command == ST_OK && ccfg->CTCommand.param[0] == CT_OK && mir->MntTable[0].VolType != CT_PASSTHRU) *uid = ccfg->CTCommand.param[1]; aacraid_release_command(cm); } } return (0); } /* * Create a device to represent a new container */ static void aac_add_container(struct aac_softc *sc, struct aac_mntinforesp *mir, int f, u_int32_t uid) { struct aac_container *co; fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); /* * Check container volume type for validity. Note that many of * the possible types may never show up. */ if ((mir->Status == ST_OK) && (mir->MntTable[0].VolType != CT_NONE)) { co = (struct aac_container *)malloc(sizeof *co, M_AACRAIDBUF, M_NOWAIT | M_ZERO); if (co == NULL) { panic("Out of memory?!"); } co->co_found = f; bcopy(&mir->MntTable[0], &co->co_mntobj, sizeof(struct aac_mntobj)); co->co_uid = uid; TAILQ_INSERT_TAIL(&sc->aac_container_tqh, co, co_link); } } /* * Allocate resources associated with (sc) */ static int aac_alloc(struct aac_softc *sc) { bus_size_t maxsize; fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); /* * Create DMA tag for mapping buffers into controller-addressable space. */ if (bus_dma_tag_create(sc->aac_parent_dmat, /* parent */ 1, 0, /* algnmnt, boundary */ (sc->flags & AAC_FLAGS_SG_64BIT) ? BUS_SPACE_MAXADDR : BUS_SPACE_MAXADDR_32BIT, /* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filter, filterarg */ AAC_MAXIO_SIZE(sc), /* maxsize */ sc->aac_sg_tablesize, /* nsegments */ BUS_SPACE_MAXSIZE_32BIT, /* maxsegsize */ BUS_DMA_ALLOCNOW, /* flags */ busdma_lock_mutex, /* lockfunc */ &sc->aac_io_lock, /* lockfuncarg */ &sc->aac_buffer_dmat)) { device_printf(sc->aac_dev, "can't allocate buffer DMA tag\n"); return (ENOMEM); } /* * Create DMA tag for mapping FIBs into controller-addressable space.. */ if (sc->flags & AAC_FLAGS_NEW_COMM_TYPE1) maxsize = sc->aac_max_fibs_alloc * (sc->aac_max_fib_size + sizeof(struct aac_fib_xporthdr) + 31); else maxsize = sc->aac_max_fibs_alloc * (sc->aac_max_fib_size + 31); if (bus_dma_tag_create(sc->aac_parent_dmat, /* parent */ 1, 0, /* algnmnt, boundary */ (sc->flags & AAC_FLAGS_4GB_WINDOW) ? BUS_SPACE_MAXADDR_32BIT : 0x7fffffff, /* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filter, filterarg */ maxsize, /* maxsize */ 1, /* nsegments */ maxsize, /* maxsize */ 0, /* flags */ NULL, NULL, /* No locking needed */ &sc->aac_fib_dmat)) { device_printf(sc->aac_dev, "can't allocate FIB DMA tag\n"); return (ENOMEM); } /* * Create DMA tag for the common structure and allocate it. */ maxsize = sizeof(struct aac_common); maxsize += sc->aac_max_fibs * sizeof(u_int32_t); if (bus_dma_tag_create(sc->aac_parent_dmat, /* parent */ 1, 0, /* algnmnt, boundary */ (sc->flags & AAC_FLAGS_4GB_WINDOW) ? 
BUS_SPACE_MAXADDR_32BIT : 0x7fffffff, /* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filter, filterarg */ maxsize, /* maxsize */ 1, /* nsegments */ maxsize, /* maxsegsize */ 0, /* flags */ NULL, NULL, /* No locking needed */ &sc->aac_common_dmat)) { device_printf(sc->aac_dev, "can't allocate common structure DMA tag\n"); return (ENOMEM); } if (bus_dmamem_alloc(sc->aac_common_dmat, (void **)&sc->aac_common, BUS_DMA_NOWAIT, &sc->aac_common_dmamap)) { device_printf(sc->aac_dev, "can't allocate common structure\n"); return (ENOMEM); } (void)bus_dmamap_load(sc->aac_common_dmat, sc->aac_common_dmamap, sc->aac_common, maxsize, aac_common_map, sc, 0); bzero(sc->aac_common, maxsize); /* Allocate some FIBs and associated command structs */ TAILQ_INIT(&sc->aac_fibmap_tqh); sc->aac_commands = malloc(sc->aac_max_fibs * sizeof(struct aac_command), M_AACRAIDBUF, M_WAITOK|M_ZERO); mtx_lock(&sc->aac_io_lock); while (sc->total_fibs < sc->aac_max_fibs) { if (aac_alloc_commands(sc) != 0) break; } mtx_unlock(&sc->aac_io_lock); if (sc->total_fibs == 0) return (ENOMEM); return (0); } /* * Free all of the resources associated with (sc) * * Should not be called if the controller is active. */ void aacraid_free(struct aac_softc *sc) { int i; fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); /* remove the control device */ if (sc->aac_dev_t != NULL) destroy_dev(sc->aac_dev_t); /* throw away any FIB buffers, discard the FIB DMA tag */ aac_free_commands(sc); if (sc->aac_fib_dmat) bus_dma_tag_destroy(sc->aac_fib_dmat); free(sc->aac_commands, M_AACRAIDBUF); /* destroy the common area */ if (sc->aac_common) { bus_dmamap_unload(sc->aac_common_dmat, sc->aac_common_dmamap); bus_dmamem_free(sc->aac_common_dmat, sc->aac_common, sc->aac_common_dmamap); } if (sc->aac_common_dmat) bus_dma_tag_destroy(sc->aac_common_dmat); /* disconnect the interrupt handler */ for (i = 0; i < AAC_MAX_MSIX; ++i) { if (sc->aac_intr[i]) bus_teardown_intr(sc->aac_dev, sc->aac_irq[i], sc->aac_intr[i]); if (sc->aac_irq[i]) bus_release_resource(sc->aac_dev, SYS_RES_IRQ, sc->aac_irq_rid[i], sc->aac_irq[i]); else break; } if (sc->msi_enabled || sc->msi_tupelo) pci_release_msi(sc->aac_dev); /* destroy data-transfer DMA tag */ if (sc->aac_buffer_dmat) bus_dma_tag_destroy(sc->aac_buffer_dmat); /* destroy the parent DMA tag */ if (sc->aac_parent_dmat) bus_dma_tag_destroy(sc->aac_parent_dmat); /* release the register window mapping */ if (sc->aac_regs_res0 != NULL) bus_release_resource(sc->aac_dev, SYS_RES_MEMORY, sc->aac_regs_rid0, sc->aac_regs_res0); if (sc->aac_regs_res1 != NULL) bus_release_resource(sc->aac_dev, SYS_RES_MEMORY, sc->aac_regs_rid1, sc->aac_regs_res1); } /* * Disconnect from the controller completely, in preparation for unload. 
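 *
 * Teardown order matters here: bus_generic_detach() runs first so that
 * the child devices (and their CAM consumers) are gone before the
 * container list and SIM structures they reference are freed; only then
 * is the AIF thread stopped, the controller shut down and the remaining
 * resources released.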
*/ int aacraid_detach(device_t dev) { struct aac_softc *sc; struct aac_container *co; struct aac_sim *sim; int error; sc = device_get_softc(dev); fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); + error = bus_generic_detach(dev); + if (error != 0) + return (error); + callout_drain(&sc->aac_daemontime); /* Remove the child containers */ while ((co = TAILQ_FIRST(&sc->aac_container_tqh)) != NULL) { TAILQ_REMOVE(&sc->aac_container_tqh, co, co_link); free(co, M_AACRAIDBUF); } /* Remove the CAM SIMs */ while ((sim = TAILQ_FIRST(&sc->aac_sim_tqh)) != NULL) { TAILQ_REMOVE(&sc->aac_sim_tqh, sim, sim_link); - error = device_delete_child(dev, sim->sim_dev); - if (error) - return (error); free(sim, M_AACRAIDBUF); } if (sc->aifflags & AAC_AIFFLAGS_RUNNING) { sc->aifflags |= AAC_AIFFLAGS_EXIT; wakeup(sc->aifthread); tsleep(sc->aac_dev, PUSER | PCATCH, "aac_dch", 30 * hz); } if (sc->aifflags & AAC_AIFFLAGS_RUNNING) panic("Cannot shutdown AIF thread"); if ((error = aacraid_shutdown(dev))) return(error); EVENTHANDLER_DEREGISTER(shutdown_final, sc->eh); aacraid_free(sc); mtx_destroy(&sc->aac_io_lock); return(0); } /* * Bring the controller down to a dormant state and detach all child devices. * * This function is called before detach or system shutdown. * * Note that we can assume that the bioq on the controller is empty, as we won't * allow shutdown if any device is open. */ int aacraid_shutdown(device_t dev) { struct aac_softc *sc; struct aac_fib *fib; struct aac_close_command *cc; sc = device_get_softc(dev); fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); sc->aac_state |= AAC_STATE_SUSPEND; /* * Send a Container shutdown followed by a HostShutdown FIB to the * controller to convince it that we don't want to talk to it anymore. * We've been closed and all I/O completed already */ device_printf(sc->aac_dev, "shutting down controller..."); mtx_lock(&sc->aac_io_lock); aac_alloc_sync_fib(sc, &fib); cc = (struct aac_close_command *)&fib->data[0]; bzero(cc, sizeof(struct aac_close_command)); cc->Command = htole32(VM_CloseAll); cc->ContainerId = htole32(0xfffffffe); if (aac_sync_fib(sc, ContainerCommand, 0, fib, sizeof(struct aac_close_command))) printf("FAILED.\n"); else printf("done\n"); AAC_ACCESS_DEVREG(sc, AAC_DISABLE_INTERRUPT); aac_release_sync_fib(sc); mtx_unlock(&sc->aac_io_lock); return(0); } /* * Bring the controller to a quiescent state, ready for system suspend. */ int aacraid_suspend(device_t dev) { struct aac_softc *sc; sc = device_get_softc(dev); fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); sc->aac_state |= AAC_STATE_SUSPEND; AAC_ACCESS_DEVREG(sc, AAC_DISABLE_INTERRUPT); return(0); } /* * Bring the controller back to a state ready for operation. */ int aacraid_resume(device_t dev) { struct aac_softc *sc; sc = device_get_softc(dev); fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); sc->aac_state &= ~AAC_STATE_SUSPEND; AAC_ACCESS_DEVREG(sc, AAC_ENABLE_INTERRUPT); return(0); } /* * Interrupt handler for NEW_COMM_TYPE1, NEW_COMM_TYPE2, NEW_COMM_TYPE34 interface. 
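 *
 * The handler below first decodes the doorbell (INTx) or MSI status
 * register to see whether a sync command completion, an AIF or normal
 * async responses are pending, then drains this vector's slice of the
 * host RRQ (completing commands and passing AIFs to aac_handle_aif()),
 * and finally restarts queued I/O if the queue is no longer frozen.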
*/ void aacraid_new_intr_type1(void *arg) { struct aac_msix_ctx *ctx; struct aac_softc *sc; int vector_no; struct aac_command *cm; struct aac_fib *fib; u_int32_t bellbits, bellbits_shifted, index, handle; int isFastResponse, isAif, noMoreAif, mode; ctx = (struct aac_msix_ctx *)arg; sc = ctx->sc; vector_no = ctx->vector_no; fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); mtx_lock(&sc->aac_io_lock); if (sc->msi_enabled) { mode = AAC_INT_MODE_MSI; if (vector_no == 0) { bellbits = AAC_MEM0_GETREG4(sc, AAC_SRC_ODBR_MSI); if (bellbits & 0x40000) mode |= AAC_INT_MODE_AIF; else if (bellbits & 0x1000) mode |= AAC_INT_MODE_SYNC; } } else { mode = AAC_INT_MODE_INTX; bellbits = AAC_MEM0_GETREG4(sc, AAC_SRC_ODBR_R); if (bellbits & AAC_DB_RESPONSE_SENT_NS) { bellbits = AAC_DB_RESPONSE_SENT_NS; AAC_MEM0_SETREG4(sc, AAC_SRC_ODBR_C, bellbits); } else { bellbits_shifted = (bellbits >> AAC_SRC_ODR_SHIFT); AAC_MEM0_SETREG4(sc, AAC_SRC_ODBR_C, bellbits); if (bellbits_shifted & AAC_DB_AIF_PENDING) mode |= AAC_INT_MODE_AIF; if (bellbits_shifted & AAC_DB_SYNC_COMMAND) mode |= AAC_INT_MODE_SYNC; } /* ODR readback, Prep #238630 */ AAC_MEM0_GETREG4(sc, AAC_SRC_ODBR_R); } if (mode & AAC_INT_MODE_SYNC) { if (sc->aac_sync_cm) { cm = sc->aac_sync_cm; aac_unmap_command(cm); cm->cm_flags |= AAC_CMD_COMPLETED; aac_fib_header_toh(&cm->cm_fib->Header); /* is there a completion handler? */ if (cm->cm_complete != NULL) { cm->cm_complete(cm); } else { /* assume that someone is sleeping on this command */ wakeup(cm); } sc->flags &= ~AAC_QUEUE_FRZN; sc->aac_sync_cm = NULL; } if (mode & AAC_INT_MODE_INTX) mode &= ~AAC_INT_MODE_SYNC; else mode = 0; } if (mode & AAC_INT_MODE_AIF) { if (mode & AAC_INT_MODE_INTX) { aac_request_aif(sc); mode = 0; } } if (sc->flags & AAC_FLAGS_SYNC_MODE) mode = 0; if (mode) { /* handle async. status */ index = sc->aac_host_rrq_idx[vector_no]; for (;;) { isFastResponse = isAif = noMoreAif = 0; /* remove toggle bit (31) */ handle = (le32toh(sc->aac_common->ac_host_rrq[index]) & 0x7fffffff); /* check fast response bit (30) */ if (handle & 0x40000000) isFastResponse = 1; /* check AIF bit (23) */ else if (handle & 0x00800000) isAif = TRUE; handle &= 0x0000ffff; if (handle == 0) break; cm = sc->aac_commands + (handle - 1); fib = cm->cm_fib; aac_fib_header_toh(&fib->Header); sc->aac_rrq_outstanding[vector_no]--; if (isAif) { noMoreAif = (fib->Header.XferState & AAC_FIBSTATE_NOMOREAIF) ? 1:0; if (!noMoreAif) aac_handle_aif(sc, fib); aac_remove_busy(cm); aacraid_release_command(cm); } else { if (isFastResponse) { fib->Header.XferState |= AAC_FIBSTATE_DONEADAP; *((u_int32_t *)(fib->data)) = htole32(ST_OK); cm->cm_flags |= AAC_CMD_FASTRESP; } aac_remove_busy(cm); aac_unmap_command(cm); cm->cm_flags |= AAC_CMD_COMPLETED; /* is there a completion handler? */ if (cm->cm_complete != NULL) { cm->cm_complete(cm); } else { /* assume that someone is sleeping on this command */ wakeup(cm); } sc->flags &= ~AAC_QUEUE_FRZN; } sc->aac_common->ac_host_rrq[index++] = 0; if (index == (vector_no + 1) * sc->aac_vector_cap) index = vector_no * sc->aac_vector_cap; sc->aac_host_rrq_idx[vector_no] = index; if ((isAif && !noMoreAif) || sc->aif_pending) aac_request_aif(sc); } } if (mode & AAC_INT_MODE_AIF) { aac_request_aif(sc); AAC_ACCESS_DEVREG(sc, AAC_CLEAR_AIF_BIT); mode = 0; } /* see if we can start some more I/O */ if ((sc->flags & AAC_QUEUE_FRZN) == 0) aacraid_startio(sc); mtx_unlock(&sc->aac_io_lock); } /* * Handle notification of one or more FIBs coming from the controller. 
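 *
 * This is the per-controller AIF kthread: it sleeps for up to
 * AAC_PERIODIC_INTERVAL seconds at a time, allocates more FIBs when
 * aacraid_alloc_command() has flagged exhaustion, runs aac_timeout()
 * on timeout-driven wakeups to catch stuck commands, and drains the
 * adapter's printf buffer.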
*/ static void aac_command_thread(struct aac_softc *sc) { int retval; fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); mtx_lock(&sc->aac_io_lock); sc->aifflags = AAC_AIFFLAGS_RUNNING; while ((sc->aifflags & AAC_AIFFLAGS_EXIT) == 0) { retval = 0; if ((sc->aifflags & AAC_AIFFLAGS_PENDING) == 0) retval = msleep(sc->aifthread, &sc->aac_io_lock, PRIBIO, "aacraid_aifthd", AAC_PERIODIC_INTERVAL * hz); /* * First see if any FIBs need to be allocated. */ if ((sc->aifflags & AAC_AIFFLAGS_ALLOCFIBS) != 0) { aac_alloc_commands(sc); sc->aifflags &= ~AAC_AIFFLAGS_ALLOCFIBS; aacraid_startio(sc); } /* * While we're here, check to see if any commands are stuck. * This is pretty low-priority, so it's ok if it doesn't * always fire. */ if (retval == EWOULDBLOCK) aac_timeout(sc); /* Check the hardware printf message buffer */ if (sc->aac_common->ac_printf[0] != 0) aac_print_printf(sc); } sc->aifflags &= ~AAC_AIFFLAGS_RUNNING; mtx_unlock(&sc->aac_io_lock); wakeup(sc->aac_dev); aac_kthread_exit(0); } /* * Submit a command to the controller, return when it completes. * XXX This is very dangerous! If the card has gone out to lunch, we could * be stuck here forever. At the same time, signals are not caught * because there is a risk that a signal could wake up the sleep before * the card has a chance to complete the command. Since there is no way * to cancel a command that is in progress, we can't protect against the * card completing a command late and spamming the command and data * memory. So, we are held hostage until the command completes. */ int aacraid_wait_command(struct aac_command *cm) { struct aac_softc *sc; int error; sc = cm->cm_sc; fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); mtx_assert(&sc->aac_io_lock, MA_OWNED); /* Put the command on the ready queue and get things going */ aac_enqueue_ready(cm); aacraid_startio(sc); error = msleep(cm, &sc->aac_io_lock, PRIBIO, "aacraid_wait", 0); return(error); } /* * Command Buffer Management */ /* * Allocate a command. */ int aacraid_alloc_command(struct aac_softc *sc, struct aac_command **cmp) { struct aac_command *cm; fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); if ((cm = aac_dequeue_free(sc)) == NULL) { if (sc->total_fibs < sc->aac_max_fibs) { sc->aifflags |= AAC_AIFFLAGS_ALLOCFIBS; wakeup(sc->aifthread); } return (EBUSY); } *cmp = cm; return(0); } /* * Release a command back to the freelist. */ void aacraid_release_command(struct aac_command *cm) { struct aac_event *event; struct aac_softc *sc; sc = cm->cm_sc; fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); mtx_assert(&sc->aac_io_lock, MA_OWNED); /* (re)initialize the command/FIB */ cm->cm_sgtable = NULL; cm->cm_flags = 0; cm->cm_complete = NULL; cm->cm_ccb = NULL; cm->cm_passthr_dmat = 0; cm->cm_fib->Header.XferState = AAC_FIBSTATE_EMPTY; cm->cm_fib->Header.StructType = AAC_FIBTYPE_TFIB; cm->cm_fib->Header.Unused = 0; cm->cm_fib->Header.SenderSize = cm->cm_sc->aac_max_fib_size; /* * These are duplicated in aac_start to cover the case where an * intermediate stage may have destroyed them. They're left * initialized here for debugging purposes only. */ cm->cm_fib->Header.u.ReceiverFibAddress = (u_int32_t)cm->cm_fibphys; cm->cm_fib->Header.Handle = 0; aac_enqueue_free(cm); /* * Dequeue all events so that there's no risk of events getting * stranded. */ while ((event = TAILQ_FIRST(&sc->aac_ev_cmfree)) != NULL) { TAILQ_REMOVE(&sc->aac_ev_cmfree, event, ev_links); event->ev_callback(sc, event, event->ev_arg); } } /* * Map helper for command/FIB allocation. 
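 *
 * bus_dmamap_load() callback for the FIB block: the FIB DMA tag is
 * created with nsegments = 1, so recording segs[0].ds_addr is enough.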
*/ static void aac_map_command_helper(void *arg, bus_dma_segment_t *segs, int nseg, int error) { uint64_t *fibphys; fibphys = (uint64_t *)arg; *fibphys = segs[0].ds_addr; } /* * Allocate and initialize commands/FIBs for this adapter. */ static int aac_alloc_commands(struct aac_softc *sc) { struct aac_command *cm; struct aac_fibmap *fm; uint64_t fibphys; int i, error; u_int32_t maxsize; fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); mtx_assert(&sc->aac_io_lock, MA_OWNED); if (sc->total_fibs + sc->aac_max_fibs_alloc > sc->aac_max_fibs) return (ENOMEM); fm = malloc(sizeof(struct aac_fibmap), M_AACRAIDBUF, M_NOWAIT|M_ZERO); if (fm == NULL) return (ENOMEM); mtx_unlock(&sc->aac_io_lock); /* allocate the FIBs in DMAable memory and load them */ if (bus_dmamem_alloc(sc->aac_fib_dmat, (void **)&fm->aac_fibs, BUS_DMA_NOWAIT, &fm->aac_fibmap)) { device_printf(sc->aac_dev, "Not enough contiguous memory available.\n"); free(fm, M_AACRAIDBUF); mtx_lock(&sc->aac_io_lock); return (ENOMEM); } maxsize = sc->aac_max_fib_size + 31; if (sc->flags & AAC_FLAGS_NEW_COMM_TYPE1) maxsize += sizeof(struct aac_fib_xporthdr); /* Ignore errors since this doesn't bounce */ (void)bus_dmamap_load(sc->aac_fib_dmat, fm->aac_fibmap, fm->aac_fibs, sc->aac_max_fibs_alloc * maxsize, aac_map_command_helper, &fibphys, 0); mtx_lock(&sc->aac_io_lock); /* initialize constant fields in the command structure */ bzero(fm->aac_fibs, sc->aac_max_fibs_alloc * maxsize); for (i = 0; i < sc->aac_max_fibs_alloc; i++) { cm = sc->aac_commands + sc->total_fibs; fm->aac_commands = cm; cm->cm_sc = sc; cm->cm_fib = (struct aac_fib *) ((u_int8_t *)fm->aac_fibs + i * maxsize); cm->cm_fibphys = fibphys + i * maxsize; if (sc->flags & AAC_FLAGS_NEW_COMM_TYPE1) { u_int64_t fibphys_aligned; fibphys_aligned = (cm->cm_fibphys + sizeof(struct aac_fib_xporthdr) + 31) & ~31; cm->cm_fib = (struct aac_fib *) ((u_int8_t *)cm->cm_fib + (fibphys_aligned - cm->cm_fibphys)); cm->cm_fibphys = fibphys_aligned; } else { u_int64_t fibphys_aligned; fibphys_aligned = (cm->cm_fibphys + 31) & ~31; cm->cm_fib = (struct aac_fib *) ((u_int8_t *)cm->cm_fib + (fibphys_aligned - cm->cm_fibphys)); cm->cm_fibphys = fibphys_aligned; } cm->cm_index = sc->total_fibs; if ((error = bus_dmamap_create(sc->aac_buffer_dmat, 0, &cm->cm_datamap)) != 0) break; if (sc->aac_max_fibs <= 1 || sc->aac_max_fibs - sc->total_fibs > 1) aacraid_release_command(cm); sc->total_fibs++; } if (i > 0) { TAILQ_INSERT_TAIL(&sc->aac_fibmap_tqh, fm, fm_link); fwprintf(sc, HBA_FLAGS_DBG_COMM_B, "total_fibs= %d\n", sc->total_fibs); return (0); } bus_dmamap_unload(sc->aac_fib_dmat, fm->aac_fibmap); bus_dmamem_free(sc->aac_fib_dmat, fm->aac_fibs, fm->aac_fibmap); free(fm, M_AACRAIDBUF); return (ENOMEM); } /* * Free FIBs owned by this adapter. */ static void aac_free_commands(struct aac_softc *sc) { struct aac_fibmap *fm; struct aac_command *cm; int i; fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); while ((fm = TAILQ_FIRST(&sc->aac_fibmap_tqh)) != NULL) { TAILQ_REMOVE(&sc->aac_fibmap_tqh, fm, fm_link); /* * We check against total_fibs to handle partially * allocated blocks. */ for (i = 0; i < sc->aac_max_fibs_alloc && sc->total_fibs--; i++) { cm = fm->aac_commands + i; bus_dmamap_destroy(sc->aac_buffer_dmat, cm->cm_datamap); } bus_dmamap_unload(sc->aac_fib_dmat, fm->aac_fibmap); bus_dmamem_free(sc->aac_fib_dmat, fm->aac_fibs, fm->aac_fibmap); free(fm, M_AACRAIDBUF); } } /* * Command-mapping helper function - populate this command's s/g table. 
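 *
 * Called as the bus_dmamap_load() callback (or directly with nseg == 0
 * for commands that carry no data). Depending on the FIB command it
 * fills one of four s/g formats (RawIo2 ieee1212 entries, raw entries,
 * 32-bit or 64-bit entries), converts them to little-endian, grows the
 * FIB size by the entry count, and then submits the command: via
 * AAC_MONKER_SYNCFIB for waiting or sync-mode commands, or through
 * AAC_SEND_COMMAND otherwise.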
*/ void aacraid_map_command_sg(void *arg, bus_dma_segment_t *segs, int nseg, int error) { struct aac_softc *sc; struct aac_command *cm; struct aac_fib *fib; int i; cm = (struct aac_command *)arg; sc = cm->cm_sc; fib = cm->cm_fib; fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "nseg %d", nseg); mtx_assert(&sc->aac_io_lock, MA_OWNED); if ((sc->flags & AAC_FLAGS_SYNC_MODE) && sc->aac_sync_cm) return; /* copy into the FIB */ if (cm->cm_sgtable != NULL) { if (fib->Header.Command == RawIo2) { struct aac_raw_io2 *raw; struct aac_sge_ieee1212 *sg; u_int32_t min_size = PAGE_SIZE, cur_size; int conformable = TRUE; raw = (struct aac_raw_io2 *)&fib->data[0]; sg = (struct aac_sge_ieee1212 *)cm->cm_sgtable; raw->sgeCnt = nseg; for (i = 0; i < nseg; i++) { cur_size = segs[i].ds_len; sg[i].addrHigh = 0; *(bus_addr_t *)&sg[i].addrLow = segs[i].ds_addr; sg[i].length = cur_size; sg[i].flags = 0; if (i == 0) { raw->sgeFirstSize = cur_size; } else if (i == 1) { raw->sgeNominalSize = cur_size; min_size = cur_size; } else if ((i+1) < nseg && cur_size != raw->sgeNominalSize) { conformable = FALSE; if (cur_size < min_size) min_size = cur_size; } } /* not conformable: evaluate required sg elements */ if (!conformable) { int j, err_found, nseg_new = nseg; for (i = min_size / PAGE_SIZE; i >= 1; --i) { err_found = FALSE; nseg_new = 2; for (j = 1; j < nseg - 1; ++j) { if (sg[j].length % (i*PAGE_SIZE)) { err_found = TRUE; break; } nseg_new += (sg[j].length / (i*PAGE_SIZE)); } if (!err_found) break; } if (i>0 && nseg_new<=sc->aac_sg_tablesize && !(sc->hint_flags & 4)) nseg = aac_convert_sgraw2(sc, raw, i, nseg, nseg_new); } else { raw->flags |= RIO2_SGL_CONFORMANT; } for (i = 0; i < nseg; i++) aac_sge_ieee1212_tole(sg + i); aac_raw_io2_tole(raw); /* update the FIB size for the s/g count */ fib->Header.Size += nseg * sizeof(struct aac_sge_ieee1212); } else if (fib->Header.Command == RawIo) { struct aac_sg_tableraw *sg; sg = (struct aac_sg_tableraw *)cm->cm_sgtable; sg->SgCount = htole32(nseg); for (i = 0; i < nseg; i++) { sg->SgEntryRaw[i].SgAddress = segs[i].ds_addr; sg->SgEntryRaw[i].SgByteCount = segs[i].ds_len; sg->SgEntryRaw[i].Next = 0; sg->SgEntryRaw[i].Prev = 0; sg->SgEntryRaw[i].Flags = 0; aac_sg_entryraw_tole(&sg->SgEntryRaw[i]); } aac_raw_io_tole((struct aac_raw_io *)&fib->data[0]); /* update the FIB size for the s/g count */ fib->Header.Size += nseg*sizeof(struct aac_sg_entryraw); } else if ((cm->cm_sc->flags & AAC_FLAGS_SG_64BIT) == 0) { struct aac_sg_table *sg; sg = cm->cm_sgtable; sg->SgCount = htole32(nseg); for (i = 0; i < nseg; i++) { sg->SgEntry[i].SgAddress = segs[i].ds_addr; sg->SgEntry[i].SgByteCount = segs[i].ds_len; aac_sg_entry_tole(&sg->SgEntry[i]); } /* update the FIB size for the s/g count */ fib->Header.Size += nseg*sizeof(struct aac_sg_entry); } else { struct aac_sg_table64 *sg; sg = (struct aac_sg_table64 *)cm->cm_sgtable; sg->SgCount = htole32(nseg); for (i = 0; i < nseg; i++) { sg->SgEntry64[i].SgAddress = segs[i].ds_addr; sg->SgEntry64[i].SgByteCount = segs[i].ds_len; aac_sg_entry64_tole(&sg->SgEntry64[i]); } /* update the FIB size for the s/g count */ fib->Header.Size += nseg*sizeof(struct aac_sg_entry64); } } /* Fix up the address values in the FIB. Use the command array index * instead of a pointer since these fields are only 32 bits. 
Shift * the SenderFibAddress over to make room for the fast response bit * and for the AIF bit */ cm->cm_fib->Header.SenderFibAddress = (cm->cm_index << 2); cm->cm_fib->Header.u.ReceiverFibAddress = (u_int32_t)cm->cm_fibphys; /* save a pointer to the command for speedy reverse-lookup */ cm->cm_fib->Header.Handle += cm->cm_index + 1; if (cm->cm_passthr_dmat == 0) { if (cm->cm_flags & AAC_CMD_DATAIN) bus_dmamap_sync(sc->aac_buffer_dmat, cm->cm_datamap, BUS_DMASYNC_PREREAD); if (cm->cm_flags & AAC_CMD_DATAOUT) bus_dmamap_sync(sc->aac_buffer_dmat, cm->cm_datamap, BUS_DMASYNC_PREWRITE); } cm->cm_flags |= AAC_CMD_MAPPED; if (cm->cm_flags & AAC_CMD_WAIT) { aac_fib_header_tole(&fib->Header); aacraid_sync_command(sc, AAC_MONKER_SYNCFIB, cm->cm_fibphys, 0, 0, 0, NULL, NULL); } else if (sc->flags & AAC_FLAGS_SYNC_MODE) { u_int32_t wait = 0; sc->aac_sync_cm = cm; aac_fib_header_tole(&fib->Header); aacraid_sync_command(sc, AAC_MONKER_SYNCFIB, cm->cm_fibphys, 0, 0, 0, &wait, NULL); } else { int count = 10000000L; while (AAC_SEND_COMMAND(sc, cm) != 0) { if (--count == 0) { aac_unmap_command(cm); sc->flags |= AAC_QUEUE_FRZN; aac_requeue_ready(cm); } DELAY(5); /* wait 5 usec. */ } } } static int aac_convert_sgraw2(struct aac_softc *sc, struct aac_raw_io2 *raw, int pages, int nseg, int nseg_new) { struct aac_sge_ieee1212 *sge; int i, j, pos; u_int32_t addr_low; sge = malloc(nseg_new * sizeof(struct aac_sge_ieee1212), M_AACRAIDBUF, M_NOWAIT|M_ZERO); if (sge == NULL) return nseg; for (i = 1, pos = 1; i < nseg - 1; ++i) { for (j = 0; j < raw->sge[i].length / (pages*PAGE_SIZE); ++j) { addr_low = raw->sge[i].addrLow + j * pages * PAGE_SIZE; sge[pos].addrLow = addr_low; sge[pos].addrHigh = raw->sge[i].addrHigh; if (addr_low < raw->sge[i].addrLow) sge[pos].addrHigh++; sge[pos].length = pages * PAGE_SIZE; sge[pos].flags = 0; pos++; } } sge[pos] = raw->sge[nseg-1]; for (i = 1; i < nseg_new; ++i) raw->sge[i] = sge[i]; free(sge, M_AACRAIDBUF); raw->sgeCnt = nseg_new; raw->flags |= RIO2_SGL_CONFORMANT; raw->sgeNominalSize = pages * PAGE_SIZE; return nseg_new; } /* * Unmap a command from controller-visible space. */ static void aac_unmap_command(struct aac_command *cm) { struct aac_softc *sc; sc = cm->cm_sc; fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); if (!(cm->cm_flags & AAC_CMD_MAPPED)) return; if (cm->cm_datalen != 0 && cm->cm_passthr_dmat == 0) { if (cm->cm_flags & AAC_CMD_DATAIN) bus_dmamap_sync(sc->aac_buffer_dmat, cm->cm_datamap, BUS_DMASYNC_POSTREAD); if (cm->cm_flags & AAC_CMD_DATAOUT) bus_dmamap_sync(sc->aac_buffer_dmat, cm->cm_datamap, BUS_DMASYNC_POSTWRITE); bus_dmamap_unload(sc->aac_buffer_dmat, cm->cm_datamap); } cm->cm_flags &= ~AAC_CMD_MAPPED; } /* * Hardware Interface */ /* * Initialize the adapter. 
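 *
 * aac_common_map() below is the bus_dmamap_load() callback that records
 * the bus address of the common area; aac_check_firmware() then waits
 * for the firmware to come up, reads its supported options and
 * preferred settings, and remaps the register window if the reported
 * ATU size requires it.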
*/ static void aac_common_map(void *arg, bus_dma_segment_t *segs, int nseg, int error) { struct aac_softc *sc; sc = (struct aac_softc *)arg; fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); sc->aac_common_busaddr = segs[0].ds_addr; } static int aac_check_firmware(struct aac_softc *sc) { u_int32_t code, major, minor, maxsize; u_int32_t options = 0, atu_size = 0, status, waitCount; time_t then; fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); /* check if flash update is running */ if (AAC_GET_FWSTATUS(sc) & AAC_FLASH_UPD_PENDING) { then = time_uptime; do { code = AAC_GET_FWSTATUS(sc); if (time_uptime > (then + AAC_FWUPD_TIMEOUT)) { device_printf(sc->aac_dev, "FATAL: controller not coming ready, " "status %x\n", code); return(ENXIO); } } while (!(code & AAC_FLASH_UPD_SUCCESS) && !(code & AAC_FLASH_UPD_FAILED)); /* * Delay 10 seconds. Because right now FW is doing a soft reset, * do not read scratch pad register at this time */ waitCount = 10 * 10000; while (waitCount) { DELAY(100); /* delay 100 microseconds */ waitCount--; } } /* * Wait for the adapter to come ready. */ then = time_uptime; do { code = AAC_GET_FWSTATUS(sc); if (time_uptime > (then + AAC_BOOT_TIMEOUT)) { device_printf(sc->aac_dev, "FATAL: controller not coming ready, " "status %x\n", code); return(ENXIO); } } while (!(code & AAC_UP_AND_RUNNING) || code == 0xffffffff); /* * Retrieve the firmware version numbers. Dell PERC2/QC cards with * firmware version 1.x are not compatible with this driver. */ if (sc->flags & AAC_FLAGS_PERC2QC) { if (aacraid_sync_command(sc, AAC_MONKER_GETKERNVER, 0, 0, 0, 0, NULL, NULL)) { device_printf(sc->aac_dev, "Error reading firmware version\n"); return (EIO); } /* These numbers are stored as ASCII! */ major = (AAC_GET_MAILBOX(sc, 1) & 0xff) - 0x30; minor = (AAC_GET_MAILBOX(sc, 2) & 0xff) - 0x30; if (major == 1) { device_printf(sc->aac_dev, "Firmware version %d.%d is not supported.\n", major, minor); return (EINVAL); } } /* * Retrieve the capabilities/supported options word so we know what * work-arounds to enable. Some firmware revs don't support this * command. */ if (aacraid_sync_command(sc, AAC_MONKER_GETINFO, 0, 0, 0, 0, &status, NULL)) { if (status != AAC_SRB_STS_INVALID_REQUEST) { device_printf(sc->aac_dev, "RequestAdapterInfo failed\n"); return (EIO); } } else { options = AAC_GET_MAILBOX(sc, 1); atu_size = AAC_GET_MAILBOX(sc, 2); sc->supported_options = options; sc->doorbell_mask = AAC_GET_MAILBOX(sc, 3); if ((options & AAC_SUPPORTED_4GB_WINDOW) != 0 && (sc->flags & AAC_FLAGS_NO4GB) == 0) sc->flags |= AAC_FLAGS_4GB_WINDOW; if (options & AAC_SUPPORTED_NONDASD) sc->flags |= AAC_FLAGS_ENABLE_CAM; if ((options & AAC_SUPPORTED_SGMAP_HOST64) != 0 && (sizeof(bus_addr_t) > 4) && (sc->hint_flags & 0x1)) { device_printf(sc->aac_dev, "Enabling 64-bit address support\n"); sc->flags |= AAC_FLAGS_SG_64BIT; } if (sc->aac_if.aif_send_command) { if (options & AAC_SUPPORTED_NEW_COMM_TYPE2) sc->flags |= AAC_FLAGS_NEW_COMM | AAC_FLAGS_NEW_COMM_TYPE2; else if (options & AAC_SUPPORTED_NEW_COMM_TYPE1) sc->flags |= AAC_FLAGS_NEW_COMM | AAC_FLAGS_NEW_COMM_TYPE1; else if ((options & AAC_SUPPORTED_NEW_COMM_TYPE3) || (options & AAC_SUPPORTED_NEW_COMM_TYPE4)) sc->flags |= AAC_FLAGS_NEW_COMM | AAC_FLAGS_NEW_COMM_TYPE34; } if (options & AAC_SUPPORTED_64BIT_ARRAYSIZE) sc->flags |= AAC_FLAGS_ARRAY_64BIT; } if (!(sc->flags & AAC_FLAGS_NEW_COMM)) { device_printf(sc->aac_dev, "Communication interface not supported!\n"); return (ENXIO); } if (sc->hint_flags & 2) { device_printf(sc->aac_dev, "Sync. 
mode enforced by driver parameter. This will cause a significant performance decrease!\n"); sc->flags |= AAC_FLAGS_SYNC_MODE; } else if (sc->flags & AAC_FLAGS_NEW_COMM_TYPE34) { device_printf(sc->aac_dev, "Async. mode not supported by current driver, sync. mode enforced.\nPlease update driver to get full performance.\n"); sc->flags |= AAC_FLAGS_SYNC_MODE; } /* Check for broken hardware that does a lower number of commands */ sc->aac_max_fibs = (sc->flags & AAC_FLAGS_256FIBS ? 256:512); /* Remap mem. resource, if required */ if (atu_size > rman_get_size(sc->aac_regs_res0)) { bus_release_resource( sc->aac_dev, SYS_RES_MEMORY, sc->aac_regs_rid0, sc->aac_regs_res0); sc->aac_regs_res0 = bus_alloc_resource_anywhere( sc->aac_dev, SYS_RES_MEMORY, &sc->aac_regs_rid0, atu_size, RF_ACTIVE); if (sc->aac_regs_res0 == NULL) { sc->aac_regs_res0 = bus_alloc_resource_any( sc->aac_dev, SYS_RES_MEMORY, &sc->aac_regs_rid0, RF_ACTIVE); if (sc->aac_regs_res0 == NULL) { device_printf(sc->aac_dev, "couldn't allocate register window\n"); return (ENXIO); } } sc->aac_btag0 = rman_get_bustag(sc->aac_regs_res0); sc->aac_bhandle0 = rman_get_bushandle(sc->aac_regs_res0); } /* Read preferred settings */ sc->aac_max_fib_size = sizeof(struct aac_fib); sc->aac_max_sectors = 128; /* 64KB */ sc->aac_max_aif = 1; if (sc->flags & AAC_FLAGS_SG_64BIT) sc->aac_sg_tablesize = (AAC_FIB_DATASIZE - sizeof(struct aac_blockwrite64)) / sizeof(struct aac_sg_entry64); else sc->aac_sg_tablesize = (AAC_FIB_DATASIZE - sizeof(struct aac_blockwrite)) / sizeof(struct aac_sg_entry); if (!aacraid_sync_command(sc, AAC_MONKER_GETCOMMPREF, 0, 0, 0, 0, NULL, NULL)) { options = AAC_GET_MAILBOX(sc, 1); sc->aac_max_fib_size = (options & 0xFFFF); sc->aac_max_sectors = (options >> 16) << 1; options = AAC_GET_MAILBOX(sc, 2); sc->aac_sg_tablesize = (options >> 16); options = AAC_GET_MAILBOX(sc, 3); sc->aac_max_fibs = ((options >> 16) & 0xFFFF); if (sc->aac_max_fibs == 0 || sc->aac_hwif != AAC_HWIF_SRCV) sc->aac_max_fibs = (options & 0xFFFF); options = AAC_GET_MAILBOX(sc, 4); sc->aac_max_aif = (options & 0xFFFF); options = AAC_GET_MAILBOX(sc, 5); sc->aac_max_msix =(sc->flags & AAC_FLAGS_NEW_COMM_TYPE2) ? options : 0; } maxsize = sc->aac_max_fib_size + 31; if (sc->flags & AAC_FLAGS_NEW_COMM_TYPE1) maxsize += sizeof(struct aac_fib_xporthdr); if (maxsize > PAGE_SIZE) { sc->aac_max_fib_size -= (maxsize - PAGE_SIZE); maxsize = PAGE_SIZE; } sc->aac_max_fibs_alloc = PAGE_SIZE / maxsize; if (sc->aac_max_fib_size > sizeof(struct aac_fib)) { sc->flags |= AAC_FLAGS_RAW_IO; device_printf(sc->aac_dev, "Enable Raw I/O\n"); } if ((sc->flags & AAC_FLAGS_RAW_IO) && (sc->flags & AAC_FLAGS_ARRAY_64BIT)) { sc->flags |= AAC_FLAGS_LBA_64BIT; device_printf(sc->aac_dev, "Enable 64-bit array\n"); } #ifdef AACRAID_DEBUG aacraid_get_fw_debug_buffer(sc); #endif return (0); } static int aac_init(struct aac_softc *sc) { struct aac_adapter_init *ip; int i, error; fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); /* reset rrq index */ sc->aac_fibs_pushed_no = 0; for (i = 0; i < sc->aac_max_msix; i++) sc->aac_host_rrq_idx[i] = i * sc->aac_vector_cap; /* * Fill in the init structure. This tells the adapter about the * physical location of various important shared data structures. 
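 *
 * Everything referenced here lives inside the already-loaded aac_common
 * area, so the addresses handed to the adapter (adapter FIBs, printf
 * buffer, host RRQ) are all offsets from aac_common_busaddr.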
*/ ip = &sc->aac_common->ac_init; ip->InitStructRevision = AAC_INIT_STRUCT_REVISION; if (sc->aac_max_fib_size > sizeof(struct aac_fib)) { ip->InitStructRevision = AAC_INIT_STRUCT_REVISION_4; sc->flags |= AAC_FLAGS_RAW_IO; } ip->NoOfMSIXVectors = sc->aac_max_msix; ip->AdapterFibsPhysicalAddress = sc->aac_common_busaddr + offsetof(struct aac_common, ac_fibs); ip->AdapterFibsVirtualAddress = 0; ip->AdapterFibsSize = AAC_ADAPTER_FIBS * sizeof(struct aac_fib); ip->AdapterFibAlign = sizeof(struct aac_fib); ip->PrintfBufferAddress = sc->aac_common_busaddr + offsetof(struct aac_common, ac_printf); ip->PrintfBufferSize = AAC_PRINTF_BUFSIZE; /* * The adapter assumes that pages are 4K in size, except on some * broken firmware versions that do the page->byte conversion twice, * therefore 'assuming' that this value is in 16MB units (2^24). * Round up since the granularity is so high. */ ip->HostPhysMemPages = ctob(physmem) / AAC_PAGE_SIZE; if (sc->flags & AAC_FLAGS_BROKEN_MEMMAP) { ip->HostPhysMemPages = (ip->HostPhysMemPages + AAC_PAGE_SIZE) / AAC_PAGE_SIZE; } ip->HostElapsedSeconds = time_uptime; /* reset later if invalid */ ip->InitFlags = AAC_INITFLAGS_NEW_COMM_SUPPORTED; if (sc->flags & AAC_FLAGS_NEW_COMM_TYPE1) { ip->InitStructRevision = AAC_INIT_STRUCT_REVISION_6; ip->InitFlags |= (AAC_INITFLAGS_NEW_COMM_TYPE1_SUPPORTED | AAC_INITFLAGS_FAST_JBOD_SUPPORTED); device_printf(sc->aac_dev, "New comm. interface type1 enabled\n"); } else if (sc->flags & AAC_FLAGS_NEW_COMM_TYPE2) { ip->InitStructRevision = AAC_INIT_STRUCT_REVISION_7; ip->InitFlags |= (AAC_INITFLAGS_NEW_COMM_TYPE2_SUPPORTED | AAC_INITFLAGS_FAST_JBOD_SUPPORTED); device_printf(sc->aac_dev, "New comm. interface type2 enabled\n"); } ip->MaxNumAif = sc->aac_max_aif; ip->HostRRQ_AddrLow = sc->aac_common_busaddr + offsetof(struct aac_common, ac_host_rrq); /* always 32-bit address */ ip->HostRRQ_AddrHigh = 0; if (sc->aac_support_opt2 & AAC_SUPPORTED_POWER_MANAGEMENT) { ip->InitFlags |= AAC_INITFLAGS_DRIVER_SUPPORTS_PM; ip->InitFlags |= AAC_INITFLAGS_DRIVER_USES_UTC_TIME; device_printf(sc->aac_dev, "Power Management enabled\n"); } ip->MaxIoCommands = sc->aac_max_fibs; ip->MaxIoSize = AAC_MAXIO_SIZE(sc); ip->MaxFibSize = sc->aac_max_fib_size; aac_adapter_init_tole(ip); /* * Do controller-type-specific initialisation */ AAC_MEM0_SETREG4(sc, AAC_SRC_ODBR_C, ~0); /* * Give the init structure to the controller. */ if (aacraid_sync_command(sc, AAC_MONKER_INITSTRUCT, sc->aac_common_busaddr + offsetof(struct aac_common, ac_init), 0, 0, 0, NULL, NULL)) { device_printf(sc->aac_dev, "error establishing init structure\n"); error = EIO; goto out; } /* * Check configuration issues */ if ((error = aac_check_config(sc)) != 0) goto out; error = 0; out: return(error); } static void aac_define_int_mode(struct aac_softc *sc) { device_t dev; int cap, msi_count, error = 0; uint32_t val; dev = sc->aac_dev; if (sc->flags & AAC_FLAGS_SYNC_MODE) { device_printf(dev, "using line interrupts\n"); sc->aac_max_msix = 1; sc->aac_vector_cap = sc->aac_max_fibs; return; } /* max. 
vectors from AAC_MONKER_GETCOMMPREF */ if (sc->aac_max_msix == 0) { if (sc->aac_hwif == AAC_HWIF_SRC) { msi_count = 1; if ((error = pci_alloc_msi(dev, &msi_count)) != 0) { device_printf(dev, "alloc msi failed - err=%d; " "will use INTx\n", error); pci_release_msi(dev); } else { sc->msi_tupelo = TRUE; } } if (sc->msi_tupelo) device_printf(dev, "using MSI interrupts\n"); else device_printf(dev, "using line interrupts\n"); sc->aac_max_msix = 1; sc->aac_vector_cap = sc->aac_max_fibs; return; } /* OS capability */ msi_count = pci_msix_count(dev); if (msi_count > AAC_MAX_MSIX) msi_count = AAC_MAX_MSIX; if (msi_count > sc->aac_max_msix) msi_count = sc->aac_max_msix; if (msi_count == 0 || (error = pci_alloc_msix(dev, &msi_count)) != 0) { device_printf(dev, "alloc msix failed - msi_count=%d, err=%d; " "will try MSI\n", msi_count, error); pci_release_msi(dev); } else { sc->msi_enabled = TRUE; device_printf(dev, "using MSI-X interrupts (%u vectors)\n", msi_count); } if (!sc->msi_enabled) { msi_count = 1; if ((error = pci_alloc_msi(dev, &msi_count)) != 0) { device_printf(dev, "alloc msi failed - err=%d; " "will use INTx\n", error); pci_release_msi(dev); } else { sc->msi_enabled = TRUE; device_printf(dev, "using MSI interrupts\n"); } } if (sc->msi_enabled) { /* now read controller capability from PCI config. space */ cap = aac_find_pci_capability(sc, PCIY_MSIX); val = (cap != 0 ? pci_read_config(dev, cap + 2, 2) : 0); if (!(val & AAC_PCI_MSI_ENABLE)) { pci_release_msi(dev); sc->msi_enabled = FALSE; } } if (!sc->msi_enabled) { device_printf(dev, "using legacy interrupts\n"); sc->aac_max_msix = 1; } else { AAC_ACCESS_DEVREG(sc, AAC_ENABLE_MSIX); if (sc->aac_max_msix > msi_count) sc->aac_max_msix = msi_count; } sc->aac_vector_cap = sc->aac_max_fibs / sc->aac_max_msix; fwprintf(sc, HBA_FLAGS_DBG_DEBUG_B, "msi_enabled %d vector_cap %d max_fibs %d max_msix %d", sc->msi_enabled,sc->aac_vector_cap, sc->aac_max_fibs, sc->aac_max_msix); } static int aac_find_pci_capability(struct aac_softc *sc, int cap) { device_t dev; uint32_t status; uint8_t ptr; dev = sc->aac_dev; status = pci_read_config(dev, PCIR_STATUS, 2); if (!(status & PCIM_STATUS_CAPPRESENT)) return (0); status = pci_read_config(dev, PCIR_HDRTYPE, 1); switch (status & PCIM_HDRTYPE) { case 0: case 1: ptr = PCIR_CAP_PTR; break; case 2: ptr = PCIR_CAP_PTR_2; break; default: return (0); break; } ptr = pci_read_config(dev, ptr, 1); while (ptr != 0) { int next, val; next = pci_read_config(dev, ptr + PCICAP_NEXTPTR, 1); val = pci_read_config(dev, ptr + PCICAP_ID, 1); if (val == cap) return (ptr); ptr = next; } return (0); } static int aac_setup_intr(struct aac_softc *sc) { int i, msi_count, rid; struct resource *res; void *tag; msi_count = sc->aac_max_msix; rid = ((sc->msi_enabled || sc->msi_tupelo)? 
1:0); for (i = 0; i < msi_count; i++, rid++) { if ((res = bus_alloc_resource_any(sc->aac_dev,SYS_RES_IRQ, &rid, RF_SHAREABLE | RF_ACTIVE)) == NULL) { device_printf(sc->aac_dev,"can't allocate interrupt\n"); return (EINVAL); } sc->aac_irq_rid[i] = rid; sc->aac_irq[i] = res; if (aac_bus_setup_intr(sc->aac_dev, res, INTR_MPSAFE | INTR_TYPE_BIO, NULL, aacraid_new_intr_type1, &sc->aac_msix[i], &tag)) { device_printf(sc->aac_dev, "can't set up interrupt\n"); return (EINVAL); } sc->aac_msix[i].vector_no = i; sc->aac_msix[i].sc = sc; sc->aac_intr[i] = tag; } return (0); } static int aac_check_config(struct aac_softc *sc) { struct aac_fib *fib; struct aac_cnt_config *ccfg; struct aac_cf_status_hdr *cf_shdr; int rval; mtx_lock(&sc->aac_io_lock); aac_alloc_sync_fib(sc, &fib); ccfg = (struct aac_cnt_config *)&fib->data[0]; bzero(ccfg, sizeof (*ccfg) - CT_PACKET_SIZE); ccfg->Command = VM_ContainerConfig; ccfg->CTCommand.command = CT_GET_CONFIG_STATUS; ccfg->CTCommand.param[CNT_SIZE] = sizeof(struct aac_cf_status_hdr); aac_cnt_config_tole(ccfg); rval = aac_sync_fib(sc, ContainerCommand, 0, fib, sizeof (struct aac_cnt_config)); aac_cnt_config_toh(ccfg); cf_shdr = (struct aac_cf_status_hdr *)ccfg->CTCommand.data; if (rval == 0 && ccfg->Command == ST_OK && ccfg->CTCommand.param[0] == CT_OK) { if (le32toh(cf_shdr->action) <= CFACT_PAUSE) { bzero(ccfg, sizeof (*ccfg) - CT_PACKET_SIZE); ccfg->Command = VM_ContainerConfig; ccfg->CTCommand.command = CT_COMMIT_CONFIG; aac_cnt_config_tole(ccfg); rval = aac_sync_fib(sc, ContainerCommand, 0, fib, sizeof (struct aac_cnt_config)); aac_cnt_config_toh(ccfg); if (rval == 0 && ccfg->Command == ST_OK && ccfg->CTCommand.param[0] == CT_OK) { /* successful completion */ rval = 0; } else { /* auto commit aborted due to error(s) */ rval = -2; } } else { /* auto commit aborted due to adapter indicating config. issues too dangerous to auto commit */ rval = -3; } } else { /* error */ rval = -1; } aac_release_sync_fib(sc); mtx_unlock(&sc->aac_io_lock); return(rval); } /* * Send a synchronous command to the controller and wait for a result. * Indicate if the controller completed the command with an error status. 
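 *
 * The usual protocol: fill the four-argument mailbox, clear the sync
 * doorbell, ring it, spin for up to AAC_SYNC_TIMEOUT seconds waiting
 * for the completion bit, and read the status back from mailbox 0.
 * A rough usage sketch (mirroring aac_sync_fib() below):
 *
 *	if (aacraid_sync_command(sc, AAC_MONKER_SYNCFIB,
 *	    ReceiverFibAddress, 0, 0, 0, NULL, NULL))
 *		return (EIO);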
*/ int aacraid_sync_command(struct aac_softc *sc, u_int32_t command, u_int32_t arg0, u_int32_t arg1, u_int32_t arg2, u_int32_t arg3, u_int32_t *sp, u_int32_t *r1) { time_t then; u_int32_t status; fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); /* populate the mailbox */ AAC_SET_MAILBOX(sc, command, arg0, arg1, arg2, arg3); /* ensure the sync command doorbell flag is cleared */ if (!sc->msi_enabled) AAC_CLEAR_ISTATUS(sc, AAC_DB_SYNC_COMMAND); /* then set it to signal the adapter */ AAC_QNOTIFY(sc, AAC_DB_SYNC_COMMAND); if ((command != AAC_MONKER_SYNCFIB) || (sp == NULL) || (*sp != 0)) { /* spin waiting for the command to complete */ then = time_uptime; do { if (time_uptime > (then + AAC_SYNC_TIMEOUT)) { fwprintf(sc, HBA_FLAGS_DBG_ERROR_B, "timed out"); return(EIO); } } while (!(AAC_GET_ISTATUS(sc) & AAC_DB_SYNC_COMMAND)); /* clear the completion flag */ AAC_CLEAR_ISTATUS(sc, AAC_DB_SYNC_COMMAND); /* get the command status */ status = AAC_GET_MAILBOX(sc, 0); if (sp != NULL) *sp = status; /* return parameter */ if (r1 != NULL) *r1 = AAC_GET_MAILBOX(sc, 1); if (status != AAC_SRB_STS_SUCCESS) return (-1); } return(0); } static int aac_sync_fib(struct aac_softc *sc, u_int32_t command, u_int32_t xferstate, struct aac_fib *fib, u_int16_t datasize) { uint32_t ReceiverFibAddress; fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); mtx_assert(&sc->aac_io_lock, MA_OWNED); if (datasize > AAC_FIB_DATASIZE) return(EINVAL); /* * Set up the sync FIB */ fib->Header.XferState = AAC_FIBSTATE_HOSTOWNED | AAC_FIBSTATE_INITIALISED | AAC_FIBSTATE_EMPTY; fib->Header.XferState |= xferstate; fib->Header.Command = command; fib->Header.StructType = AAC_FIBTYPE_TFIB; fib->Header.Size = sizeof(struct aac_fib_header) + datasize; fib->Header.SenderSize = sizeof(struct aac_fib); fib->Header.SenderFibAddress = 0; /* Not needed */ ReceiverFibAddress = sc->aac_common_busaddr + offsetof(struct aac_common, ac_sync_fib); fib->Header.u.ReceiverFibAddress = ReceiverFibAddress; aac_fib_header_tole(&fib->Header); /* * Give the FIB to the controller, wait for a response. */ if (aacraid_sync_command(sc, AAC_MONKER_SYNCFIB, ReceiverFibAddress, 0, 0, 0, NULL, NULL)) { fwprintf(sc, HBA_FLAGS_DBG_ERROR_B, "IO error"); aac_fib_header_toh(&fib->Header); return(EIO); } aac_fib_header_toh(&fib->Header); return (0); } /* * Check for commands that have been outstanding for a suspiciously long time, * and complain about them. */ static void aac_timeout(struct aac_softc *sc) { struct aac_command *cm; time_t deadline; int timedout; fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); /* * Traverse the busy command list, bitch about late commands once * only. */ timedout = 0; deadline = time_uptime - AAC_CMD_TIMEOUT; TAILQ_FOREACH(cm, &sc->aac_busy, cm_link) { if (cm->cm_timestamp < deadline) { device_printf(sc->aac_dev, "COMMAND %p TIMEOUT AFTER %d SECONDS\n", cm, (int)(time_uptime-cm->cm_timestamp)); AAC_PRINT_FIB(sc, cm->cm_fib); timedout++; } } if (timedout) aac_reset_adapter(sc); aacraid_print_queues(sc); } /* * Interface Function Vectors */ /* * Read the current firmware status word. 
*/ static int aac_src_get_fwstatus(struct aac_softc *sc) { fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); return(AAC_MEM0_GETREG4(sc, AAC_SRC_OMR)); } /* * Notify the controller of a change in a given queue */ static void aac_src_qnotify(struct aac_softc *sc, int qbit) { fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); AAC_MEM0_SETREG4(sc, AAC_SRC_IDBR, qbit << AAC_SRC_IDR_SHIFT); } /* * Get the interrupt reason bits */ static int aac_src_get_istatus(struct aac_softc *sc) { int val; fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); if (sc->msi_enabled) { val = AAC_MEM0_GETREG4(sc, AAC_SRC_ODBR_MSI); if (val & AAC_MSI_SYNC_STATUS) val = AAC_DB_SYNC_COMMAND; else val = 0; } else { val = AAC_MEM0_GETREG4(sc, AAC_SRC_ODBR_R) >> AAC_SRC_ODR_SHIFT; } return(val); } /* * Clear some interrupt reason bits */ static void aac_src_clear_istatus(struct aac_softc *sc, int mask) { fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); if (sc->msi_enabled) { if (mask == AAC_DB_SYNC_COMMAND) AAC_ACCESS_DEVREG(sc, AAC_CLEAR_SYNC_BIT); } else { AAC_MEM0_SETREG4(sc, AAC_SRC_ODBR_C, mask << AAC_SRC_ODR_SHIFT); } } /* * Populate the mailbox and set the command word */ static void aac_src_set_mailbox(struct aac_softc *sc, u_int32_t command, u_int32_t arg0, u_int32_t arg1, u_int32_t arg2, u_int32_t arg3) { fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); AAC_MEM0_SETREG4(sc, AAC_SRC_MAILBOX, command); AAC_MEM0_SETREG4(sc, AAC_SRC_MAILBOX + 4, arg0); AAC_MEM0_SETREG4(sc, AAC_SRC_MAILBOX + 8, arg1); AAC_MEM0_SETREG4(sc, AAC_SRC_MAILBOX + 12, arg2); AAC_MEM0_SETREG4(sc, AAC_SRC_MAILBOX + 16, arg3); } static void aac_srcv_set_mailbox(struct aac_softc *sc, u_int32_t command, u_int32_t arg0, u_int32_t arg1, u_int32_t arg2, u_int32_t arg3) { fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); AAC_MEM0_SETREG4(sc, AAC_SRCV_MAILBOX, command); AAC_MEM0_SETREG4(sc, AAC_SRCV_MAILBOX + 4, arg0); AAC_MEM0_SETREG4(sc, AAC_SRCV_MAILBOX + 8, arg1); AAC_MEM0_SETREG4(sc, AAC_SRCV_MAILBOX + 12, arg2); AAC_MEM0_SETREG4(sc, AAC_SRCV_MAILBOX + 16, arg3); } /* * Fetch the immediate command status word */ static int aac_src_get_mailbox(struct aac_softc *sc, int mb) { fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); return(AAC_MEM0_GETREG4(sc, AAC_SRC_MAILBOX + (mb * 4))); } static int aac_srcv_get_mailbox(struct aac_softc *sc, int mb) { fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); return(AAC_MEM0_GETREG4(sc, AAC_SRCV_MAILBOX + (mb * 4))); } /* * Set/clear interrupt masks */ static void aac_src_access_devreg(struct aac_softc *sc, int mode) { u_int32_t val; fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); switch (mode) { case AAC_ENABLE_INTERRUPT: AAC_MEM0_SETREG4(sc, AAC_SRC_OIMR, (sc->msi_enabled ? AAC_INT_ENABLE_TYPE1_MSIX : AAC_INT_ENABLE_TYPE1_INTX)); break; case AAC_DISABLE_INTERRUPT: AAC_MEM0_SETREG4(sc, AAC_SRC_OIMR, AAC_INT_DISABLE_ALL); break; case AAC_ENABLE_MSIX: /* set bit 6 */ val = AAC_MEM0_GETREG4(sc, AAC_SRC_IDBR); val |= 0x40; AAC_MEM0_SETREG4(sc, AAC_SRC_IDBR, val); AAC_MEM0_GETREG4(sc, AAC_SRC_IDBR); /* unmask int. 
*/ val = PMC_ALL_INTERRUPT_BITS; AAC_MEM0_SETREG4(sc, AAC_SRC_IOAR, val); val = AAC_MEM0_GETREG4(sc, AAC_SRC_OIMR); AAC_MEM0_SETREG4(sc, AAC_SRC_OIMR, val & (~(PMC_GLOBAL_INT_BIT2 | PMC_GLOBAL_INT_BIT0))); break; case AAC_DISABLE_MSIX: /* reset bit 6 */ val = AAC_MEM0_GETREG4(sc, AAC_SRC_IDBR); val &= ~0x40; AAC_MEM0_SETREG4(sc, AAC_SRC_IDBR, val); AAC_MEM0_GETREG4(sc, AAC_SRC_IDBR); break; case AAC_CLEAR_AIF_BIT: /* set bit 5 */ val = AAC_MEM0_GETREG4(sc, AAC_SRC_IDBR); val |= 0x20; AAC_MEM0_SETREG4(sc, AAC_SRC_IDBR, val); AAC_MEM0_GETREG4(sc, AAC_SRC_IDBR); break; case AAC_CLEAR_SYNC_BIT: /* set bit 4 */ val = AAC_MEM0_GETREG4(sc, AAC_SRC_IDBR); val |= 0x10; AAC_MEM0_SETREG4(sc, AAC_SRC_IDBR, val); AAC_MEM0_GETREG4(sc, AAC_SRC_IDBR); break; case AAC_ENABLE_INTX: /* set bit 7 */ val = AAC_MEM0_GETREG4(sc, AAC_SRC_IDBR); val |= 0x80; AAC_MEM0_SETREG4(sc, AAC_SRC_IDBR, val); AAC_MEM0_GETREG4(sc, AAC_SRC_IDBR); /* unmask int. */ val = PMC_ALL_INTERRUPT_BITS; AAC_MEM0_SETREG4(sc, AAC_SRC_IOAR, val); val = AAC_MEM0_GETREG4(sc, AAC_SRC_OIMR); AAC_MEM0_SETREG4(sc, AAC_SRC_OIMR, val & (~(PMC_GLOBAL_INT_BIT2))); break; default: break; } } /* * New comm. interface: Send command functions */ static int aac_src_send_command(struct aac_softc *sc, struct aac_command *cm) { struct aac_fib_xporthdr *pFibX; u_int32_t fibsize, high_addr; u_int64_t address; fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, "send command (new comm. type1)"); if (sc->msi_enabled && cm->cm_fib->Header.Command != AifRequest && sc->aac_max_msix > 1) { u_int16_t vector_no, first_choice = 0xffff; vector_no = sc->aac_fibs_pushed_no % sc->aac_max_msix; do { vector_no += 1; if (vector_no == sc->aac_max_msix) vector_no = 1; if (sc->aac_rrq_outstanding[vector_no] < sc->aac_vector_cap) break; if (0xffff == first_choice) first_choice = vector_no; else if (vector_no == first_choice) break; } while (1); if (vector_no == first_choice) vector_no = 0; sc->aac_rrq_outstanding[vector_no]++; if (sc->aac_fibs_pushed_no == 0xffffffff) sc->aac_fibs_pushed_no = 0; else sc->aac_fibs_pushed_no++; cm->cm_fib->Header.Handle += (vector_no << 16); } if (sc->flags & AAC_FLAGS_NEW_COMM_TYPE2) { /* Calculate the amount to the fibsize bits */ fibsize = (cm->cm_fib->Header.Size + 127) / 128 - 1; /* Fill new FIB header */ address = cm->cm_fibphys; high_addr = (u_int32_t)(address >> 32); if (high_addr == 0L) { cm->cm_fib->Header.StructType = AAC_FIBTYPE_TFIB2; cm->cm_fib->Header.u.TimeStamp = 0L; } else { cm->cm_fib->Header.StructType = AAC_FIBTYPE_TFIB2_64; cm->cm_fib->Header.u.SenderFibAddressHigh = high_addr; } cm->cm_fib->Header.SenderFibAddress = (u_int32_t)address; } else { /* Calculate the amount to the fibsize bits */ fibsize = (sizeof(struct aac_fib_xporthdr) + cm->cm_fib->Header.Size + 127) / 128 - 1; /* Fill XPORT header */ pFibX = (struct aac_fib_xporthdr *) ((unsigned char *)cm->cm_fib - sizeof(struct aac_fib_xporthdr)); pFibX->Handle = cm->cm_fib->Header.Handle; pFibX->HostAddress = cm->cm_fibphys; pFibX->Size = cm->cm_fib->Header.Size; aac_fib_xporthdr_tole(pFibX); address = cm->cm_fibphys - sizeof(struct aac_fib_xporthdr); high_addr = (u_int32_t)(address >> 32); } aac_fib_header_tole(&cm->cm_fib->Header); if (fibsize > 31) fibsize = 31; aac_enqueue_busy(cm); if (high_addr) { AAC_MEM0_SETREG4(sc, AAC_SRC_IQUE64_H, high_addr); AAC_MEM0_SETREG4(sc, AAC_SRC_IQUE64_L, (u_int32_t)address + fibsize); } else { AAC_MEM0_SETREG4(sc, AAC_SRC_IQUE32, (u_int32_t)address + fibsize); } return 0; } /* * New comm. 
interface: get, set outbound queue index */ static int aac_src_get_outb_queue(struct aac_softc *sc) { fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); return(-1); } static void aac_src_set_outb_queue(struct aac_softc *sc, int index) { fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); } /* * Debugging and Diagnostics */ /* * Print some information about the controller. */ static void aac_describe_controller(struct aac_softc *sc) { struct aac_fib *fib; struct aac_adapter_info *info; char *adapter_type = "Adaptec RAID controller"; fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); mtx_lock(&sc->aac_io_lock); aac_alloc_sync_fib(sc, &fib); if (sc->supported_options & AAC_SUPPORTED_SUPPLEMENT_ADAPTER_INFO) { fib->data[0] = 0; if (aac_sync_fib(sc, RequestSupplementAdapterInfo, 0, fib, 1)) device_printf(sc->aac_dev, "RequestSupplementAdapterInfo failed\n"); else { struct aac_supplement_adapter_info *supp_info; supp_info = ((struct aac_supplement_adapter_info *)&fib->data[0]); adapter_type = (char *)supp_info->AdapterTypeText; sc->aac_feature_bits = le32toh(supp_info->FeatureBits); sc->aac_support_opt2 = le32toh(supp_info->SupportedOptions2); } } device_printf(sc->aac_dev, "%s, aacraid driver %d.%d.%d-%d\n", adapter_type, AAC_DRIVER_MAJOR_VERSION, AAC_DRIVER_MINOR_VERSION, AAC_DRIVER_BUGFIX_LEVEL, AAC_DRIVER_BUILD); fib->data[0] = 0; if (aac_sync_fib(sc, RequestAdapterInfo, 0, fib, 1)) { device_printf(sc->aac_dev, "RequestAdapterInfo failed\n"); aac_release_sync_fib(sc); mtx_unlock(&sc->aac_io_lock); return; } /* save the kernel revision structure for later use */ info = (struct aac_adapter_info *)&fib->data[0]; aac_adapter_info_toh(info); sc->aac_revision = info->KernelRevision; if (bootverbose) { device_printf(sc->aac_dev, "%s %dMHz, %dMB memory " "(%dMB cache, %dMB execution), %s\n", aac_describe_code(aac_cpu_variant, info->CpuVariant), info->ClockSpeed, info->TotalMem / (1024 * 1024), info->BufferMem / (1024 * 1024), info->ExecutionMem / (1024 * 1024), aac_describe_code(aac_battery_platform, info->batteryPlatform)); device_printf(sc->aac_dev, "Kernel %d.%d-%d, Build %d, S/N %6X\n", info->KernelRevision.external.comp.major, info->KernelRevision.external.comp.minor, info->KernelRevision.external.comp.dash, info->KernelRevision.buildNumber, (u_int32_t)(info->SerialNumber & 0xffffff)); device_printf(sc->aac_dev, "Supported Options=%b\n", sc->supported_options, "\20" "\1SNAPSHOT" "\2CLUSTERS" "\3WCACHE" "\4DATA64" "\5HOSTTIME" "\6RAID50" "\7WINDOW4GB" "\10SCSIUPGD" "\11SOFTERR" "\12NORECOND" "\13SGMAP64" "\14ALARM" "\15NONDASD" "\16SCSIMGT" "\17RAIDSCSI" "\21ADPTINFO" "\22NEWCOMM" "\23ARRAY64BIT" "\24HEATSENSOR"); } aac_release_sync_fib(sc); mtx_unlock(&sc->aac_io_lock); } /* * Look up a text description of a numeric error code and return a pointer to * same. 
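 *
 * The lookup tables (e.g. aac_cpu_variant[] and aac_battery_platform[]
 * near the top of this file) end with a {NULL, 0} sentinel followed by
 * one extra default entry, which is why an unmatched code falls through
 * to table[i + 1].string instead of failing.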
*/ static char * aac_describe_code(struct aac_code_lookup *table, u_int32_t code) { int i; for (i = 0; table[i].string != NULL; i++) if (table[i].code == code) return(table[i].string); return(table[i + 1].string); } /* * Management Interface */ static int aac_open(struct cdev *dev, int flags, int fmt, struct thread *td) { struct aac_softc *sc; sc = dev->si_drv1; fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); device_busy(sc->aac_dev); devfs_set_cdevpriv(sc, aac_cdevpriv_dtor); return 0; } static int aac_ioctl(struct cdev *dev, u_long cmd, caddr_t arg, int flag, struct thread *td) { union aac_statrequest *as; struct aac_softc *sc; int error = 0; as = (union aac_statrequest *)arg; sc = dev->si_drv1; fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); switch (cmd) { case AACIO_STATS: switch (as->as_item) { case AACQ_FREE: case AACQ_READY: case AACQ_BUSY: bcopy(&sc->aac_qstat[as->as_item], &as->as_qstat, sizeof(struct aac_qstat)); break; default: error = ENOENT; break; } break; case FSACTL_SENDFIB: case FSACTL_SEND_LARGE_FIB: arg = *(caddr_t*)arg; case FSACTL_LNX_SENDFIB: case FSACTL_LNX_SEND_LARGE_FIB: fwprintf(sc, HBA_FLAGS_DBG_IOCTL_COMMANDS_B, "FSACTL_SENDFIB"); error = aac_ioctl_sendfib(sc, arg); break; case FSACTL_SEND_RAW_SRB: arg = *(caddr_t*)arg; case FSACTL_LNX_SEND_RAW_SRB: fwprintf(sc, HBA_FLAGS_DBG_IOCTL_COMMANDS_B, "FSACTL_SEND_RAW_SRB"); error = aac_ioctl_send_raw_srb(sc, arg); break; case FSACTL_AIF_THREAD: case FSACTL_LNX_AIF_THREAD: fwprintf(sc, HBA_FLAGS_DBG_IOCTL_COMMANDS_B, "FSACTL_AIF_THREAD"); error = EINVAL; break; case FSACTL_OPEN_GET_ADAPTER_FIB: arg = *(caddr_t*)arg; case FSACTL_LNX_OPEN_GET_ADAPTER_FIB: fwprintf(sc, HBA_FLAGS_DBG_IOCTL_COMMANDS_B, "FSACTL_OPEN_GET_ADAPTER_FIB"); error = aac_open_aif(sc, arg); break; case FSACTL_GET_NEXT_ADAPTER_FIB: arg = *(caddr_t*)arg; case FSACTL_LNX_GET_NEXT_ADAPTER_FIB: fwprintf(sc, HBA_FLAGS_DBG_IOCTL_COMMANDS_B, "FSACTL_GET_NEXT_ADAPTER_FIB"); error = aac_getnext_aif(sc, arg); break; case FSACTL_CLOSE_GET_ADAPTER_FIB: arg = *(caddr_t*)arg; case FSACTL_LNX_CLOSE_GET_ADAPTER_FIB: fwprintf(sc, HBA_FLAGS_DBG_IOCTL_COMMANDS_B, "FSACTL_CLOSE_GET_ADAPTER_FIB"); error = aac_close_aif(sc, arg); break; case FSACTL_MINIPORT_REV_CHECK: arg = *(caddr_t*)arg; case FSACTL_LNX_MINIPORT_REV_CHECK: fwprintf(sc, HBA_FLAGS_DBG_IOCTL_COMMANDS_B, "FSACTL_MINIPORT_REV_CHECK"); error = aac_rev_check(sc, arg); break; case FSACTL_QUERY_DISK: arg = *(caddr_t*)arg; case FSACTL_LNX_QUERY_DISK: fwprintf(sc, HBA_FLAGS_DBG_IOCTL_COMMANDS_B, "FSACTL_QUERY_DISK"); error = aac_query_disk(sc, arg); break; case FSACTL_DELETE_DISK: case FSACTL_LNX_DELETE_DISK: /* * We don't trust userland to tell us when to delete a * container; rather, we rely on an AIF coming from the * controller */ error = 0; break; case FSACTL_GET_PCI_INFO: arg = *(caddr_t*)arg; case FSACTL_LNX_GET_PCI_INFO: fwprintf(sc, HBA_FLAGS_DBG_IOCTL_COMMANDS_B, "FSACTL_GET_PCI_INFO"); error = aac_get_pci_info(sc, arg); break; case FSACTL_GET_FEATURES: arg = *(caddr_t*)arg; case FSACTL_LNX_GET_FEATURES: fwprintf(sc, HBA_FLAGS_DBG_IOCTL_COMMANDS_B, "FSACTL_GET_FEATURES"); error = aac_supported_features(sc, arg); break; default: fwprintf(sc, HBA_FLAGS_DBG_IOCTL_COMMANDS_B, "unsupported cmd 0x%lx\n", cmd); error = EINVAL; break; } return(error); } static int aac_poll(struct cdev *dev, int poll_events, struct thread *td) { struct aac_softc *sc; struct aac_fib_context *ctx; int revents; sc = dev->si_drv1; revents = 0; mtx_lock(&sc->aac_io_lock); if ((poll_events & (POLLRDNORM | POLLIN)) != 0) { for 
(ctx = sc->fibctx; ctx; ctx = ctx->next) { if (ctx->ctx_idx != sc->aifq_idx || ctx->ctx_wrap) { revents |= poll_events & (POLLIN | POLLRDNORM); break; } } } mtx_unlock(&sc->aac_io_lock); if (revents == 0) { if (poll_events & (POLLIN | POLLRDNORM)) selrecord(td, &sc->rcv_select); } return (revents); } static void aac_ioctl_event(struct aac_softc *sc, struct aac_event *event, void *arg) { switch (event->ev_type) { case AAC_EVENT_CMFREE: mtx_assert(&sc->aac_io_lock, MA_OWNED); if (aacraid_alloc_command(sc, (struct aac_command **)arg)) { aacraid_add_event(sc, event); return; } free(event, M_AACRAIDBUF); wakeup(arg); break; default: break; } } /* * Send a FIB supplied from userspace * * Currently, sending a FIB from userspace on BE hosts is not supported. * There are several things that need to be considered in order to * support this, such as: * - At least the FIB data part from userspace should already be in LE, * or else the kernel would need to know all FIB types to be able to * correctly convert it to BE. * - SG tables are converted to BE by aacraid_map_command_sg(). This * conversion should be suppressed if the FIB comes from userspace. * - aacraid_wait_command() calls functions that convert the FIB header * to LE. But if the header is already in LE, the conversion should not * be performed. */ static int aac_ioctl_sendfib(struct aac_softc *sc, caddr_t ufib) { struct aac_command *cm; int size, error; fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); cm = NULL; /* * Get a command */ mtx_lock(&sc->aac_io_lock); if (aacraid_alloc_command(sc, &cm)) { struct aac_event *event; event = malloc(sizeof(struct aac_event), M_AACRAIDBUF, M_NOWAIT | M_ZERO); if (event == NULL) { error = EBUSY; mtx_unlock(&sc->aac_io_lock); goto out; } event->ev_type = AAC_EVENT_CMFREE; event->ev_callback = aac_ioctl_event; event->ev_arg = &cm; aacraid_add_event(sc, event); msleep(cm, &sc->aac_io_lock, 0, "aacraid_ctlsfib", 0); } mtx_unlock(&sc->aac_io_lock); /* * Fetch the FIB header, then re-copy to get data as well. */ if ((error = copyin(ufib, cm->cm_fib, sizeof(struct aac_fib_header))) != 0) goto out; size = cm->cm_fib->Header.Size + sizeof(struct aac_fib_header); if (size > sc->aac_max_fib_size) { device_printf(sc->aac_dev, "incoming FIB oversized (%d > %d)\n", size, sc->aac_max_fib_size); size = sc->aac_max_fib_size; } if ((error = copyin(ufib, cm->cm_fib, size)) != 0) goto out; cm->cm_fib->Header.Size = size; cm->cm_timestamp = time_uptime; cm->cm_datalen = 0; /* * Pass the FIB to the controller, wait for it to complete. */ mtx_lock(&sc->aac_io_lock); error = aacraid_wait_command(cm); mtx_unlock(&sc->aac_io_lock); if (error != 0) { device_printf(sc->aac_dev, "aacraid_wait_command return %d\n", error); goto out; } /* * Copy the FIB and data back out to the caller.
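 * As with the inbound copy, the size claimed in the FIB header is clamped to
 * aac_max_fib_size before the copyout, so a corrupt or oversized reply cannot
 * overrun the caller's buffer.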
*/ size = cm->cm_fib->Header.Size; if (size > sc->aac_max_fib_size) { device_printf(sc->aac_dev, "outbound FIB oversized (%d > %d)\n", size, sc->aac_max_fib_size); size = sc->aac_max_fib_size; } error = copyout(cm->cm_fib, ufib, size); out: if (cm != NULL) { mtx_lock(&sc->aac_io_lock); aacraid_release_command(cm); mtx_unlock(&sc->aac_io_lock); } return(error); } /* * Send a passthrough FIB supplied from userspace */ static int aac_ioctl_send_raw_srb(struct aac_softc *sc, caddr_t arg) { struct aac_command *cm; struct aac_fib *fib; struct aac_srb *srbcmd; struct aac_srb *user_srb = (struct aac_srb *)arg; void *user_reply; int error, transfer_data = 0; bus_dmamap_t orig_map = 0; u_int32_t fibsize = 0; u_int64_t srb_sg_address; u_int32_t srb_sg_bytecount; fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); cm = NULL; mtx_lock(&sc->aac_io_lock); if (aacraid_alloc_command(sc, &cm)) { struct aac_event *event; event = malloc(sizeof(struct aac_event), M_AACRAIDBUF, M_NOWAIT | M_ZERO); if (event == NULL) { error = EBUSY; mtx_unlock(&sc->aac_io_lock); goto out; } event->ev_type = AAC_EVENT_CMFREE; event->ev_callback = aac_ioctl_event; event->ev_arg = &cm; aacraid_add_event(sc, event); msleep(cm, &sc->aac_io_lock, 0, "aacraid_ctlsraw", 0); } mtx_unlock(&sc->aac_io_lock); cm->cm_data = NULL; /* save original dma map */ orig_map = cm->cm_datamap; fib = cm->cm_fib; srbcmd = (struct aac_srb *)fib->data; if ((error = copyin((void *)&user_srb->data_len, &fibsize, sizeof (u_int32_t))) != 0) goto out; if (fibsize > (sc->aac_max_fib_size-sizeof(struct aac_fib_header))) { error = EINVAL; goto out; } if ((error = copyin((void *)user_srb, srbcmd, fibsize)) != 0) goto out; srbcmd->function = 0; /* SRBF_ExecuteScsi */ srbcmd->retry_limit = 0; /* obsolete */ /* only one sg element from userspace supported */ if (srbcmd->sg_map.SgCount > 1) { error = EINVAL; goto out; } /* check fibsize */ if (fibsize == (sizeof(struct aac_srb) + srbcmd->sg_map.SgCount * sizeof(struct aac_sg_entry))) { struct aac_sg_entry *sgp = srbcmd->sg_map.SgEntry; struct aac_sg_entry sg; if ((error = copyin(sgp, &sg, sizeof(sg))) != 0) goto out; srb_sg_bytecount = sg.SgByteCount; srb_sg_address = (u_int64_t)sg.SgAddress; } else if (fibsize == (sizeof(struct aac_srb) + srbcmd->sg_map.SgCount * sizeof(struct aac_sg_entry64))) { #ifdef __LP64__ struct aac_sg_entry64 *sgp = (struct aac_sg_entry64 *)srbcmd->sg_map.SgEntry; struct aac_sg_entry64 sg; if ((error = copyin(sgp, &sg, sizeof(sg))) != 0) goto out; srb_sg_bytecount = sg.SgByteCount; srb_sg_address = sg.SgAddress; #else error = EINVAL; goto out; #endif } else { error = EINVAL; goto out; } user_reply = (char *)arg + fibsize; srbcmd->data_len = srb_sg_bytecount; if (srbcmd->sg_map.SgCount == 1) transfer_data = 1; if (transfer_data) { /* * Create DMA tag for the passthr. data buffer and allocate it. */ if (bus_dma_tag_create(sc->aac_parent_dmat, /* parent */ 1, 0, /* algnmnt, boundary */ (sc->flags & AAC_FLAGS_SG_64BIT) ? 
BUS_SPACE_MAXADDR_32BIT : 0x7fffffff, /* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filter, filterarg */ srb_sg_bytecount, /* size */ sc->aac_sg_tablesize, /* nsegments */ srb_sg_bytecount, /* maxsegsize */ 0, /* flags */ NULL, NULL, /* No locking needed */ &cm->cm_passthr_dmat)) { error = ENOMEM; goto out; } if (bus_dmamem_alloc(cm->cm_passthr_dmat, (void **)&cm->cm_data, BUS_DMA_NOWAIT, &cm->cm_datamap)) { error = ENOMEM; goto out; } /* fill some cm variables */ cm->cm_datalen = srb_sg_bytecount; if (srbcmd->flags & AAC_SRB_FLAGS_DATA_IN) cm->cm_flags |= AAC_CMD_DATAIN; if (srbcmd->flags & AAC_SRB_FLAGS_DATA_OUT) cm->cm_flags |= AAC_CMD_DATAOUT; if (srbcmd->flags & AAC_SRB_FLAGS_DATA_OUT) { if ((error = copyin((void *)(uintptr_t)srb_sg_address, cm->cm_data, cm->cm_datalen)) != 0) goto out; /* sync required for bus_dmamem_alloc() alloc. mem.? */ bus_dmamap_sync(cm->cm_passthr_dmat, cm->cm_datamap, BUS_DMASYNC_PREWRITE); } } /* build the FIB */ fib->Header.Size = sizeof(struct aac_fib_header) + sizeof(struct aac_srb); fib->Header.XferState = AAC_FIBSTATE_HOSTOWNED | AAC_FIBSTATE_INITIALISED | AAC_FIBSTATE_EMPTY | AAC_FIBSTATE_FROMHOST | AAC_FIBSTATE_REXPECTED | AAC_FIBSTATE_NORM | AAC_FIBSTATE_ASYNC; fib->Header.Command = (sc->flags & AAC_FLAGS_SG_64BIT) ? ScsiPortCommandU64 : ScsiPortCommand; cm->cm_sgtable = (struct aac_sg_table *)&srbcmd->sg_map; aac_srb_tole(srbcmd); /* send command */ if (transfer_data) { bus_dmamap_load(cm->cm_passthr_dmat, cm->cm_datamap, cm->cm_data, cm->cm_datalen, aacraid_map_command_sg, cm, 0); } else { aacraid_map_command_sg(cm, NULL, 0, 0); } /* wait for completion */ mtx_lock(&sc->aac_io_lock); while (!(cm->cm_flags & AAC_CMD_COMPLETED)) msleep(cm, &sc->aac_io_lock, 0, "aacraid_ctlsrw2", 0); mtx_unlock(&sc->aac_io_lock); /* copy data */ if (transfer_data && (le32toh(srbcmd->flags) & AAC_SRB_FLAGS_DATA_IN)) { if ((error = copyout(cm->cm_data, (void *)(uintptr_t)srb_sg_address, cm->cm_datalen)) != 0) goto out; /* sync required for bus_dmamem_alloc() allocated mem.? */ bus_dmamap_sync(cm->cm_passthr_dmat, cm->cm_datamap, BUS_DMASYNC_POSTREAD); } /* status */ aac_srb_response_toh((struct aac_srb_response *)fib->data); error = copyout(fib->data, user_reply, sizeof(struct aac_srb_response)); out: if (cm && cm->cm_data) { if (transfer_data) bus_dmamap_unload(cm->cm_passthr_dmat, cm->cm_datamap); bus_dmamem_free(cm->cm_passthr_dmat, cm->cm_data, cm->cm_datamap); cm->cm_datamap = orig_map; } if (cm && cm->cm_passthr_dmat) bus_dma_tag_destroy(cm->cm_passthr_dmat); if (cm) { mtx_lock(&sc->aac_io_lock); aacraid_release_command(cm); mtx_unlock(&sc->aac_io_lock); } return(error); } /* * Request an AIF from the controller (new comm. type1) */ static void aac_request_aif(struct aac_softc *sc) { struct aac_command *cm; struct aac_fib *fib; fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); if (aacraid_alloc_command(sc, &cm)) { sc->aif_pending = 1; return; } sc->aif_pending = 0; /* build the FIB */ fib = cm->cm_fib; fib->Header.Size = sizeof(struct aac_fib); fib->Header.XferState = AAC_FIBSTATE_HOSTOWNED | AAC_FIBSTATE_INITIALISED | AAC_FIBSTATE_EMPTY | AAC_FIBSTATE_FROMHOST | AAC_FIBSTATE_REXPECTED | AAC_FIBSTATE_NORM | AAC_FIBSTATE_ASYNC; /* set AIF marker */ fib->Header.Handle = 0x00800000; fib->Header.Command = AifRequest; ((struct aac_aif_command *)fib->data)->command = htole32(AifReqEvent); aacraid_map_command_sg(cm, NULL, 0, 0); } /* * cdevpriv interface private destructor. 
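 * Invoked once the last reference to the per-open private data is dropped;
 * it releases the device_busy() reference taken in aac_open().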
*/ static void aac_cdevpriv_dtor(void *arg) { struct aac_softc *sc; sc = arg; fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); device_unbusy(sc->aac_dev); } /* * Handle an AIF sent to us by the controller; queue it for later reference. * If the queue fills up, then drop the older entries. */ static void aac_handle_aif(struct aac_softc *sc, struct aac_fib *fib) { struct aac_aif_command *aif; struct aac_container *co, *co_next; struct aac_fib_context *ctx; struct aac_fib *sync_fib; struct aac_mntinforesp mir; int next, current, found; int count = 0, changed = 0, i = 0; u_int32_t channel, uid; fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); aif = (struct aac_aif_command*)&fib->data[0]; aacraid_print_aif(sc, aif); /* Is it an event that we should care about? */ switch (le32toh(aif->command)) { case AifCmdEventNotify: switch (le32toh(aif->data.EN.type)) { case AifEnAddContainer: case AifEnDeleteContainer: /* * A container was added or deleted, but the message * doesn't tell us anything else! Re-enumerate the * containers and sort things out. */ aac_alloc_sync_fib(sc, &sync_fib); do { /* * Ask the controller for its containers one at * a time. * XXX What if the controller's list changes * midway through this enumeration? * XXX This should be done async. */ if (aac_get_container_info(sc, sync_fib, i, &mir, &uid) != 0) continue; if (i == 0) count = mir.MntRespCount; /* * Check the container against our list. * co->co_found was already set to 0 in a * previous run. */ if ((mir.Status == ST_OK) && (mir.MntTable[0].VolType != CT_NONE)) { found = 0; TAILQ_FOREACH(co, &sc->aac_container_tqh, co_link) { if (co->co_mntobj.ObjectId == mir.MntTable[0].ObjectId) { co->co_found = 1; found = 1; break; } } /* * If the container matched, continue * in the list. */ if (found) { i++; continue; } /* * This is a new container. Do all the * appropriate things to set it up. */ aac_add_container(sc, &mir, 1, uid); changed = 1; } i++; } while ((i < count) && (i < AAC_MAX_CONTAINERS)); aac_release_sync_fib(sc); /* * Go through our list of containers and see which ones * were not marked 'found'. Since the controller didn't * list them they must have been deleted. Do the * appropriate steps to destroy the device. Also reset * the co->co_found field.
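 * (entries that were found merely have the flag cleared again, so the next
 * re-enumeration starts from a clean slate).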
*/ co = TAILQ_FIRST(&sc->aac_container_tqh); while (co != NULL) { if (co->co_found == 0) { co_next = TAILQ_NEXT(co, co_link); TAILQ_REMOVE(&sc->aac_container_tqh, co, co_link); free(co, M_AACRAIDBUF); changed = 1; co = co_next; } else { co->co_found = 0; co = TAILQ_NEXT(co, co_link); } } /* Attach the newly created containers */ if (changed) { if (sc->cam_rescan_cb != NULL) sc->cam_rescan_cb(sc, 0, AAC_CAM_TARGET_WILDCARD); } break; case AifEnEnclosureManagement: switch (le32toh(aif->data.EN.data.EEE.eventType)) { case AIF_EM_DRIVE_INSERTION: case AIF_EM_DRIVE_REMOVAL: channel = le32toh(aif->data.EN.data.EEE.unitID); if (sc->cam_rescan_cb != NULL) sc->cam_rescan_cb(sc, ((channel>>24) & 0xF) + 1, (channel & 0xFFFF)); break; } break; case AifEnAddJBOD: case AifEnDeleteJBOD: case AifRawDeviceRemove: channel = le32toh(aif->data.EN.data.ECE.container); if (sc->cam_rescan_cb != NULL) sc->cam_rescan_cb(sc, ((channel>>24) & 0xF) + 1, AAC_CAM_TARGET_WILDCARD); break; default: break; } default: break; } /* Copy the AIF data to the AIF queue for ioctl retrieval */ current = sc->aifq_idx; next = (current + 1) % AAC_AIFQ_LENGTH; if (next == 0) sc->aifq_filled = 1; bcopy(fib, &sc->aac_aifq[current], sizeof(struct aac_fib)); /* Make aifq's FIB header and data LE */ aac_fib_header_tole(&sc->aac_aifq[current].Header); /* modify AIF contexts */ if (sc->aifq_filled) { for (ctx = sc->fibctx; ctx; ctx = ctx->next) { if (next == ctx->ctx_idx) ctx->ctx_wrap = 1; else if (current == ctx->ctx_idx && ctx->ctx_wrap) ctx->ctx_idx = next; } } sc->aifq_idx = next; /* On the off chance that someone is sleeping for an aif... */ if (sc->aac_state & AAC_STATE_AIF_SLEEPER) wakeup(sc->aac_aifq); /* Wakeup any poll()ers */ selwakeuppri(&sc->rcv_select, PRIBIO); return; } /* * Return the Revision of the driver to userspace and check to see if the * userspace app is possibly compatible. This is extremely bogus since * our driver doesn't follow Adaptec's versioning system. Cheat by just * returning what the card reported. */ static int aac_rev_check(struct aac_softc *sc, caddr_t udata) { struct aac_rev_check rev_check; struct aac_rev_check_resp rev_check_resp; int error = 0; fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); /* * Copyin the revision struct from userspace */ if ((error = copyin(udata, (caddr_t)&rev_check, sizeof(struct aac_rev_check))) != 0) { return error; } fwprintf(sc, HBA_FLAGS_DBG_IOCTL_COMMANDS_B, "Userland revision= %d\n", rev_check.callingRevision.buildNumber); /* * Doctor up the response struct. 
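 * It is filled with this driver's own version identifiers and always reports
 * the caller as possibly compatible.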
*/ rev_check_resp.possiblyCompatible = 1; rev_check_resp.adapterSWRevision.external.comp.major = AAC_DRIVER_MAJOR_VERSION; rev_check_resp.adapterSWRevision.external.comp.minor = AAC_DRIVER_MINOR_VERSION; rev_check_resp.adapterSWRevision.external.comp.type = AAC_DRIVER_TYPE; rev_check_resp.adapterSWRevision.external.comp.dash = AAC_DRIVER_BUGFIX_LEVEL; rev_check_resp.adapterSWRevision.buildNumber = AAC_DRIVER_BUILD; return(copyout((caddr_t)&rev_check_resp, udata, sizeof(struct aac_rev_check_resp))); } /* * Pass the fib context to the caller */ static int aac_open_aif(struct aac_softc *sc, caddr_t arg) { struct aac_fib_context *fibctx, *ctx; int error = 0; fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); fibctx = malloc(sizeof(struct aac_fib_context), M_AACRAIDBUF, M_NOWAIT|M_ZERO); if (fibctx == NULL) return (ENOMEM); mtx_lock(&sc->aac_io_lock); /* all elements are already 0, add to queue */ if (sc->fibctx == NULL) sc->fibctx = fibctx; else { for (ctx = sc->fibctx; ctx->next; ctx = ctx->next) ; ctx->next = fibctx; fibctx->prev = ctx; } /* evaluate unique value */ fibctx->unique = (*(u_int32_t *)&fibctx & 0xffffffff); ctx = sc->fibctx; while (ctx != fibctx) { if (ctx->unique == fibctx->unique) { fibctx->unique++; ctx = sc->fibctx; } else { ctx = ctx->next; } } error = copyout(&fibctx->unique, (void *)arg, sizeof(u_int32_t)); mtx_unlock(&sc->aac_io_lock); if (error) aac_close_aif(sc, (caddr_t)ctx); return error; } /* * Close the caller's fib context */ static int aac_close_aif(struct aac_softc *sc, caddr_t arg) { struct aac_fib_context *ctx; fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); mtx_lock(&sc->aac_io_lock); for (ctx = sc->fibctx; ctx; ctx = ctx->next) { if (ctx->unique == *(uint32_t *)&arg) { if (ctx == sc->fibctx) sc->fibctx = NULL; else { ctx->prev->next = ctx->next; if (ctx->next) ctx->next->prev = ctx->prev; } break; } } if (ctx) free(ctx, M_AACRAIDBUF); mtx_unlock(&sc->aac_io_lock); return 0; } /* * Pass the caller the next AIF in their queue */ static int aac_getnext_aif(struct aac_softc *sc, caddr_t arg) { struct get_adapter_fib_ioctl agf; struct aac_fib_context *ctx; int error; fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); mtx_lock(&sc->aac_io_lock); #ifdef COMPAT_FREEBSD32 if (SV_CURPROC_FLAG(SV_ILP32)) { struct get_adapter_fib_ioctl32 agf32; error = copyin(arg, &agf32, sizeof(agf32)); if (error == 0) { agf.AdapterFibContext = agf32.AdapterFibContext; agf.Wait = agf32.Wait; agf.AifFib = (caddr_t)(uintptr_t)agf32.AifFib; } } else #endif error = copyin(arg, &agf, sizeof(agf)); if (error == 0) { for (ctx = sc->fibctx; ctx; ctx = ctx->next) { if (agf.AdapterFibContext == ctx->unique) break; } if (!ctx) { mtx_unlock(&sc->aac_io_lock); return (EFAULT); } error = aac_return_aif(sc, ctx, agf.AifFib); if (error == EAGAIN && agf.Wait) { fwprintf(sc, HBA_FLAGS_DBG_AIF_B, "aac_getnext_aif(): waiting for AIF"); sc->aac_state |= AAC_STATE_AIF_SLEEPER; while (error == EAGAIN) { mtx_unlock(&sc->aac_io_lock); error = tsleep(sc->aac_aifq, PRIBIO | PCATCH, "aacaif", 0); mtx_lock(&sc->aac_io_lock); if (error == 0) error = aac_return_aif(sc, ctx, agf.AifFib); } sc->aac_state &= ~AAC_STATE_AIF_SLEEPER; } } mtx_unlock(&sc->aac_io_lock); return(error); } /* * Hand the next AIF off the top of the queue out to userspace. 
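 * Returns EAGAIN once this context has drained the queue, i.e. its read
 * index has caught up with the producer index (aifq_idx) and no wrap is
 * pending.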
*/ static int aac_return_aif(struct aac_softc *sc, struct aac_fib_context *ctx, caddr_t uptr) { int current, error; fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); current = ctx->ctx_idx; if (current == sc->aifq_idx && !ctx->ctx_wrap) { /* empty */ return (EAGAIN); } error = copyout(&sc->aac_aifq[current], (void *)uptr, sizeof(struct aac_fib)); if (error) device_printf(sc->aac_dev, "aac_return_aif: copyout returned %d\n", error); else { ctx->ctx_wrap = 0; ctx->ctx_idx = (current + 1) % AAC_AIFQ_LENGTH; } return(error); } static int aac_get_pci_info(struct aac_softc *sc, caddr_t uptr) { struct aac_pci_info { u_int32_t bus; u_int32_t slot; } pciinf; int error; fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); pciinf.bus = pci_get_bus(sc->aac_dev); pciinf.slot = pci_get_slot(sc->aac_dev); error = copyout((caddr_t)&pciinf, uptr, sizeof(struct aac_pci_info)); return (error); } static int aac_supported_features(struct aac_softc *sc, caddr_t uptr) { struct aac_features f; int error; fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); if ((error = copyin(uptr, &f, sizeof (f))) != 0) return (error); /* * When the management driver receives the FSACTL_GET_FEATURES ioctl with * ALL zero in the featuresState, the driver will return the current * state of all the supported features; the data field will not be * valid. * When the management driver receives the FSACTL_GET_FEATURES ioctl with * a specific bit set in the featuresState, the driver will return the * current state of this specific feature and whatever data is * associated with the feature in the data field, or perform whatever * action is indicated in the data field. */ if (f.feat.fValue == 0) { f.feat.fBits.largeLBA = (sc->flags & AAC_FLAGS_LBA_64BIT) ? 1 : 0; f.feat.fBits.JBODSupport = 1; /* TODO: In the future, add other features state here as well */ } else { if (f.feat.fBits.largeLBA) f.feat.fBits.largeLBA = (sc->flags & AAC_FLAGS_LBA_64BIT) ? 1 : 0; /* TODO: Add other features state and data in the future */ } error = copyout(&f, uptr, sizeof (f)); return (error); } /* * Give the userland some information about the container. The AAC arch * expects the driver to be a SCSI passthrough type driver, so it expects * the containers to have b:t:l numbers. Fake it. */ static int aac_query_disk(struct aac_softc *sc, caddr_t uptr) { struct aac_query_disk query_disk; struct aac_container *co; int error, id; fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); mtx_lock(&sc->aac_io_lock); error = copyin(uptr, (caddr_t)&query_disk, sizeof(struct aac_query_disk)); if (error) { mtx_unlock(&sc->aac_io_lock); return (error); } id = query_disk.ContainerNumber; if (id == -1) { mtx_unlock(&sc->aac_io_lock); return (EINVAL); } TAILQ_FOREACH(co, &sc->aac_container_tqh, co_link) { if (co->co_mntobj.ObjectId == id) break; } if (co == NULL) { query_disk.Valid = 0; query_disk.Locked = 0; query_disk.Deleted = 1; /* XXX is this right?
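 * A container that is no longer known to the driver is reported as
 * deleted (Valid = 0, Deleted = 1) rather than merely not valid.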
*/ } else { query_disk.Valid = 1; query_disk.Locked = 1; query_disk.Deleted = 0; query_disk.Bus = device_get_unit(sc->aac_dev); query_disk.Target = 0; query_disk.Lun = 0; query_disk.UnMapped = 0; } error = copyout((caddr_t)&query_disk, uptr, sizeof(struct aac_query_disk)); mtx_unlock(&sc->aac_io_lock); return (error); } static void aac_container_bus(struct aac_softc *sc) { struct aac_sim *sim; device_t child; sim =(struct aac_sim *)malloc(sizeof(struct aac_sim), M_AACRAIDBUF, M_NOWAIT | M_ZERO); if (sim == NULL) { device_printf(sc->aac_dev, "No memory to add container bus\n"); panic("Out of memory?!"); } child = device_add_child(sc->aac_dev, "aacraidp", DEVICE_UNIT_ANY); if (child == NULL) { device_printf(sc->aac_dev, "device_add_child failed for container bus\n"); free(sim, M_AACRAIDBUF); panic("Out of memory?!"); } sim->TargetsPerBus = AAC_MAX_CONTAINERS; sim->BusNumber = 0; sim->BusType = CONTAINER_BUS; sim->InitiatorBusId = -1; sim->aac_sc = sc; sim->sim_dev = child; sim->aac_cam = NULL; device_set_ivars(child, sim); device_set_desc(child, "Container Bus"); TAILQ_INSERT_TAIL(&sc->aac_sim_tqh, sim, sim_link); /* device_set_desc(child, aac_describe_code(aac_container_types, mir->MntTable[0].VolType)); */ bus_attach_children(sc->aac_dev); } static void aac_get_bus_info(struct aac_softc *sc) { struct aac_fib *fib; struct aac_ctcfg *c_cmd; struct aac_ctcfg_resp *c_resp; struct aac_vmioctl *vmi; struct aac_vmi_businf_resp *vmi_resp; struct aac_getbusinf businfo; struct aac_sim *caminf; device_t child; int i, error; mtx_lock(&sc->aac_io_lock); aac_alloc_sync_fib(sc, &fib); c_cmd = (struct aac_ctcfg *)&fib->data[0]; bzero(c_cmd, sizeof(struct aac_ctcfg)); c_cmd->Command = VM_ContainerConfig; c_cmd->cmd = CT_GET_SCSI_METHOD; c_cmd->param = 0; aac_ctcfg_tole(c_cmd); error = aac_sync_fib(sc, ContainerCommand, 0, fib, sizeof(struct aac_ctcfg)); if (error) { device_printf(sc->aac_dev, "Error %d sending " "VM_ContainerConfig command\n", error); aac_release_sync_fib(sc); mtx_unlock(&sc->aac_io_lock); return; } c_resp = (struct aac_ctcfg_resp *)&fib->data[0]; aac_ctcfg_resp_toh(c_resp); if (c_resp->Status != ST_OK) { device_printf(sc->aac_dev, "VM_ContainerConfig returned 0x%x\n", c_resp->Status); aac_release_sync_fib(sc); mtx_unlock(&sc->aac_io_lock); return; } sc->scsi_method_id = c_resp->param; vmi = (struct aac_vmioctl *)&fib->data[0]; bzero(vmi, sizeof(struct aac_vmioctl)); vmi->Command = VM_Ioctl; vmi->ObjType = FT_DRIVE; vmi->MethId = sc->scsi_method_id; vmi->ObjId = 0; vmi->IoctlCmd = GetBusInfo; aac_vmioctl_tole(vmi); error = aac_sync_fib(sc, ContainerCommand, 0, fib, sizeof(struct aac_vmi_businf_resp)); if (error) { device_printf(sc->aac_dev, "Error %d sending VMIoctl command\n", error); aac_release_sync_fib(sc); mtx_unlock(&sc->aac_io_lock); return; } vmi_resp = (struct aac_vmi_businf_resp *)&fib->data[0]; aac_vmi_businf_resp_toh(vmi_resp); if (vmi_resp->Status != ST_OK) { device_printf(sc->aac_dev, "VM_Ioctl returned %d\n", vmi_resp->Status); aac_release_sync_fib(sc); mtx_unlock(&sc->aac_io_lock); return; } bcopy(&vmi_resp->BusInf, &businfo, sizeof(struct aac_getbusinf)); aac_release_sync_fib(sc); mtx_unlock(&sc->aac_io_lock); for (i = 0; i < businfo.BusCount; i++) { if (businfo.BusValid[i] != AAC_BUS_VALID) continue; caminf = (struct aac_sim *)malloc( sizeof(struct aac_sim), M_AACRAIDBUF, M_NOWAIT | M_ZERO); if (caminf == NULL) { device_printf(sc->aac_dev, "No memory to add passthrough bus %d\n", i); break; } child = device_add_child(sc->aac_dev, "aacraidp", DEVICE_UNIT_ANY); if (child == 
NULL) { device_printf(sc->aac_dev, "device_add_child failed for passthrough bus %d\n", i); free(caminf, M_AACRAIDBUF); break; } caminf->TargetsPerBus = businfo.TargetsPerBus; caminf->BusNumber = i+1; caminf->BusType = PASSTHROUGH_BUS; caminf->InitiatorBusId = -1; caminf->aac_sc = sc; caminf->sim_dev = child; caminf->aac_cam = NULL; device_set_ivars(child, caminf); device_set_desc(child, "SCSI Passthrough Bus"); TAILQ_INSERT_TAIL(&sc->aac_sim_tqh, caminf, sim_link); } } /* * Check to see if the kernel is up and running. If we are in a * BlinkLED state, return the BlinkLED code. */ static u_int32_t aac_check_adapter_health(struct aac_softc *sc, u_int8_t *bled) { u_int32_t ret; ret = AAC_GET_FWSTATUS(sc); if (ret & AAC_UP_AND_RUNNING) ret = 0; else if (ret & AAC_KERNEL_PANIC && bled) *bled = (ret >> 16) & 0xff; return (ret); } /* * Once an IOP reset is done, the card basically has to be re-initialized as * if it were coming up from a cold boot, and the driver is responsible for * any I/O that was outstanding to the adapter at the time of the IOP * reset. The driver is prepared for an IOP reset by keeping the init code * modular, with the ability to call it from multiple places. */ static int aac_reset_adapter(struct aac_softc *sc) { struct aac_command *cm; struct aac_fib *fib; struct aac_pause_command *pc; u_int32_t status, reset_mask, waitCount, max_msix_orig; int ret, msi_enabled_orig; fwprintf(sc, HBA_FLAGS_DBG_FUNCTION_ENTRY_B, ""); mtx_assert(&sc->aac_io_lock, MA_OWNED); if (sc->aac_state & AAC_STATE_RESET) { device_printf(sc->aac_dev, "aac_reset_adapter() already in progress\n"); return (EINVAL); } sc->aac_state |= AAC_STATE_RESET; /* disable interrupt */ AAC_ACCESS_DEVREG(sc, AAC_DISABLE_INTERRUPT); /* * Abort all pending commands: * a) on the controller */ while ((cm = aac_dequeue_busy(sc)) != NULL) { cm->cm_flags |= AAC_CMD_RESET; /* is there a completion handler? */ if (cm->cm_complete != NULL) { cm->cm_complete(cm); } else { /* assume that someone is sleeping on this * command */ wakeup(cm); } } /* b) in the waiting queues */ while ((cm = aac_dequeue_ready(sc)) != NULL) { cm->cm_flags |= AAC_CMD_RESET; /* is there a completion handler?
*/ if (cm->cm_complete != NULL) { cm->cm_complete(cm); } else { /* assume that someone is sleeping on this * command */ wakeup(cm); } } /* flush drives */ if (aac_check_adapter_health(sc, NULL) == 0) { mtx_unlock(&sc->aac_io_lock); (void) aacraid_shutdown(sc->aac_dev); mtx_lock(&sc->aac_io_lock); } /* execute IOP reset */ if (sc->aac_support_opt2 & AAC_SUPPORTED_MU_RESET) { AAC_MEM0_SETREG4(sc, AAC_IRCSR, AAC_IRCSR_CORES_RST); /* We need to wait for 5 seconds before accessing the MU again * 10000 * 100us = 1000,000us = 1000ms = 1s */ waitCount = 5 * 10000; while (waitCount) { DELAY(100); /* delay 100 microseconds */ waitCount--; } } else { ret = aacraid_sync_command(sc, AAC_IOP_RESET_ALWAYS, 0, 0, 0, 0, &status, &reset_mask); if (ret && !sc->doorbell_mask) { /* call IOP_RESET for older firmware */ if ((aacraid_sync_command(sc, AAC_IOP_RESET, 0,0,0,0, &status, NULL)) != 0) { if (status == AAC_SRB_STS_INVALID_REQUEST) { device_printf(sc->aac_dev, "IOP_RESET not supported\n"); } else { /* probably timeout */ device_printf(sc->aac_dev, "IOP_RESET failed\n"); } /* unwind aac_shutdown() */ aac_alloc_sync_fib(sc, &fib); pc = (struct aac_pause_command *)&fib->data[0]; pc->Command = VM_ContainerConfig; pc->Type = CT_PAUSE_IO; pc->Timeout = 1; pc->Min = 1; pc->NoRescan = 1; aac_pause_command_tole(pc); (void) aac_sync_fib(sc, ContainerCommand, 0, fib, sizeof (struct aac_pause_command)); aac_release_sync_fib(sc); goto finish; } } else if (sc->doorbell_mask) { ret = 0; reset_mask = sc->doorbell_mask; } if (!ret && (sc->aac_support_opt2 & AAC_SUPPORTED_DOORBELL_RESET)) { AAC_MEM0_SETREG4(sc, AAC_SRC_IDBR, reset_mask); /* * We need to wait for 5 seconds before accessing the * doorbell again; * 10000 * 100us = 1000,000us = 1000ms = 1s */ waitCount = 5 * 10000; while (waitCount) { DELAY(100); /* delay 100 microseconds */ waitCount--; } } } /* * Initialize the adapter. */ max_msix_orig = sc->aac_max_msix; msi_enabled_orig = sc->msi_enabled; sc->msi_enabled = FALSE; if (aac_check_firmware(sc) != 0) goto finish; if (!(sc->flags & AAC_FLAGS_SYNC_MODE)) { sc->aac_max_msix = max_msix_orig; if (msi_enabled_orig) { sc->msi_enabled = msi_enabled_orig; AAC_ACCESS_DEVREG(sc, AAC_ENABLE_MSIX); } mtx_unlock(&sc->aac_io_lock); aac_init(sc); mtx_lock(&sc->aac_io_lock); } finish: sc->aac_state &= ~AAC_STATE_RESET; AAC_ACCESS_DEVREG(sc, AAC_ENABLE_INTERRUPT); aacraid_startio(sc); return (0); } diff --git a/sys/dev/alpm/alpm.c b/sys/dev/alpm/alpm.c index 46dc85ebc485..d7c3d3657d3b 100644 --- a/sys/dev/alpm/alpm.c +++ b/sys/dev/alpm/alpm.c @@ -1,657 +1,658 @@ /*- * Copyright (c) 1998, 1999, 2001 Nicolas Souchu * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ /* * Power Management support for the Acer M15x3 chipsets */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include "smbus_if.h" #define ALPM_DEBUG(x) if (alpm_debug) (x) #ifdef DEBUG static int alpm_debug = 1; #else static int alpm_debug = 0; #endif #define ACER_M1543_PMU_ID 0x710110b9 /* * I/O registers offsets - the base address is programmed via the * SMBBA PCI configuration register */ #define SMBSTS 0x0 /* SMBus host/slave status register */ #define SMBCMD 0x1 /* SMBus host/slave command register */ #define SMBSTART 0x2 /* start to generate programmed cycle */ #define SMBHADDR 0x3 /* host address register */ #define SMBHDATA 0x4 /* data A register for host controller */ #define SMBHDATB 0x5 /* data B register for host controller */ #define SMBHBLOCK 0x6 /* block register for host controller */ #define SMBHCMD 0x7 /* command register for host controller */ /* SMBHADDR mask. */ #define LSB 0x1 /* XXX: Better name: Read/Write? */ /* SMBSTS masks */ #define TERMINATE 0x80 #define BUS_COLLI 0x40 #define DEVICE_ERR 0x20 #define SMI_I_STS 0x10 #define HST_BSY 0x08 #define IDL_STS 0x04 #define HSTSLV_STS 0x02 #define HSTSLV_BSY 0x01 /* SMBCMD masks */ #define SMB_BLK_CLR 0x80 #define T_OUT_CMD 0x08 #define ABORT_HOST 0x04 /* SMBus commands */ #define SMBQUICK 0x00 #define SMBSRBYTE 0x10 /* send/receive byte */ #define SMBWRBYTE 0x20 /* write/read byte */ #define SMBWRWORD 0x30 /* write/read word */ #define SMBWRBLOCK 0x40 /* write/read block */ /* PCI configuration registers and masks */ #define COM 0x4 #define COM_ENABLE_IO 0x1 #define SMBBA PCIR_BAR(1) #define ATPC 0x5b #define ATPC_SMBCTRL 0x04 /* XX linux has this as 0x6 */ #define SMBHSI 0xe0 #define SMBHSI_SLAVE 0x2 #define SMBHSI_HOST 0x1 #define SMBHCBC 0xe2 #define SMBHCBC_CLOCK 0x70 #define SMBCLOCK_149K 0x0 #define SMBCLOCK_74K 0x20 #define SMBCLOCK_37K 0x40 #define SMBCLOCK_223K 0x80 #define SMBCLOCK_111K 0xa0 #define SMBCLOCK_55K 0xc0 struct alpm_softc { int base; struct resource *res; bus_space_tag_t smbst; bus_space_handle_t smbsh; device_t smbus; struct mtx lock; }; #define ALPM_LOCK(alpm) mtx_lock(&(alpm)->lock) #define ALPM_UNLOCK(alpm) mtx_unlock(&(alpm)->lock) #define ALPM_LOCK_ASSERT(alpm) mtx_assert(&(alpm)->lock, MA_OWNED) #define ALPM_SMBINB(alpm,register) \ (bus_space_read_1(alpm->smbst, alpm->smbsh, register)) #define ALPM_SMBOUTB(alpm,register,value) \ (bus_space_write_1(alpm->smbst, alpm->smbsh, register, value)) static int alpm_detach(device_t dev); static int alpm_probe(device_t dev) { if (pci_get_devid(dev) == ACER_M1543_PMU_ID) { device_set_desc(dev, "AcerLabs M15x3 Power Management Unit"); return (BUS_PROBE_DEFAULT); } return (ENXIO); } static int alpm_attach(device_t dev) { int rid; u_int32_t l; struct alpm_softc *alpm; alpm = device_get_softc(dev); /* Unlock SMBIO base register access */ l = pci_read_config(dev, ATPC, 1); pci_write_config(dev, ATPC, l & ~ATPC_SMBCTRL, 1); /* * XX linux sets clock to 74k, should we? 
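 * The disabled code below would select the 74K rate in the SMBHCBC clock
 * field if it were ever enabled: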
l = pci_read_config(dev, SMBHCBC, 1); l &= 0x1f; l |= SMBCLOCK_74K; pci_write_config(dev, SMBHCBC, l, 1); */ if (bootverbose || alpm_debug) { l = pci_read_config(dev, SMBHSI, 1); device_printf(dev, "%s/%s", (l & SMBHSI_HOST) ? "host":"nohost", (l & SMBHSI_SLAVE) ? "slave":"noslave"); l = pci_read_config(dev, SMBHCBC, 1); switch (l & SMBHCBC_CLOCK) { case SMBCLOCK_149K: printf(" 149K"); break; case SMBCLOCK_74K: printf(" 74K"); break; case SMBCLOCK_37K: printf(" 37K"); break; case SMBCLOCK_223K: printf(" 223K"); break; case SMBCLOCK_111K: printf(" 111K"); break; case SMBCLOCK_55K: printf(" 55K"); break; default: printf("unknown"); break; } printf("\n"); } rid = SMBBA; alpm->res = bus_alloc_resource_any(dev, SYS_RES_IOPORT, &rid, RF_ACTIVE); if (alpm->res == NULL) { device_printf(dev,"Could not allocate Bus space\n"); return (ENXIO); } alpm->smbst = rman_get_bustag(alpm->res); alpm->smbsh = rman_get_bushandle(alpm->res); mtx_init(&alpm->lock, device_get_nameunit(dev), "alpm", MTX_DEF); /* attach the smbus */ alpm->smbus = device_add_child(dev, "smbus", DEVICE_UNIT_ANY); if (alpm->smbus == NULL) { alpm_detach(dev); return (EINVAL); } bus_attach_children(dev); return (0); } static int alpm_detach(device_t dev) { struct alpm_softc *alpm = device_get_softc(dev); + int error; + + error = bus_generic_detach(dev); + if (error != 0) + return (error); - if (alpm->smbus) { - device_delete_child(dev, alpm->smbus); - alpm->smbus = NULL; - } mtx_destroy(&alpm->lock); if (alpm->res) bus_release_resource(dev, SYS_RES_IOPORT, SMBBA, alpm->res); return (0); } static int alpm_callback(device_t dev, int index, void *data) { int error = 0; switch (index) { case SMB_REQUEST_BUS: case SMB_RELEASE_BUS: /* ok, bus allocation accepted */ break; default: error = EINVAL; } return (error); } static int alpm_clear(struct alpm_softc *sc) { ALPM_SMBOUTB(sc, SMBSTS, 0xff); DELAY(10); return (0); } #if 0 static int alpm_abort(struct alpm_softc *sc) { ALPM_SMBOUTB(sc, SMBCMD, T_OUT_CMD | ABORT_HOST); return (0); } #endif static int alpm_idle(struct alpm_softc *sc) { u_char sts; sts = ALPM_SMBINB(sc, SMBSTS); ALPM_DEBUG(printf("alpm: idle? 
STS=0x%x\n", sts)); return (sts & IDL_STS); } /* * Poll the SMBus controller */ static int alpm_wait(struct alpm_softc *sc) { int count = 10000; u_char sts = 0; int error; /* wait for command to complete and SMBus controller is idle */ while (count--) { DELAY(10); sts = ALPM_SMBINB(sc, SMBSTS); if (sts & SMI_I_STS) break; } ALPM_DEBUG(printf("alpm: STS=0x%x\n", sts)); error = SMB_ENOERR; if (!count) error |= SMB_ETIMEOUT; if (sts & TERMINATE) error |= SMB_EABORT; if (sts & BUS_COLLI) error |= SMB_ENOACK; if (sts & DEVICE_ERR) error |= SMB_EBUSERR; if (error != SMB_ENOERR) alpm_clear(sc); return (error); } static int alpm_quick(device_t dev, u_char slave, int how) { struct alpm_softc *sc = (struct alpm_softc *)device_get_softc(dev); int error; ALPM_LOCK(sc); alpm_clear(sc); if (!alpm_idle(sc)) { ALPM_UNLOCK(sc); return (EBUSY); } switch (how) { case SMB_QWRITE: ALPM_DEBUG(printf("alpm: QWRITE to 0x%x", slave)); ALPM_SMBOUTB(sc, SMBHADDR, slave & ~LSB); break; case SMB_QREAD: ALPM_DEBUG(printf("alpm: QREAD to 0x%x", slave)); ALPM_SMBOUTB(sc, SMBHADDR, slave | LSB); break; default: panic("%s: unknown QUICK command (%x)!", __func__, how); } ALPM_SMBOUTB(sc, SMBCMD, SMBQUICK); ALPM_SMBOUTB(sc, SMBSTART, 0xff); error = alpm_wait(sc); ALPM_DEBUG(printf(", error=0x%x\n", error)); ALPM_UNLOCK(sc); return (error); } static int alpm_sendb(device_t dev, u_char slave, char byte) { struct alpm_softc *sc = (struct alpm_softc *)device_get_softc(dev); int error; ALPM_LOCK(sc); alpm_clear(sc); if (!alpm_idle(sc)) { ALPM_UNLOCK(sc); return (SMB_EBUSY); } ALPM_SMBOUTB(sc, SMBHADDR, slave & ~LSB); ALPM_SMBOUTB(sc, SMBCMD, SMBSRBYTE); ALPM_SMBOUTB(sc, SMBHDATA, byte); ALPM_SMBOUTB(sc, SMBSTART, 0xff); error = alpm_wait(sc); ALPM_DEBUG(printf("alpm: SENDB to 0x%x, byte=0x%x, error=0x%x\n", slave, byte, error)); ALPM_UNLOCK(sc); return (error); } static int alpm_recvb(device_t dev, u_char slave, char *byte) { struct alpm_softc *sc = (struct alpm_softc *)device_get_softc(dev); int error; ALPM_LOCK(sc); alpm_clear(sc); if (!alpm_idle(sc)) { ALPM_UNLOCK(sc); return (SMB_EBUSY); } ALPM_SMBOUTB(sc, SMBHADDR, slave | LSB); ALPM_SMBOUTB(sc, SMBCMD, SMBSRBYTE); ALPM_SMBOUTB(sc, SMBSTART, 0xff); if ((error = alpm_wait(sc)) == SMB_ENOERR) *byte = ALPM_SMBINB(sc, SMBHDATA); ALPM_DEBUG(printf("alpm: RECVB from 0x%x, byte=0x%x, error=0x%x\n", slave, *byte, error)); ALPM_UNLOCK(sc); return (error); } static int alpm_writeb(device_t dev, u_char slave, char cmd, char byte) { struct alpm_softc *sc = (struct alpm_softc *)device_get_softc(dev); int error; ALPM_LOCK(sc); alpm_clear(sc); if (!alpm_idle(sc)) { ALPM_UNLOCK(sc); return (SMB_EBUSY); } ALPM_SMBOUTB(sc, SMBHADDR, slave & ~LSB); ALPM_SMBOUTB(sc, SMBCMD, SMBWRBYTE); ALPM_SMBOUTB(sc, SMBHDATA, byte); ALPM_SMBOUTB(sc, SMBHCMD, cmd); ALPM_SMBOUTB(sc, SMBSTART, 0xff); error = alpm_wait(sc); ALPM_DEBUG(printf("alpm: WRITEB to 0x%x, cmd=0x%x, byte=0x%x, error=0x%x\n", slave, cmd, byte, error)); ALPM_UNLOCK(sc); return (error); } static int alpm_readb(device_t dev, u_char slave, char cmd, char *byte) { struct alpm_softc *sc = (struct alpm_softc *)device_get_softc(dev); int error; ALPM_LOCK(sc); alpm_clear(sc); if (!alpm_idle(sc)) { ALPM_UNLOCK(sc); return (SMB_EBUSY); } ALPM_SMBOUTB(sc, SMBHADDR, slave | LSB); ALPM_SMBOUTB(sc, SMBCMD, SMBWRBYTE); ALPM_SMBOUTB(sc, SMBHCMD, cmd); ALPM_SMBOUTB(sc, SMBSTART, 0xff); if ((error = alpm_wait(sc)) == SMB_ENOERR) *byte = ALPM_SMBINB(sc, SMBHDATA); ALPM_DEBUG(printf("alpm: READB from 0x%x, cmd=0x%x, byte=0x%x, error=0x%x\n", slave, cmd, 
*byte, error)); ALPM_UNLOCK(sc); return (error); } static int alpm_writew(device_t dev, u_char slave, char cmd, short word) { struct alpm_softc *sc = (struct alpm_softc *)device_get_softc(dev); int error; ALPM_LOCK(sc); alpm_clear(sc); if (!alpm_idle(sc)) { ALPM_UNLOCK(sc); return (SMB_EBUSY); } ALPM_SMBOUTB(sc, SMBHADDR, slave & ~LSB); ALPM_SMBOUTB(sc, SMBCMD, SMBWRWORD); ALPM_SMBOUTB(sc, SMBHDATA, word & 0x00ff); ALPM_SMBOUTB(sc, SMBHDATB, (word & 0xff00) >> 8); ALPM_SMBOUTB(sc, SMBHCMD, cmd); ALPM_SMBOUTB(sc, SMBSTART, 0xff); error = alpm_wait(sc); ALPM_DEBUG(printf("alpm: WRITEW to 0x%x, cmd=0x%x, word=0x%x, error=0x%x\n", slave, cmd, word, error)); ALPM_UNLOCK(sc); return (error); } static int alpm_readw(device_t dev, u_char slave, char cmd, short *word) { struct alpm_softc *sc = (struct alpm_softc *)device_get_softc(dev); int error; u_char high, low; ALPM_LOCK(sc); alpm_clear(sc); if (!alpm_idle(sc)) { ALPM_UNLOCK(sc); return (SMB_EBUSY); } ALPM_SMBOUTB(sc, SMBHADDR, slave | LSB); ALPM_SMBOUTB(sc, SMBCMD, SMBWRWORD); ALPM_SMBOUTB(sc, SMBHCMD, cmd); ALPM_SMBOUTB(sc, SMBSTART, 0xff); if ((error = alpm_wait(sc)) == SMB_ENOERR) { low = ALPM_SMBINB(sc, SMBHDATA); high = ALPM_SMBINB(sc, SMBHDATB); *word = ((high & 0xff) << 8) | (low & 0xff); } ALPM_DEBUG(printf("alpm: READW from 0x%x, cmd=0x%x, word=0x%x, error=0x%x\n", slave, cmd, *word, error)); ALPM_UNLOCK(sc); return (error); } static int alpm_bwrite(device_t dev, u_char slave, char cmd, u_char count, char *buf) { struct alpm_softc *sc = (struct alpm_softc *)device_get_softc(dev); u_char i; int error; if (count < 1 || count > 32) return (SMB_EINVAL); ALPM_LOCK(sc); alpm_clear(sc); if(!alpm_idle(sc)) { ALPM_UNLOCK(sc); return (SMB_EBUSY); } ALPM_SMBOUTB(sc, SMBHADDR, slave & ~LSB); /* set the cmd and reset the * 32-byte long internal buffer */ ALPM_SMBOUTB(sc, SMBCMD, SMBWRBLOCK | SMB_BLK_CLR); ALPM_SMBOUTB(sc, SMBHDATA, count); /* fill the 32-byte internal buffer */ for (i = 0; i < count; i++) { ALPM_SMBOUTB(sc, SMBHBLOCK, buf[i]); DELAY(2); } ALPM_SMBOUTB(sc, SMBHCMD, cmd); ALPM_SMBOUTB(sc, SMBSTART, 0xff); error = alpm_wait(sc); ALPM_DEBUG(printf("alpm: WRITEBLK to 0x%x, count=0x%x, cmd=0x%x, error=0x%x", slave, count, cmd, error)); ALPM_UNLOCK(sc); return (error); } static int alpm_bread(device_t dev, u_char slave, char cmd, u_char *count, char *buf) { struct alpm_softc *sc = (struct alpm_softc *)device_get_softc(dev); u_char data, len, i; int error; if (*count < 1 || *count > 32) return (SMB_EINVAL); ALPM_LOCK(sc); alpm_clear(sc); if (!alpm_idle(sc)) { ALPM_UNLOCK(sc); return (SMB_EBUSY); } ALPM_SMBOUTB(sc, SMBHADDR, slave | LSB); /* set the cmd and reset the * 32-byte long internal buffer */ ALPM_SMBOUTB(sc, SMBCMD, SMBWRBLOCK | SMB_BLK_CLR); ALPM_SMBOUTB(sc, SMBHCMD, cmd); ALPM_SMBOUTB(sc, SMBSTART, 0xff); if ((error = alpm_wait(sc)) != SMB_ENOERR) goto error; len = ALPM_SMBINB(sc, SMBHDATA); /* read the 32-byte internal buffer */ for (i = 0; i < len; i++) { data = ALPM_SMBINB(sc, SMBHBLOCK); if (i < *count) buf[i] = data; DELAY(2); } *count = len; error: ALPM_DEBUG(printf("alpm: READBLK to 0x%x, count=0x%x, cmd=0x%x, error=0x%x", slave, *count, cmd, error)); ALPM_UNLOCK(sc); return (error); } static device_method_t alpm_methods[] = { /* device interface */ DEVMETHOD(device_probe, alpm_probe), DEVMETHOD(device_attach, alpm_attach), DEVMETHOD(device_detach, alpm_detach), /* smbus interface */ DEVMETHOD(smbus_callback, alpm_callback), DEVMETHOD(smbus_quick, alpm_quick), DEVMETHOD(smbus_sendb, alpm_sendb), DEVMETHOD(smbus_recvb, 
alpm_recvb), DEVMETHOD(smbus_writeb, alpm_writeb), DEVMETHOD(smbus_readb, alpm_readb), DEVMETHOD(smbus_writew, alpm_writew), DEVMETHOD(smbus_readw, alpm_readw), DEVMETHOD(smbus_bwrite, alpm_bwrite), DEVMETHOD(smbus_bread, alpm_bread), { 0, 0 } }; static driver_t alpm_driver = { "alpm", alpm_methods, sizeof(struct alpm_softc) }; DRIVER_MODULE(alpm, pci, alpm_driver, 0, 0); DRIVER_MODULE(smbus, alpm, smbus_driver, 0, 0); MODULE_DEPEND(alpm, pci, 1, 1, 1); MODULE_DEPEND(alpm, smbus, SMBUS_MINVER, SMBUS_PREFVER, SMBUS_MAXVER); MODULE_VERSION(alpm, 1); diff --git a/sys/dev/amdpm/amdpm.c b/sys/dev/amdpm/amdpm.c index 22b252f572b0..d744c0aa5d4b 100644 --- a/sys/dev/amdpm/amdpm.c +++ b/sys/dev/amdpm/amdpm.c @@ -1,665 +1,665 @@ /*- * Copyright (c) 2000 Matthew C. Forman * * Based (heavily) on alpm.c which is: * * Copyright (c) 1998, 1999 Nicolas Souchu * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ /* * Power management function/SMBus function support for the AMD 756 chip. */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include "smbus_if.h" #define AMDPM_DEBUG(x) if (amdpm_debug) (x) #ifdef DEBUG static int amdpm_debug = 1; #else static int amdpm_debug = 0; #endif #define AMDPM_VENDORID_AMD 0x1022 #define AMDPM_DEVICEID_AMD756PM 0x740b #define AMDPM_DEVICEID_AMD766PM 0x7413 #define AMDPM_DEVICEID_AMD768PM 0x7443 #define AMDPM_DEVICEID_AMD8111PM 0x746B #define AMDPM_VENDORID_HYGON 0x1d94 /* nVidia nForce chipset */ #define AMDPM_VENDORID_NVIDIA 0x10de #define AMDPM_DEVICEID_NF_SMB 0x01b4 /* PCI Configuration space registers */ #define AMDPCI_PMBASE 0x58 #define NFPCI_PMBASE 0x14 #define AMDPCI_GEN_CONFIG_PM 0x41 #define AMDPCI_PMIOEN (1<<7) #define AMDPCI_SCIINT_CONFIG_PM 0x42 #define AMDPCI_SCISEL_IRQ11 11 #define AMDPCI_REVID 0x08 /* * I/O registers. * Base address programmed via AMDPCI_PMBASE. 
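 * Offsets below are relative to the I/O window registered at probe time
 * (PMBASE + 0xe0 on the AMD bridges, PMBASE itself on the nForce).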
*/ #define AMDSMB_GLOBAL_STATUS (0x00) #define AMDSMB_GS_TO_STS (1<<5) #define AMDSMB_GS_HCYC_STS (1<<4) #define AMDSMB_GS_HST_STS (1<<3) #define AMDSMB_GS_PRERR_STS (1<<2) #define AMDSMB_GS_COL_STS (1<<1) #define AMDSMB_GS_ABRT_STS (1<<0) #define AMDSMB_GS_CLEAR_STS (AMDSMB_GS_TO_STS|AMDSMB_GS_HCYC_STS|AMDSMB_GS_PRERR_STS|AMDSMB_GS_COL_STS|AMDSMB_GS_ABRT_STS) #define AMDSMB_GLOBAL_ENABLE (0x02) #define AMDSMB_GE_ABORT (1<<5) #define AMDSMB_GE_HCYC_EN (1<<4) #define AMDSMB_GE_HOST_STC (1<<3) #define AMDSMB_GE_CYC_QUICK 0 #define AMDSMB_GE_CYC_BYTE 1 #define AMDSMB_GE_CYC_BDATA 2 #define AMDSMB_GE_CYC_WDATA 3 #define AMDSMB_GE_CYC_PROCCALL 4 #define AMDSMB_GE_CYC_BLOCK 5 #define LSB 0x1 /* XXX: Better name: Read/Write? */ #define AMDSMB_HSTADDR (0x04) #define AMDSMB_HSTDATA (0x06) #define AMDSMB_HSTCMD (0x08) #define AMDSMB_HSTDFIFO (0x09) #define AMDSMB_HSLVDATA (0x0A) #define AMDSMB_HSLVDA (0x0C) #define AMDSMB_HSLVDDR (0x0E) #define AMDSMB_SNPADDR (0x0F) struct amdpm_softc { int base; int rid; struct resource *res; device_t smbus; struct mtx lock; }; #define AMDPM_LOCK(amdpm) mtx_lock(&(amdpm)->lock) #define AMDPM_UNLOCK(amdpm) mtx_unlock(&(amdpm)->lock) #define AMDPM_LOCK_ASSERT(amdpm) mtx_assert(&(amdpm)->lock, MA_OWNED) #define AMDPM_SMBINB(amdpm,register) \ (bus_read_1(amdpm->res, register)) #define AMDPM_SMBOUTB(amdpm,register,value) \ (bus_write_1(amdpm->res, register, value)) #define AMDPM_SMBINW(amdpm,register) \ (bus_read_2(amdpm->res, register)) #define AMDPM_SMBOUTW(amdpm,register,value) \ (bus_write_2(amdpm->res, register, value)) static int amdpm_detach(device_t dev); static int amdpm_probe(device_t dev) { u_long base; u_int16_t vid; u_int16_t did; vid = pci_get_vendor(dev); did = pci_get_device(dev); if ((vid == AMDPM_VENDORID_AMD) && ((did == AMDPM_DEVICEID_AMD756PM) || (did == AMDPM_DEVICEID_AMD766PM) || (did == AMDPM_DEVICEID_AMD768PM) || (did == AMDPM_DEVICEID_AMD8111PM))) { device_set_desc(dev, "AMD 756/766/768/8111 Power Management Controller"); /* * We have to do this, since the BIOS won't give us the * resource info (not mine, anyway). */ base = pci_read_config(dev, AMDPCI_PMBASE, 4); base &= 0xff00; bus_set_resource(dev, SYS_RES_IOPORT, AMDPCI_PMBASE, base+0xe0, 32); return (BUS_PROBE_DEFAULT); } if ((vid == AMDPM_VENDORID_NVIDIA) && (did == AMDPM_DEVICEID_NF_SMB)) { device_set_desc(dev, "nForce SMBus Controller"); /* * We have to do this, since the BIOS won't give us the * resource info (not mine, anyway). 
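 * Read the SMBus I/O base out of NFPCI_PMBASE and register the 32-byte
 * range with the bus ourselves.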
*/ base = pci_read_config(dev, NFPCI_PMBASE, 4); base &= 0xff00; bus_set_resource(dev, SYS_RES_IOPORT, NFPCI_PMBASE, base, 32); return (BUS_PROBE_DEFAULT); } return ENXIO; } static int amdpm_attach(device_t dev) { struct amdpm_softc *amdpm_sc = device_get_softc(dev); u_char val_b; /* Enable I/O block access */ val_b = pci_read_config(dev, AMDPCI_GEN_CONFIG_PM, 1); pci_write_config(dev, AMDPCI_GEN_CONFIG_PM, val_b | AMDPCI_PMIOEN, 1); /* Allocate I/O space */ if (pci_get_vendor(dev) == AMDPM_VENDORID_AMD || pci_get_vendor(dev) == AMDPM_VENDORID_HYGON) amdpm_sc->rid = AMDPCI_PMBASE; else amdpm_sc->rid = NFPCI_PMBASE; amdpm_sc->res = bus_alloc_resource_any(dev, SYS_RES_IOPORT, &amdpm_sc->rid, RF_ACTIVE); if (amdpm_sc->res == NULL) { device_printf(dev, "could not map i/o space\n"); return (ENXIO); } mtx_init(&amdpm_sc->lock, device_get_nameunit(dev), "amdpm", MTX_DEF); /* Allocate a new smbus device */ amdpm_sc->smbus = device_add_child(dev, "smbus", DEVICE_UNIT_ANY); if (!amdpm_sc->smbus) { amdpm_detach(dev); return (EINVAL); } bus_attach_children(dev); return (0); } static int amdpm_detach(device_t dev) { struct amdpm_softc *amdpm_sc = device_get_softc(dev); + int error; - if (amdpm_sc->smbus) { - device_delete_child(dev, amdpm_sc->smbus); - amdpm_sc->smbus = NULL; - } + error = bus_generic_detach(dev); + if (error != 0) + return (error); mtx_destroy(&amdpm_sc->lock); if (amdpm_sc->res) bus_release_resource(dev, SYS_RES_IOPORT, amdpm_sc->rid, amdpm_sc->res); return (0); } static int amdpm_callback(device_t dev, int index, void *data) { int error = 0; switch (index) { case SMB_REQUEST_BUS: case SMB_RELEASE_BUS: break; default: error = EINVAL; } return (error); } static int amdpm_clear(struct amdpm_softc *sc) { AMDPM_LOCK_ASSERT(sc); AMDPM_SMBOUTW(sc, AMDSMB_GLOBAL_STATUS, AMDSMB_GS_CLEAR_STS); DELAY(10); return (0); } #if 0 static int amdpm_abort(struct amdpm_softc *sc) { u_short l; l = AMDPM_SMBINW(sc, AMDSMB_GLOBAL_ENABLE); AMDPM_SMBOUTW(sc, AMDSMB_GLOBAL_ENABLE, l | AMDSMB_GE_ABORT); return (0); } #endif static int amdpm_idle(struct amdpm_softc *sc) { u_short sts; AMDPM_LOCK_ASSERT(sc); sts = AMDPM_SMBINW(sc, AMDSMB_GLOBAL_STATUS); AMDPM_DEBUG(printf("amdpm: busy? 
STS=0x%x\n", sts)); return (~(sts & AMDSMB_GS_HST_STS)); } /* * Poll the SMBus controller */ static int amdpm_wait(struct amdpm_softc *sc) { int count = 10000; u_short sts = 0; int error; AMDPM_LOCK_ASSERT(sc); /* Wait for command to complete (SMBus controller is idle) */ while(count--) { DELAY(10); sts = AMDPM_SMBINW(sc, AMDSMB_GLOBAL_STATUS); if (!(sts & AMDSMB_GS_HST_STS)) break; } AMDPM_DEBUG(printf("amdpm: STS=0x%x (count=%d)\n", sts, count)); error = SMB_ENOERR; if (!count) error |= SMB_ETIMEOUT; if (sts & AMDSMB_GS_ABRT_STS) error |= SMB_EABORT; if (sts & AMDSMB_GS_COL_STS) error |= SMB_ENOACK; if (sts & AMDSMB_GS_PRERR_STS) error |= SMB_EBUSERR; if (error != SMB_ENOERR) amdpm_clear(sc); return (error); } static int amdpm_quick(device_t dev, u_char slave, int how) { struct amdpm_softc *sc = (struct amdpm_softc *)device_get_softc(dev); int error; u_short l; AMDPM_LOCK(sc); amdpm_clear(sc); if (!amdpm_idle(sc)) { AMDPM_UNLOCK(sc); return (EBUSY); } switch (how) { case SMB_QWRITE: AMDPM_DEBUG(printf("amdpm: QWRITE to 0x%x", slave)); AMDPM_SMBOUTW(sc, AMDSMB_HSTADDR, slave & ~LSB); break; case SMB_QREAD: AMDPM_DEBUG(printf("amdpm: QREAD to 0x%x", slave)); AMDPM_SMBOUTW(sc, AMDSMB_HSTADDR, slave | LSB); break; default: panic("%s: unknown QUICK command (%x)!", __func__, how); } l = AMDPM_SMBINW(sc, AMDSMB_GLOBAL_ENABLE); AMDPM_SMBOUTW(sc, AMDSMB_GLOBAL_ENABLE, (l & 0xfff8) | AMDSMB_GE_CYC_QUICK | AMDSMB_GE_HOST_STC); error = amdpm_wait(sc); AMDPM_DEBUG(printf(", error=0x%x\n", error)); AMDPM_UNLOCK(sc); return (error); } static int amdpm_sendb(device_t dev, u_char slave, char byte) { struct amdpm_softc *sc = (struct amdpm_softc *)device_get_softc(dev); int error; u_short l; AMDPM_LOCK(sc); amdpm_clear(sc); if (!amdpm_idle(sc)) { AMDPM_UNLOCK(sc); return (SMB_EBUSY); } AMDPM_SMBOUTW(sc, AMDSMB_HSTADDR, slave & ~LSB); AMDPM_SMBOUTW(sc, AMDSMB_HSTDATA, byte); l = AMDPM_SMBINW(sc, AMDSMB_GLOBAL_ENABLE); AMDPM_SMBOUTW(sc, AMDSMB_GLOBAL_ENABLE, (l & 0xfff8) | AMDSMB_GE_CYC_BYTE | AMDSMB_GE_HOST_STC); error = amdpm_wait(sc); AMDPM_DEBUG(printf("amdpm: SENDB to 0x%x, byte=0x%x, error=0x%x\n", slave, byte, error)); AMDPM_UNLOCK(sc); return (error); } static int amdpm_recvb(device_t dev, u_char slave, char *byte) { struct amdpm_softc *sc = (struct amdpm_softc *)device_get_softc(dev); int error; u_short l; AMDPM_LOCK(sc); amdpm_clear(sc); if (!amdpm_idle(sc)) { AMDPM_UNLOCK(sc); return (SMB_EBUSY); } AMDPM_SMBOUTW(sc, AMDSMB_HSTADDR, slave | LSB); l = AMDPM_SMBINW(sc, AMDSMB_GLOBAL_ENABLE); AMDPM_SMBOUTW(sc, AMDSMB_GLOBAL_ENABLE, (l & 0xfff8) | AMDSMB_GE_CYC_BYTE | AMDSMB_GE_HOST_STC); if ((error = amdpm_wait(sc)) == SMB_ENOERR) *byte = AMDPM_SMBINW(sc, AMDSMB_HSTDATA); AMDPM_DEBUG(printf("amdpm: RECVB from 0x%x, byte=0x%x, error=0x%x\n", slave, *byte, error)); AMDPM_UNLOCK(sc); return (error); } static int amdpm_writeb(device_t dev, u_char slave, char cmd, char byte) { struct amdpm_softc *sc = (struct amdpm_softc *)device_get_softc(dev); int error; u_short l; AMDPM_LOCK(sc); amdpm_clear(sc); if (!amdpm_idle(sc)) { AMDPM_UNLOCK(sc); return (SMB_EBUSY); } AMDPM_SMBOUTW(sc, AMDSMB_HSTADDR, slave & ~LSB); AMDPM_SMBOUTW(sc, AMDSMB_HSTDATA, byte); AMDPM_SMBOUTB(sc, AMDSMB_HSTCMD, cmd); l = AMDPM_SMBINW(sc, AMDSMB_GLOBAL_ENABLE); AMDPM_SMBOUTW(sc, AMDSMB_GLOBAL_ENABLE, (l & 0xfff8) | AMDSMB_GE_CYC_BDATA | AMDSMB_GE_HOST_STC); error = amdpm_wait(sc); AMDPM_DEBUG(printf("amdpm: WRITEB to 0x%x, cmd=0x%x, byte=0x%x, error=0x%x\n", slave, cmd, byte, error)); AMDPM_UNLOCK(sc); return (error); } static int 
amdpm_readb(device_t dev, u_char slave, char cmd, char *byte) { struct amdpm_softc *sc = (struct amdpm_softc *)device_get_softc(dev); int error; u_short l; AMDPM_LOCK(sc); amdpm_clear(sc); if (!amdpm_idle(sc)) { AMDPM_UNLOCK(sc); return (SMB_EBUSY); } AMDPM_SMBOUTW(sc, AMDSMB_HSTADDR, slave | LSB); AMDPM_SMBOUTB(sc, AMDSMB_HSTCMD, cmd); l = AMDPM_SMBINW(sc, AMDSMB_GLOBAL_ENABLE); AMDPM_SMBOUTW(sc, AMDSMB_GLOBAL_ENABLE, (l & 0xfff8) | AMDSMB_GE_CYC_BDATA | AMDSMB_GE_HOST_STC); if ((error = amdpm_wait(sc)) == SMB_ENOERR) *byte = AMDPM_SMBINW(sc, AMDSMB_HSTDATA); AMDPM_DEBUG(printf("amdpm: READB from 0x%x, cmd=0x%x, byte=0x%x, error=0x%x\n", slave, cmd, *byte, error)); AMDPM_UNLOCK(sc); return (error); } static int amdpm_writew(device_t dev, u_char slave, char cmd, short word) { struct amdpm_softc *sc = (struct amdpm_softc *)device_get_softc(dev); int error; u_short l; AMDPM_LOCK(sc); amdpm_clear(sc); if (!amdpm_idle(sc)) { AMDPM_UNLOCK(sc); return (SMB_EBUSY); } AMDPM_SMBOUTW(sc, AMDSMB_HSTADDR, slave & ~LSB); AMDPM_SMBOUTW(sc, AMDSMB_HSTDATA, word); AMDPM_SMBOUTB(sc, AMDSMB_HSTCMD, cmd); l = AMDPM_SMBINW(sc, AMDSMB_GLOBAL_ENABLE); AMDPM_SMBOUTW(sc, AMDSMB_GLOBAL_ENABLE, (l & 0xfff8) | AMDSMB_GE_CYC_WDATA | AMDSMB_GE_HOST_STC); error = amdpm_wait(sc); AMDPM_DEBUG(printf("amdpm: WRITEW to 0x%x, cmd=0x%x, word=0x%x, error=0x%x\n", slave, cmd, word, error)); AMDPM_UNLOCK(sc); return (error); } static int amdpm_readw(device_t dev, u_char slave, char cmd, short *word) { struct amdpm_softc *sc = (struct amdpm_softc *)device_get_softc(dev); int error; u_short l; AMDPM_LOCK(sc); amdpm_clear(sc); if (!amdpm_idle(sc)) { AMDPM_UNLOCK(sc); return (SMB_EBUSY); } AMDPM_SMBOUTW(sc, AMDSMB_HSTADDR, slave | LSB); AMDPM_SMBOUTB(sc, AMDSMB_HSTCMD, cmd); l = AMDPM_SMBINW(sc, AMDSMB_GLOBAL_ENABLE); AMDPM_SMBOUTW(sc, AMDSMB_GLOBAL_ENABLE, (l & 0xfff8) | AMDSMB_GE_CYC_WDATA | AMDSMB_GE_HOST_STC); if ((error = amdpm_wait(sc)) == SMB_ENOERR) *word = AMDPM_SMBINW(sc, AMDSMB_HSTDATA); AMDPM_DEBUG(printf("amdpm: READW from 0x%x, cmd=0x%x, word=0x%x, error=0x%x\n", slave, cmd, *word, error)); AMDPM_UNLOCK(sc); return (error); } static int amdpm_bwrite(device_t dev, u_char slave, char cmd, u_char count, char *buf) { struct amdpm_softc *sc = (struct amdpm_softc *)device_get_softc(dev); u_char i; int error; u_short l; if (count < 1 || count > 32) return (SMB_EINVAL); AMDPM_LOCK(sc); amdpm_clear(sc); if (!amdpm_idle(sc)) { AMDPM_UNLOCK(sc); return (SMB_EBUSY); } AMDPM_SMBOUTW(sc, AMDSMB_HSTADDR, slave & ~LSB); /* * Do we have to reset the internal 32-byte buffer? * Can't see how to do this from the data sheet. 
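 * For comparison, alpm(4) clears its block buffer with SMB_BLK_CLR; no
 * equivalent bit is documented for this controller, so the FIFO is simply
 * filled below.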
*/ AMDPM_SMBOUTW(sc, AMDSMB_HSTDATA, count); /* Fill the 32-byte internal buffer */ for (i = 0; i < count; i++) { AMDPM_SMBOUTB(sc, AMDSMB_HSTDFIFO, buf[i]); DELAY(2); } AMDPM_SMBOUTB(sc, AMDSMB_HSTCMD, cmd); l = AMDPM_SMBINW(sc, AMDSMB_GLOBAL_ENABLE); AMDPM_SMBOUTW(sc, AMDSMB_GLOBAL_ENABLE, (l & 0xfff8) | AMDSMB_GE_CYC_BLOCK | AMDSMB_GE_HOST_STC); error = amdpm_wait(sc); AMDPM_DEBUG(printf("amdpm: WRITEBLK to 0x%x, count=0x%x, cmd=0x%x, error=0x%x", slave, count, cmd, error)); AMDPM_UNLOCK(sc); return (error); } static int amdpm_bread(device_t dev, u_char slave, char cmd, u_char *count, char *buf) { struct amdpm_softc *sc = (struct amdpm_softc *)device_get_softc(dev); u_char data, len, i; int error; u_short l; if (*count < 1 || *count > 32) return (SMB_EINVAL); AMDPM_LOCK(sc); amdpm_clear(sc); if (!amdpm_idle(sc)) { AMDPM_UNLOCK(sc); return (SMB_EBUSY); } AMDPM_SMBOUTW(sc, AMDSMB_HSTADDR, slave | LSB); AMDPM_SMBOUTB(sc, AMDSMB_HSTCMD, cmd); l = AMDPM_SMBINW(sc, AMDSMB_GLOBAL_ENABLE); AMDPM_SMBOUTW(sc, AMDSMB_GLOBAL_ENABLE, (l & 0xfff8) | AMDSMB_GE_CYC_BLOCK | AMDSMB_GE_HOST_STC); if ((error = amdpm_wait(sc)) != SMB_ENOERR) goto error; len = AMDPM_SMBINW(sc, AMDSMB_HSTDATA); /* Read the 32-byte internal buffer */ for (i = 0; i < len; i++) { data = AMDPM_SMBINB(sc, AMDSMB_HSTDFIFO); if (i < *count) buf[i] = data; DELAY(2); } *count = len; error: AMDPM_DEBUG(printf("amdpm: READBLK to 0x%x, count=0x%x, cmd=0x%x, error=0x%x", slave, *count, cmd, error)); AMDPM_UNLOCK(sc); return (error); } static device_method_t amdpm_methods[] = { /* Device interface */ DEVMETHOD(device_probe, amdpm_probe), DEVMETHOD(device_attach, amdpm_attach), DEVMETHOD(device_detach, amdpm_detach), /* SMBus interface */ DEVMETHOD(smbus_callback, amdpm_callback), DEVMETHOD(smbus_quick, amdpm_quick), DEVMETHOD(smbus_sendb, amdpm_sendb), DEVMETHOD(smbus_recvb, amdpm_recvb), DEVMETHOD(smbus_writeb, amdpm_writeb), DEVMETHOD(smbus_readb, amdpm_readb), DEVMETHOD(smbus_writew, amdpm_writew), DEVMETHOD(smbus_readw, amdpm_readw), DEVMETHOD(smbus_bwrite, amdpm_bwrite), DEVMETHOD(smbus_bread, amdpm_bread), { 0, 0 } }; static driver_t amdpm_driver = { "amdpm", amdpm_methods, sizeof(struct amdpm_softc), }; DRIVER_MODULE(amdpm, pci, amdpm_driver, 0, 0); DRIVER_MODULE(smbus, amdpm, smbus_driver, 0, 0); MODULE_DEPEND(amdpm, pci, 1, 1, 1); MODULE_DEPEND(amdpm, smbus, SMBUS_MINVER, SMBUS_PREFVER, SMBUS_MAXVER); MODULE_VERSION(amdpm, 1); diff --git a/sys/dev/amdsmb/amdsmb.c b/sys/dev/amdsmb/amdsmb.c index 00e2f979b6cc..3d2e7a5e0c19 100644 --- a/sys/dev/amdsmb/amdsmb.c +++ b/sys/dev/amdsmb/amdsmb.c @@ -1,577 +1,577 @@ /*- * Copyright (c) 2005 Ruslan Ermilov * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include "smbus_if.h" #define AMDSMB_DEBUG(x) if (amdsmb_debug) (x) #ifdef DEBUG static int amdsmb_debug = 1; #else static int amdsmb_debug = 0; #endif #define AMDSMB_VENDORID_AMD 0x1022 #define AMDSMB_DEVICEID_AMD8111_SMB2 0x746a /* * ACPI 3.0, Chapter 12, Embedded Controller Interface. */ #define EC_DATA 0x00 /* data register */ #define EC_SC 0x04 /* status of controller */ #define EC_CMD 0x04 /* command register */ #define EC_SC_IBF 0x02 /* data ready for embedded controller */ #define EC_SC_OBF 0x01 /* data ready for host */ #define EC_CMD_WR 0x81 /* write EC */ #define EC_CMD_RD 0x80 /* read EC */ /* * ACPI 3.0, Chapter 12, SMBus Host Controller Interface. */ #define SMB_PRTCL 0x00 /* protocol */ #define SMB_STS 0x01 /* status */ #define SMB_ADDR 0x02 /* address */ #define SMB_CMD 0x03 /* command */ #define SMB_DATA 0x04 /* 32 data registers */ #define SMB_BCNT 0x24 /* number of data bytes */ #define SMB_ALRM_A 0x25 /* alarm address */ #define SMB_ALRM_D 0x26 /* 2 bytes alarm data */ #define SMB_STS_DONE 0x80 #define SMB_STS_ALRM 0x40 #define SMB_STS_RES 0x20 #define SMB_STS_STATUS 0x1f #define SMB_STS_OK 0x00 /* OK */ #define SMB_STS_UF 0x07 /* Unknown Failure */ #define SMB_STS_DANA 0x10 /* Device Address Not Acknowledged */ #define SMB_STS_DED 0x11 /* Device Error Detected */ #define SMB_STS_DCAD 0x12 /* Device Command Access Denied */ #define SMB_STS_UE 0x13 /* Unknown Error */ #define SMB_STS_DAD 0x17 /* Device Access Denied */ #define SMB_STS_T 0x18 /* Timeout */ #define SMB_STS_HUP 0x19 /* Host Unsupported Protocol */ #define SMB_STS_B 0x1a /* Busy */ #define SMB_STS_PEC 0x1f /* PEC (CRC-8) Error */ #define SMB_PRTCL_WRITE 0x00 #define SMB_PRTCL_READ 0x01 #define SMB_PRTCL_QUICK 0x02 #define SMB_PRTCL_BYTE 0x04 #define SMB_PRTCL_BYTE_DATA 0x06 #define SMB_PRTCL_WORD_DATA 0x08 #define SMB_PRTCL_BLOCK_DATA 0x0a #define SMB_PRTCL_PROC_CALL 0x0c #define SMB_PRTCL_BLOCK_PROC_CALL 0x0d #define SMB_PRTCL_PEC 0x80 struct amdsmb_softc { int rid; struct resource *res; device_t smbus; struct mtx lock; }; #define AMDSMB_LOCK(amdsmb) mtx_lock(&(amdsmb)->lock) #define AMDSMB_UNLOCK(amdsmb) mtx_unlock(&(amdsmb)->lock) #define AMDSMB_LOCK_ASSERT(amdsmb) mtx_assert(&(amdsmb)->lock, MA_OWNED) #define AMDSMB_ECINB(amdsmb, register) \ (bus_read_1(amdsmb->res, register)) #define AMDSMB_ECOUTB(amdsmb, register, value) \ (bus_write_1(amdsmb->res, register, value)) static int amdsmb_detach(device_t dev); struct pci_device_table amdsmb_devs[] = { { PCI_DEV(AMDSMB_VENDORID_AMD, AMDSMB_DEVICEID_AMD8111_SMB2), PCI_DESCR("AMD-8111 SMBus 2.0 Controller") } }; static int amdsmb_probe(device_t dev) { const struct pci_device_table *tbl; tbl = PCI_MATCH(dev, amdsmb_devs); if (tbl == NULL) return (ENXIO); device_set_desc(dev, tbl->descr); return (BUS_PROBE_DEFAULT); } static int amdsmb_attach(device_t dev) { struct amdsmb_softc *amdsmb_sc = device_get_softc(dev); /* Allocate I/O space 
*/ amdsmb_sc->rid = PCIR_BAR(0); amdsmb_sc->res = bus_alloc_resource_any(dev, SYS_RES_IOPORT, &amdsmb_sc->rid, RF_ACTIVE); if (amdsmb_sc->res == NULL) { device_printf(dev, "could not map i/o space\n"); return (ENXIO); } mtx_init(&amdsmb_sc->lock, device_get_nameunit(dev), "amdsmb", MTX_DEF); /* Allocate a new smbus device */ amdsmb_sc->smbus = device_add_child(dev, "smbus", DEVICE_UNIT_ANY); if (!amdsmb_sc->smbus) { amdsmb_detach(dev); return (EINVAL); } bus_attach_children(dev); return (0); } static int amdsmb_detach(device_t dev) { struct amdsmb_softc *amdsmb_sc = device_get_softc(dev); + int error; - if (amdsmb_sc->smbus) { - device_delete_child(dev, amdsmb_sc->smbus); - amdsmb_sc->smbus = NULL; - } + error = bus_generic_detach(dev); + if (error != 0) + return (error); mtx_destroy(&amdsmb_sc->lock); if (amdsmb_sc->res) bus_release_resource(dev, SYS_RES_IOPORT, amdsmb_sc->rid, amdsmb_sc->res); return (0); } static int amdsmb_callback(device_t dev, int index, void *data) { int error = 0; switch (index) { case SMB_REQUEST_BUS: case SMB_RELEASE_BUS: break; default: error = EINVAL; } return (error); } static int amdsmb_ec_wait_write(struct amdsmb_softc *sc) { int timeout = 500; while (timeout-- && AMDSMB_ECINB(sc, EC_SC) & EC_SC_IBF) DELAY(1); if (timeout < 0) { device_printf(sc->smbus, "timeout waiting for IBF to clear\n"); return (1); } return (0); } static int amdsmb_ec_wait_read(struct amdsmb_softc *sc) { int timeout = 500; while (timeout-- && ~AMDSMB_ECINB(sc, EC_SC) & EC_SC_OBF) DELAY(1); if (timeout < 0) { device_printf(sc->smbus, "timeout waiting for OBF to set\n"); return (1); } return (0); } static int amdsmb_ec_read(struct amdsmb_softc *sc, u_char addr, u_char *data) { AMDSMB_LOCK_ASSERT(sc); if (amdsmb_ec_wait_write(sc)) return (1); AMDSMB_ECOUTB(sc, EC_CMD, EC_CMD_RD); if (amdsmb_ec_wait_write(sc)) return (1); AMDSMB_ECOUTB(sc, EC_DATA, addr); if (amdsmb_ec_wait_read(sc)) return (1); *data = AMDSMB_ECINB(sc, EC_DATA); return (0); } static int amdsmb_ec_write(struct amdsmb_softc *sc, u_char addr, u_char data) { AMDSMB_LOCK_ASSERT(sc); if (amdsmb_ec_wait_write(sc)) return (1); AMDSMB_ECOUTB(sc, EC_CMD, EC_CMD_WR); if (amdsmb_ec_wait_write(sc)) return (1); AMDSMB_ECOUTB(sc, EC_DATA, addr); if (amdsmb_ec_wait_write(sc)) return (1); AMDSMB_ECOUTB(sc, EC_DATA, data); return (0); } static int amdsmb_wait(struct amdsmb_softc *sc) { u_char sts, temp; int error, count; AMDSMB_LOCK_ASSERT(sc); amdsmb_ec_read(sc, SMB_PRTCL, &temp); if (temp != 0) { count = 10000; do { DELAY(500); amdsmb_ec_read(sc, SMB_PRTCL, &temp); } while (temp != 0 && count--); if (count < 0) return (SMB_ETIMEOUT); } amdsmb_ec_read(sc, SMB_STS, &sts); sts &= SMB_STS_STATUS; AMDSMB_DEBUG(printf("amdsmb: STS=0x%x\n", sts)); switch (sts) { case SMB_STS_OK: error = SMB_ENOERR; break; case SMB_STS_DANA: error = SMB_ENOACK; break; case SMB_STS_B: error = SMB_EBUSY; break; case SMB_STS_T: error = SMB_ETIMEOUT; break; case SMB_STS_DCAD: case SMB_STS_DAD: case SMB_STS_HUP: error = SMB_ENOTSUPP; break; default: error = SMB_EBUSERR; break; } return (error); } static int amdsmb_quick(device_t dev, u_char slave, int how) { struct amdsmb_softc *sc = (struct amdsmb_softc *)device_get_softc(dev); u_char protocol; int error; protocol = SMB_PRTCL_QUICK; switch (how) { case SMB_QWRITE: protocol |= SMB_PRTCL_WRITE; AMDSMB_DEBUG(printf("amdsmb: QWRITE to 0x%x", slave)); break; case SMB_QREAD: protocol |= SMB_PRTCL_READ; AMDSMB_DEBUG(printf("amdsmb: QREAD to 0x%x", slave)); break; default: panic("%s: unknown QUICK command (%x)!",
__func__, how); } AMDSMB_LOCK(sc); amdsmb_ec_write(sc, SMB_ADDR, slave); amdsmb_ec_write(sc, SMB_PRTCL, protocol); error = amdsmb_wait(sc); AMDSMB_DEBUG(printf(", error=0x%x\n", error)); AMDSMB_UNLOCK(sc); return (error); } static int amdsmb_sendb(device_t dev, u_char slave, char byte) { struct amdsmb_softc *sc = (struct amdsmb_softc *)device_get_softc(dev); int error; AMDSMB_LOCK(sc); amdsmb_ec_write(sc, SMB_CMD, byte); amdsmb_ec_write(sc, SMB_ADDR, slave); amdsmb_ec_write(sc, SMB_PRTCL, SMB_PRTCL_WRITE | SMB_PRTCL_BYTE); error = amdsmb_wait(sc); AMDSMB_DEBUG(printf("amdsmb: SENDB to 0x%x, byte=0x%x, error=0x%x\n", slave, byte, error)); AMDSMB_UNLOCK(sc); return (error); } static int amdsmb_recvb(device_t dev, u_char slave, char *byte) { struct amdsmb_softc *sc = (struct amdsmb_softc *)device_get_softc(dev); int error; AMDSMB_LOCK(sc); amdsmb_ec_write(sc, SMB_ADDR, slave); amdsmb_ec_write(sc, SMB_PRTCL, SMB_PRTCL_READ | SMB_PRTCL_BYTE); if ((error = amdsmb_wait(sc)) == SMB_ENOERR) amdsmb_ec_read(sc, SMB_DATA, byte); AMDSMB_DEBUG(printf("amdsmb: RECVB from 0x%x, byte=0x%x, error=0x%x\n", slave, *byte, error)); AMDSMB_UNLOCK(sc); return (error); } static int amdsmb_writeb(device_t dev, u_char slave, char cmd, char byte) { struct amdsmb_softc *sc = (struct amdsmb_softc *)device_get_softc(dev); int error; AMDSMB_LOCK(sc); amdsmb_ec_write(sc, SMB_CMD, cmd); amdsmb_ec_write(sc, SMB_DATA, byte); amdsmb_ec_write(sc, SMB_ADDR, slave); amdsmb_ec_write(sc, SMB_PRTCL, SMB_PRTCL_WRITE | SMB_PRTCL_BYTE_DATA); error = amdsmb_wait(sc); AMDSMB_DEBUG(printf("amdsmb: WRITEB to 0x%x, cmd=0x%x, byte=0x%x, " "error=0x%x\n", slave, cmd, byte, error)); AMDSMB_UNLOCK(sc); return (error); } static int amdsmb_readb(device_t dev, u_char slave, char cmd, char *byte) { struct amdsmb_softc *sc = (struct amdsmb_softc *)device_get_softc(dev); int error; AMDSMB_LOCK(sc); amdsmb_ec_write(sc, SMB_CMD, cmd); amdsmb_ec_write(sc, SMB_ADDR, slave); amdsmb_ec_write(sc, SMB_PRTCL, SMB_PRTCL_READ | SMB_PRTCL_BYTE_DATA); if ((error = amdsmb_wait(sc)) == SMB_ENOERR) amdsmb_ec_read(sc, SMB_DATA, byte); AMDSMB_DEBUG(printf("amdsmb: READB from 0x%x, cmd=0x%x, byte=0x%x, " "error=0x%x\n", slave, cmd, (unsigned char)*byte, error)); AMDSMB_UNLOCK(sc); return (error); } static int amdsmb_writew(device_t dev, u_char slave, char cmd, short word) { struct amdsmb_softc *sc = (struct amdsmb_softc *)device_get_softc(dev); int error; AMDSMB_LOCK(sc); amdsmb_ec_write(sc, SMB_CMD, cmd); amdsmb_ec_write(sc, SMB_DATA, word); amdsmb_ec_write(sc, SMB_DATA + 1, word >> 8); amdsmb_ec_write(sc, SMB_ADDR, slave); amdsmb_ec_write(sc, SMB_PRTCL, SMB_PRTCL_WRITE | SMB_PRTCL_WORD_DATA); error = amdsmb_wait(sc); AMDSMB_DEBUG(printf("amdsmb: WRITEW to 0x%x, cmd=0x%x, word=0x%x, " "error=0x%x\n", slave, cmd, word, error)); AMDSMB_UNLOCK(sc); return (error); } static int amdsmb_readw(device_t dev, u_char slave, char cmd, short *word) { struct amdsmb_softc *sc = (struct amdsmb_softc *)device_get_softc(dev); u_char temp[2]; int error; AMDSMB_LOCK(sc); amdsmb_ec_write(sc, SMB_CMD, cmd); amdsmb_ec_write(sc, SMB_ADDR, slave); amdsmb_ec_write(sc, SMB_PRTCL, SMB_PRTCL_READ | SMB_PRTCL_WORD_DATA); if ((error = amdsmb_wait(sc)) == SMB_ENOERR) { amdsmb_ec_read(sc, SMB_DATA + 0, &temp[0]); amdsmb_ec_read(sc, SMB_DATA + 1, &temp[1]); *word = temp[0] | (temp[1] << 8); } AMDSMB_DEBUG(printf("amdsmb: READW from 0x%x, cmd=0x%x, word=0x%x, " "error=0x%x\n", slave, cmd, (unsigned short)*word, error)); AMDSMB_UNLOCK(sc); return (error); } static int amdsmb_bwrite(device_t dev, 
u_char slave, char cmd, u_char count, char *buf) { struct amdsmb_softc *sc = (struct amdsmb_softc *)device_get_softc(dev); u_char i; int error; if (count < 1 || count > 32) return (SMB_EINVAL); AMDSMB_LOCK(sc); amdsmb_ec_write(sc, SMB_CMD, cmd); amdsmb_ec_write(sc, SMB_BCNT, count); for (i = 0; i < count; i++) amdsmb_ec_write(sc, SMB_DATA + i, buf[i]); amdsmb_ec_write(sc, SMB_ADDR, slave); amdsmb_ec_write(sc, SMB_PRTCL, SMB_PRTCL_WRITE | SMB_PRTCL_BLOCK_DATA); error = amdsmb_wait(sc); AMDSMB_DEBUG(printf("amdsmb: WRITEBLK to 0x%x, count=0x%x, cmd=0x%x, " "error=0x%x", slave, count, cmd, error)); AMDSMB_UNLOCK(sc); return (error); } static int amdsmb_bread(device_t dev, u_char slave, char cmd, u_char *count, char *buf) { struct amdsmb_softc *sc = (struct amdsmb_softc *)device_get_softc(dev); u_char data, len, i; int error; if (*count < 1 || *count > 32) return (SMB_EINVAL); AMDSMB_LOCK(sc); amdsmb_ec_write(sc, SMB_CMD, cmd); amdsmb_ec_write(sc, SMB_ADDR, slave); amdsmb_ec_write(sc, SMB_PRTCL, SMB_PRTCL_READ | SMB_PRTCL_BLOCK_DATA); if ((error = amdsmb_wait(sc)) == SMB_ENOERR) { amdsmb_ec_read(sc, SMB_BCNT, &len); for (i = 0; i < len; i++) { amdsmb_ec_read(sc, SMB_DATA + i, &data); if (i < *count) buf[i] = data; } *count = len; } AMDSMB_DEBUG(printf("amdsmb: READBLK to 0x%x, count=0x%x, cmd=0x%x, " "error=0x%x", slave, *count, cmd, error)); AMDSMB_UNLOCK(sc); return (error); } static device_method_t amdsmb_methods[] = { /* Device interface */ DEVMETHOD(device_probe, amdsmb_probe), DEVMETHOD(device_attach, amdsmb_attach), DEVMETHOD(device_detach, amdsmb_detach), /* SMBus interface */ DEVMETHOD(smbus_callback, amdsmb_callback), DEVMETHOD(smbus_quick, amdsmb_quick), DEVMETHOD(smbus_sendb, amdsmb_sendb), DEVMETHOD(smbus_recvb, amdsmb_recvb), DEVMETHOD(smbus_writeb, amdsmb_writeb), DEVMETHOD(smbus_readb, amdsmb_readb), DEVMETHOD(smbus_writew, amdsmb_writew), DEVMETHOD(smbus_readw, amdsmb_readw), DEVMETHOD(smbus_bwrite, amdsmb_bwrite), DEVMETHOD(smbus_bread, amdsmb_bread), { 0, 0 } }; static driver_t amdsmb_driver = { "amdsmb", amdsmb_methods, sizeof(struct amdsmb_softc), }; DRIVER_MODULE(amdsmb, pci, amdsmb_driver, 0, 0); DRIVER_MODULE(smbus, amdsmb, smbus_driver, 0, 0); MODULE_DEPEND(amdsmb, pci, 1, 1, 1); MODULE_DEPEND(amdsmb, smbus, SMBUS_MINVER, SMBUS_PREFVER, SMBUS_MAXVER); MODULE_VERSION(amdsmb, 1); diff --git a/sys/dev/cas/if_cas.c b/sys/dev/cas/if_cas.c index 1f684097bd3a..fed2c3a3a051 100644 --- a/sys/dev/cas/if_cas.c +++ b/sys/dev/cas/if_cas.c @@ -1,2907 +1,2907 @@ /*- * SPDX-License-Identifier: BSD-2-Clause * * Copyright (C) 2001 Eduardo Horvath. * Copyright (c) 2001-2003 Thomas Moestl * Copyright (c) 2007-2009 Marius Strobl * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * from: NetBSD: gem.c,v 1.21 2002/06/01 23:50:58 lukem Exp * from: FreeBSD: if_gem.c 182060 2008-08-23 15:03:26Z marius */ #include /* * driver for Sun Cassini/Cassini+ and National Semiconductor DP83065 * Saturn Gigabit Ethernet controllers */ #if 0 #define CAS_DEBUG #endif #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #if defined(__powerpc__) #include #include #include #endif #include #include #include #include #include #include #include #include "miibus_if.h" #define RINGASSERT(n , min, max) \ CTASSERT(powerof2(n) && (n) >= (min) && (n) <= (max)) RINGASSERT(CAS_NRXCOMP, 128, 32768); RINGASSERT(CAS_NRXDESC, 32, 8192); RINGASSERT(CAS_NRXDESC2, 32, 8192); RINGASSERT(CAS_NTXDESC, 32, 8192); #undef RINGASSERT #define CCDASSERT(m, a) \ CTASSERT((offsetof(struct cas_control_data, m) & ((a) - 1)) == 0) CCDASSERT(ccd_rxcomps, CAS_RX_COMP_ALIGN); CCDASSERT(ccd_rxdescs, CAS_RX_DESC_ALIGN); CCDASSERT(ccd_rxdescs2, CAS_RX_DESC_ALIGN); #undef CCDASSERT #define CAS_TRIES 10000 /* * According to documentation, the hardware has support for basic TCP * checksum offloading only, in practice this can be also used for UDP * however (i.e. the problem of previous Sun NICs that a checksum of 0x0 * is not converted to 0xffff no longer exists). 
*/ #define CAS_CSUM_FEATURES (CSUM_TCP | CSUM_UDP) static inline void cas_add_rxdesc(struct cas_softc *sc, u_int idx); static int cas_attach(struct cas_softc *sc); static int cas_bitwait(struct cas_softc *sc, bus_addr_t r, uint32_t clr, uint32_t set); static void cas_cddma_callback(void *xsc, bus_dma_segment_t *segs, int nsegs, int error); static void cas_detach(struct cas_softc *sc); static int cas_disable_rx(struct cas_softc *sc); static int cas_disable_tx(struct cas_softc *sc); static void cas_eint(struct cas_softc *sc, u_int status); static void cas_free(struct mbuf *m); static void cas_init(void *xsc); static void cas_init_locked(struct cas_softc *sc); static void cas_init_regs(struct cas_softc *sc); static int cas_intr(void *v); static void cas_intr_task(void *arg, int pending __unused); static int cas_ioctl(if_t ifp, u_long cmd, caddr_t data); static int cas_load_txmbuf(struct cas_softc *sc, struct mbuf **m_head); static int cas_mediachange(if_t ifp); static void cas_mediastatus(if_t ifp, struct ifmediareq *ifmr); static void cas_meminit(struct cas_softc *sc); static void cas_mifinit(struct cas_softc *sc); static int cas_mii_readreg(device_t dev, int phy, int reg); static void cas_mii_statchg(device_t dev); static int cas_mii_writereg(device_t dev, int phy, int reg, int val); static void cas_reset(struct cas_softc *sc); static int cas_reset_rx(struct cas_softc *sc); static int cas_reset_tx(struct cas_softc *sc); static void cas_resume(struct cas_softc *sc); static u_int cas_descsize(u_int sz); static void cas_rint(struct cas_softc *sc); static void cas_rint_timeout(void *arg); static inline void cas_rxcksum(struct mbuf *m, uint16_t cksum); static inline void cas_rxcompinit(struct cas_rx_comp *rxcomp); static u_int cas_rxcompsize(u_int sz); static void cas_rxdma_callback(void *xsc, bus_dma_segment_t *segs, int nsegs, int error); static void cas_setladrf(struct cas_softc *sc); static void cas_start(if_t ifp); static void cas_stop(if_t ifp); static void cas_suspend(struct cas_softc *sc); static void cas_tick(void *arg); static void cas_tint(struct cas_softc *sc); static void cas_tx_task(void *arg, int pending __unused); static inline void cas_txkick(struct cas_softc *sc); static void cas_watchdog(struct cas_softc *sc); MODULE_DEPEND(cas, ether, 1, 1, 1); MODULE_DEPEND(cas, miibus, 1, 1, 1); #ifdef CAS_DEBUG #include #define KTR_CAS KTR_SPARE2 #endif static int cas_attach(struct cas_softc *sc) { struct cas_txsoft *txs; if_t ifp; int error, i; uint32_t v; /* Set up ifnet structure. */ ifp = sc->sc_ifp = if_alloc(IFT_ETHER); if_setsoftc(ifp, sc); if_initname(ifp, device_get_name(sc->sc_dev), device_get_unit(sc->sc_dev)); if_setflags(ifp, IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST); if_setstartfn(ifp, cas_start); if_setioctlfn(ifp, cas_ioctl); if_setinitfn(ifp, cas_init); if_setsendqlen(ifp, CAS_TXQUEUELEN); if_setsendqready(ifp); callout_init_mtx(&sc->sc_tick_ch, &sc->sc_mtx, 0); callout_init_mtx(&sc->sc_rx_ch, &sc->sc_mtx, 0); /* Create local taskq. */ NET_TASK_INIT(&sc->sc_intr_task, 0, cas_intr_task, sc); TASK_INIT(&sc->sc_tx_task, 1, cas_tx_task, ifp); sc->sc_tq = taskqueue_create_fast("cas_taskq", M_WAITOK, taskqueue_thread_enqueue, &sc->sc_tq); error = taskqueue_start_threads(&sc->sc_tq, 1, PI_NET, "%s taskq", device_get_nameunit(sc->sc_dev)); if (error != 0) { device_printf(sc->sc_dev, "could not start threads\n"); goto fail_taskq; } /* Make sure the chip is stopped. 
*/ cas_reset(sc); error = bus_dma_tag_create(bus_get_dma_tag(sc->sc_dev), 1, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL, BUS_SPACE_MAXSIZE, 0, BUS_SPACE_MAXSIZE, 0, NULL, NULL, &sc->sc_pdmatag); if (error != 0) goto fail_taskq; error = bus_dma_tag_create(sc->sc_pdmatag, 1, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL, CAS_PAGE_SIZE, 1, CAS_PAGE_SIZE, 0, NULL, NULL, &sc->sc_rdmatag); if (error != 0) goto fail_ptag; error = bus_dma_tag_create(sc->sc_pdmatag, 1, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL, MCLBYTES * CAS_NTXSEGS, CAS_NTXSEGS, MCLBYTES, BUS_DMA_ALLOCNOW, NULL, NULL, &sc->sc_tdmatag); if (error != 0) goto fail_rtag; error = bus_dma_tag_create(sc->sc_pdmatag, CAS_TX_DESC_ALIGN, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL, sizeof(struct cas_control_data), 1, sizeof(struct cas_control_data), 0, NULL, NULL, &sc->sc_cdmatag); if (error != 0) goto fail_ttag; /* * Allocate the control data structures, create and load the * DMA map for it. */ if ((error = bus_dmamem_alloc(sc->sc_cdmatag, (void **)&sc->sc_control_data, BUS_DMA_WAITOK | BUS_DMA_COHERENT | BUS_DMA_ZERO, &sc->sc_cddmamap)) != 0) { device_printf(sc->sc_dev, "unable to allocate control data, error = %d\n", error); goto fail_ctag; } sc->sc_cddma = 0; if ((error = bus_dmamap_load(sc->sc_cdmatag, sc->sc_cddmamap, sc->sc_control_data, sizeof(struct cas_control_data), cas_cddma_callback, sc, 0)) != 0 || sc->sc_cddma == 0) { device_printf(sc->sc_dev, "unable to load control data DMA map, error = %d\n", error); goto fail_cmem; } /* * Initialize the transmit job descriptors. */ STAILQ_INIT(&sc->sc_txfreeq); STAILQ_INIT(&sc->sc_txdirtyq); /* * Create the transmit buffer DMA maps. */ error = ENOMEM; for (i = 0; i < CAS_TXQUEUELEN; i++) { txs = &sc->sc_txsoft[i]; txs->txs_mbuf = NULL; txs->txs_ndescs = 0; if ((error = bus_dmamap_create(sc->sc_tdmatag, 0, &txs->txs_dmamap)) != 0) { device_printf(sc->sc_dev, "unable to create TX DMA map %d, error = %d\n", i, error); goto fail_txd; } STAILQ_INSERT_TAIL(&sc->sc_txfreeq, txs, txs_q); } /* * Allocate the receive buffers, create and load the DMA maps * for them. */ for (i = 0; i < CAS_NRXDESC; i++) { if ((error = bus_dmamem_alloc(sc->sc_rdmatag, &sc->sc_rxdsoft[i].rxds_buf, BUS_DMA_WAITOK, &sc->sc_rxdsoft[i].rxds_dmamap)) != 0) { device_printf(sc->sc_dev, "unable to allocate RX buffer %d, error = %d\n", i, error); goto fail_rxmem; } sc->sc_rxdptr = i; sc->sc_rxdsoft[i].rxds_paddr = 0; if ((error = bus_dmamap_load(sc->sc_rdmatag, sc->sc_rxdsoft[i].rxds_dmamap, sc->sc_rxdsoft[i].rxds_buf, CAS_PAGE_SIZE, cas_rxdma_callback, sc, 0)) != 0 || sc->sc_rxdsoft[i].rxds_paddr == 0) { device_printf(sc->sc_dev, "unable to load RX DMA map %d, error = %d\n", i, error); goto fail_rxmap; } } if ((sc->sc_flags & CAS_SERDES) == 0) { CAS_WRITE_4(sc, CAS_PCS_DATAPATH, CAS_PCS_DATAPATH_MII); CAS_BARRIER(sc, CAS_PCS_DATAPATH, 4, BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE); cas_mifinit(sc); /* * Look for an external PHY. */ error = ENXIO; v = CAS_READ_4(sc, CAS_MIF_CONF); if ((v & CAS_MIF_CONF_MDI1) != 0) { v |= CAS_MIF_CONF_PHY_SELECT; CAS_WRITE_4(sc, CAS_MIF_CONF, v); CAS_BARRIER(sc, CAS_MIF_CONF, 4, BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE); /* Enable/unfreeze the GMII pins of Saturn. 
*/ if (sc->sc_variant == CAS_SATURN) { CAS_WRITE_4(sc, CAS_SATURN_PCFG, CAS_READ_4(sc, CAS_SATURN_PCFG) & ~CAS_SATURN_PCFG_FSI); CAS_BARRIER(sc, CAS_SATURN_PCFG, 4, BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE); DELAY(10000); } error = mii_attach(sc->sc_dev, &sc->sc_miibus, ifp, cas_mediachange, cas_mediastatus, BMSR_DEFCAPMASK, MII_PHY_ANY, MII_OFFSET_ANY, MIIF_DOPAUSE); } /* * Fall back on an internal PHY if no external PHY was found. */ if (error != 0 && (v & CAS_MIF_CONF_MDI0) != 0) { v &= ~CAS_MIF_CONF_PHY_SELECT; CAS_WRITE_4(sc, CAS_MIF_CONF, v); CAS_BARRIER(sc, CAS_MIF_CONF, 4, BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE); /* Freeze the GMII pins of Saturn for saving power. */ if (sc->sc_variant == CAS_SATURN) { CAS_WRITE_4(sc, CAS_SATURN_PCFG, CAS_READ_4(sc, CAS_SATURN_PCFG) | CAS_SATURN_PCFG_FSI); CAS_BARRIER(sc, CAS_SATURN_PCFG, 4, BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE); DELAY(10000); } error = mii_attach(sc->sc_dev, &sc->sc_miibus, ifp, cas_mediachange, cas_mediastatus, BMSR_DEFCAPMASK, MII_PHY_ANY, MII_OFFSET_ANY, MIIF_DOPAUSE); } } else { /* * Use the external PCS SERDES. */ CAS_WRITE_4(sc, CAS_PCS_DATAPATH, CAS_PCS_DATAPATH_SERDES); CAS_BARRIER(sc, CAS_PCS_DATAPATH, 4, BUS_SPACE_BARRIER_WRITE); /* Enable/unfreeze the SERDES pins of Saturn. */ if (sc->sc_variant == CAS_SATURN) { CAS_WRITE_4(sc, CAS_SATURN_PCFG, 0); CAS_BARRIER(sc, CAS_SATURN_PCFG, 4, BUS_SPACE_BARRIER_WRITE); } CAS_WRITE_4(sc, CAS_PCS_SERDES_CTRL, CAS_PCS_SERDES_CTRL_ESD); CAS_BARRIER(sc, CAS_PCS_SERDES_CTRL, 4, BUS_SPACE_BARRIER_WRITE); CAS_WRITE_4(sc, CAS_PCS_CONF, CAS_PCS_CONF_EN); CAS_BARRIER(sc, CAS_PCS_CONF, 4, BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE); error = mii_attach(sc->sc_dev, &sc->sc_miibus, ifp, cas_mediachange, cas_mediastatus, BMSR_DEFCAPMASK, CAS_PHYAD_EXTERNAL, MII_OFFSET_ANY, MIIF_DOPAUSE); } if (error != 0) { device_printf(sc->sc_dev, "attaching PHYs failed\n"); goto fail_rxmap; } sc->sc_mii = device_get_softc(sc->sc_miibus); /* * From this point forward, the attachment cannot fail. A failure * before this point releases all resources that may have been * allocated. */ /* Announce FIFO sizes. */ v = CAS_READ_4(sc, CAS_TX_FIFO_SIZE); device_printf(sc->sc_dev, "%ukB RX FIFO, %ukB TX FIFO\n", CAS_RX_FIFO_SIZE / 1024, v / 16); /* Attach the interface. */ ether_ifattach(ifp, sc->sc_enaddr); /* * Tell the upper layer(s) we support long frames/checksum offloads. */ if_setifheaderlen(ifp, sizeof(struct ether_vlan_header)); if_setcapabilities(ifp, IFCAP_VLAN_MTU); if ((sc->sc_flags & CAS_NO_CSUM) == 0) { if_setcapabilitiesbit(ifp, IFCAP_HWCSUM, 0); if_sethwassist(ifp, CAS_CSUM_FEATURES); } if_setcapenable(ifp, if_getcapabilities(ifp)); return (0); /* * Free any resources we've allocated during the failed attach * attempt. Do this in reverse order and fall through. 
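 *
 * The labels below form the usual cleanup ladder: a failure at step
 * N jumps to the label that releases what step N-1 allocated, and
 * control then falls through the remaining labels so resources are
 * released in reverse order of allocation. Schematically (a sketch,
 * not the driver's actual code):
 *
 *	if ((error = alloc_b()) != 0)
 *		goto fail_a;
 *	if ((error = alloc_c()) != 0)
 *		goto fail_b;
 *	...
 *	fail_b:
 *		free_b();
 *	fail_a:
 *		free_a();
 *		return (error);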
*/ fail_rxmap: for (i = 0; i < CAS_NRXDESC; i++) if (sc->sc_rxdsoft[i].rxds_paddr != 0) bus_dmamap_unload(sc->sc_rdmatag, sc->sc_rxdsoft[i].rxds_dmamap); fail_rxmem: for (i = 0; i < CAS_NRXDESC; i++) if (sc->sc_rxdsoft[i].rxds_buf != NULL) bus_dmamem_free(sc->sc_rdmatag, sc->sc_rxdsoft[i].rxds_buf, sc->sc_rxdsoft[i].rxds_dmamap); fail_txd: for (i = 0; i < CAS_TXQUEUELEN; i++) if (sc->sc_txsoft[i].txs_dmamap != NULL) bus_dmamap_destroy(sc->sc_tdmatag, sc->sc_txsoft[i].txs_dmamap); bus_dmamap_unload(sc->sc_cdmatag, sc->sc_cddmamap); fail_cmem: bus_dmamem_free(sc->sc_cdmatag, sc->sc_control_data, sc->sc_cddmamap); fail_ctag: bus_dma_tag_destroy(sc->sc_cdmatag); fail_ttag: bus_dma_tag_destroy(sc->sc_tdmatag); fail_rtag: bus_dma_tag_destroy(sc->sc_rdmatag); fail_ptag: bus_dma_tag_destroy(sc->sc_pdmatag); fail_taskq: taskqueue_free(sc->sc_tq); if_free(ifp); return (error); } static void cas_detach(struct cas_softc *sc) { if_t ifp = sc->sc_ifp; int i; ether_ifdetach(ifp); CAS_LOCK(sc); cas_stop(ifp); CAS_UNLOCK(sc); callout_drain(&sc->sc_tick_ch); callout_drain(&sc->sc_rx_ch); taskqueue_drain(sc->sc_tq, &sc->sc_intr_task); taskqueue_drain(sc->sc_tq, &sc->sc_tx_task); if_free(ifp); taskqueue_free(sc->sc_tq); - device_delete_child(sc->sc_dev, sc->sc_miibus); + bus_generic_detach(sc->sc_dev); for (i = 0; i < CAS_NRXDESC; i++) if (sc->sc_rxdsoft[i].rxds_dmamap != NULL) bus_dmamap_sync(sc->sc_rdmatag, sc->sc_rxdsoft[i].rxds_dmamap, BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); for (i = 0; i < CAS_NRXDESC; i++) if (sc->sc_rxdsoft[i].rxds_paddr != 0) bus_dmamap_unload(sc->sc_rdmatag, sc->sc_rxdsoft[i].rxds_dmamap); for (i = 0; i < CAS_NRXDESC; i++) if (sc->sc_rxdsoft[i].rxds_buf != NULL) bus_dmamem_free(sc->sc_rdmatag, sc->sc_rxdsoft[i].rxds_buf, sc->sc_rxdsoft[i].rxds_dmamap); for (i = 0; i < CAS_TXQUEUELEN; i++) if (sc->sc_txsoft[i].txs_dmamap != NULL) bus_dmamap_destroy(sc->sc_tdmatag, sc->sc_txsoft[i].txs_dmamap); CAS_CDSYNC(sc, BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); bus_dmamap_unload(sc->sc_cdmatag, sc->sc_cddmamap); bus_dmamem_free(sc->sc_cdmatag, sc->sc_control_data, sc->sc_cddmamap); bus_dma_tag_destroy(sc->sc_cdmatag); bus_dma_tag_destroy(sc->sc_tdmatag); bus_dma_tag_destroy(sc->sc_rdmatag); bus_dma_tag_destroy(sc->sc_pdmatag); } static void cas_suspend(struct cas_softc *sc) { if_t ifp = sc->sc_ifp; CAS_LOCK(sc); cas_stop(ifp); CAS_UNLOCK(sc); } static void cas_resume(struct cas_softc *sc) { if_t ifp = sc->sc_ifp; CAS_LOCK(sc); /* * On resume all registers have to be initialized again like * after power-on. */ sc->sc_flags &= ~CAS_INITED; if (if_getflags(ifp) & IFF_UP) cas_init_locked(sc); CAS_UNLOCK(sc); } static inline void cas_rxcksum(struct mbuf *m, uint16_t cksum) { struct ether_header *eh; struct ip *ip; struct udphdr *uh; uint16_t *opts; int32_t hlen, len, pktlen; uint32_t temp32; pktlen = m->m_pkthdr.len; if (pktlen < sizeof(struct ether_header) + sizeof(struct ip)) return; eh = mtod(m, struct ether_header *); if (eh->ether_type != htons(ETHERTYPE_IP)) return; ip = (struct ip *)(eh + 1); if (ip->ip_v != IPVERSION) return; hlen = ip->ip_hl << 2; pktlen -= sizeof(struct ether_header); if (hlen < sizeof(struct ip)) return; if (ntohs(ip->ip_len) < hlen) return; if (ntohs(ip->ip_len) != pktlen) return; if (ip->ip_off & htons(IP_MF | IP_OFFMASK)) return; /* Cannot handle fragmented packet. 
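 *
 * The hardware checksum handled below covers a span that starts right
 * after a plain 20-byte IP header (the CSO programmed in
 * cas_rxcompinit()), so any IP options are included in it and must be
 * subtracted again in one's-complement arithmetic; each borrow is
 * folded back into the low 16 bits. Worked example with hypothetical
 * values:
 *
 *	cksum = 0x0001, *opts = 0x0003
 *	temp32 = 0x0001 - 0x0003                    = 0xfffffffe
 *	temp32 = (temp32 >> 16) + (temp32 & 0xffff) = 0x0001fffd
 *	cksum  = temp32 & 0xffff                    = 0xfffd
 *
 * which is 0x0001 minus 0x0003 in one's complement.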
*/ switch (ip->ip_p) { case IPPROTO_TCP: if (pktlen < (hlen + sizeof(struct tcphdr))) return; break; case IPPROTO_UDP: if (pktlen < (hlen + sizeof(struct udphdr))) return; uh = (struct udphdr *)((uint8_t *)ip + hlen); if (uh->uh_sum == 0) return; /* no checksum */ break; default: return; } cksum = ~cksum; /* checksum fixup for IP options */ len = hlen - sizeof(struct ip); if (len > 0) { opts = (uint16_t *)(ip + 1); for (; len > 0; len -= sizeof(uint16_t), opts++) { temp32 = cksum - *opts; temp32 = (temp32 >> 16) + (temp32 & 65535); cksum = temp32 & 65535; } } m->m_pkthdr.csum_flags |= CSUM_DATA_VALID; m->m_pkthdr.csum_data = cksum; } static void cas_cddma_callback(void *xsc, bus_dma_segment_t *segs, int nsegs, int error) { struct cas_softc *sc = xsc; if (error != 0) return; if (nsegs != 1) panic("%s: bad control buffer segment count", __func__); sc->sc_cddma = segs[0].ds_addr; } static void cas_rxdma_callback(void *xsc, bus_dma_segment_t *segs, int nsegs, int error) { struct cas_softc *sc = xsc; if (error != 0) return; if (nsegs != 1) panic("%s: bad RX buffer segment count", __func__); sc->sc_rxdsoft[sc->sc_rxdptr].rxds_paddr = segs[0].ds_addr; } static void cas_tick(void *arg) { struct cas_softc *sc = arg; if_t ifp = sc->sc_ifp; uint32_t v; CAS_LOCK_ASSERT(sc, MA_OWNED); /* * Unload collision and error counters. */ if_inc_counter(ifp, IFCOUNTER_COLLISIONS, CAS_READ_4(sc, CAS_MAC_NORM_COLL_CNT) + CAS_READ_4(sc, CAS_MAC_FIRST_COLL_CNT)); v = CAS_READ_4(sc, CAS_MAC_EXCESS_COLL_CNT) + CAS_READ_4(sc, CAS_MAC_LATE_COLL_CNT); if_inc_counter(ifp, IFCOUNTER_COLLISIONS, v); if_inc_counter(ifp, IFCOUNTER_OERRORS, v); if_inc_counter(ifp, IFCOUNTER_IERRORS, CAS_READ_4(sc, CAS_MAC_RX_LEN_ERR_CNT) + CAS_READ_4(sc, CAS_MAC_RX_ALIGN_ERR) + CAS_READ_4(sc, CAS_MAC_RX_CRC_ERR_CNT) + CAS_READ_4(sc, CAS_MAC_RX_CODE_VIOL)); /* * Then clear the hardware counters. */ CAS_WRITE_4(sc, CAS_MAC_NORM_COLL_CNT, 0); CAS_WRITE_4(sc, CAS_MAC_FIRST_COLL_CNT, 0); CAS_WRITE_4(sc, CAS_MAC_EXCESS_COLL_CNT, 0); CAS_WRITE_4(sc, CAS_MAC_LATE_COLL_CNT, 0); CAS_WRITE_4(sc, CAS_MAC_RX_LEN_ERR_CNT, 0); CAS_WRITE_4(sc, CAS_MAC_RX_ALIGN_ERR, 0); CAS_WRITE_4(sc, CAS_MAC_RX_CRC_ERR_CNT, 0); CAS_WRITE_4(sc, CAS_MAC_RX_CODE_VIOL, 0); mii_tick(sc->sc_mii); if (sc->sc_txfree != CAS_MAXTXFREE) cas_tint(sc); cas_watchdog(sc); callout_reset(&sc->sc_tick_ch, hz, cas_tick, sc); } static int cas_bitwait(struct cas_softc *sc, bus_addr_t r, uint32_t clr, uint32_t set) { int i; uint32_t reg; for (i = CAS_TRIES; i--; DELAY(100)) { reg = CAS_READ_4(sc, r); if ((reg & clr) == 0 && (reg & set) == set) return (1); } return (0); } static void cas_reset(struct cas_softc *sc) { #ifdef CAS_DEBUG CTR2(KTR_CAS, "%s: %s", device_get_name(sc->sc_dev), __func__); #endif /* Disable all interrupts in order to avoid spurious ones. */ CAS_WRITE_4(sc, CAS_INTMASK, 0xffffffff); cas_reset_rx(sc); cas_reset_tx(sc); /* * Do a full reset modulo the result of the last auto-negotiation * when using the SERDES. */ CAS_WRITE_4(sc, CAS_RESET, CAS_RESET_RX | CAS_RESET_TX | ((sc->sc_flags & CAS_SERDES) != 0 ? 
CAS_RESET_PCS_DIS : 0)); CAS_BARRIER(sc, CAS_RESET, 4, BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE); DELAY(3000); if (!cas_bitwait(sc, CAS_RESET, CAS_RESET_RX | CAS_RESET_TX, 0)) device_printf(sc->sc_dev, "cannot reset device\n"); } static void cas_stop(if_t ifp) { struct cas_softc *sc = if_getsoftc(ifp); struct cas_txsoft *txs; #ifdef CAS_DEBUG CTR2(KTR_CAS, "%s: %s", device_get_name(sc->sc_dev), __func__); #endif callout_stop(&sc->sc_tick_ch); callout_stop(&sc->sc_rx_ch); /* Disable all interrupts in order to avoid spurious ones. */ CAS_WRITE_4(sc, CAS_INTMASK, 0xffffffff); cas_reset_tx(sc); cas_reset_rx(sc); /* * Release any queued transmit buffers. */ while ((txs = STAILQ_FIRST(&sc->sc_txdirtyq)) != NULL) { STAILQ_REMOVE_HEAD(&sc->sc_txdirtyq, txs_q); if (txs->txs_ndescs != 0) { bus_dmamap_sync(sc->sc_tdmatag, txs->txs_dmamap, BUS_DMASYNC_POSTWRITE); bus_dmamap_unload(sc->sc_tdmatag, txs->txs_dmamap); if (txs->txs_mbuf != NULL) { m_freem(txs->txs_mbuf); txs->txs_mbuf = NULL; } } STAILQ_INSERT_TAIL(&sc->sc_txfreeq, txs, txs_q); } /* * Mark the interface down and cancel the watchdog timer. */ if_setdrvflagbits(ifp, 0, (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)); sc->sc_flags &= ~CAS_LINK; sc->sc_wdog_timer = 0; } static int cas_reset_rx(struct cas_softc *sc) { /* * Resetting while DMA is in progress can cause a bus hang, so we * disable DMA first. */ (void)cas_disable_rx(sc); CAS_WRITE_4(sc, CAS_RX_CONF, 0); CAS_BARRIER(sc, CAS_RX_CONF, 4, BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE); if (!cas_bitwait(sc, CAS_RX_CONF, CAS_RX_CONF_RXDMA_EN, 0)) device_printf(sc->sc_dev, "cannot disable RX DMA\n"); /* Finally, reset the ERX. */ CAS_WRITE_4(sc, CAS_RESET, CAS_RESET_RX | ((sc->sc_flags & CAS_SERDES) != 0 ? CAS_RESET_PCS_DIS : 0)); CAS_BARRIER(sc, CAS_RESET, 4, BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE); if (!cas_bitwait(sc, CAS_RESET, CAS_RESET_RX, 0)) { device_printf(sc->sc_dev, "cannot reset receiver\n"); return (1); } return (0); } static int cas_reset_tx(struct cas_softc *sc) { /* * Resetting while DMA is in progress can cause a bus hang, so we * disable DMA first. */ (void)cas_disable_tx(sc); CAS_WRITE_4(sc, CAS_TX_CONF, 0); CAS_BARRIER(sc, CAS_TX_CONF, 4, BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE); if (!cas_bitwait(sc, CAS_TX_CONF, CAS_TX_CONF_TXDMA_EN, 0)) device_printf(sc->sc_dev, "cannot disable TX DMA\n"); /* Finally, reset the ETX. */ CAS_WRITE_4(sc, CAS_RESET, CAS_RESET_TX | ((sc->sc_flags & CAS_SERDES) != 0 ? 
CAS_RESET_PCS_DIS : 0)); CAS_BARRIER(sc, CAS_RESET, 4, BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE); if (!cas_bitwait(sc, CAS_RESET, CAS_RESET_TX, 0)) { device_printf(sc->sc_dev, "cannot reset transmitter\n"); return (1); } return (0); } static int cas_disable_rx(struct cas_softc *sc) { CAS_WRITE_4(sc, CAS_MAC_RX_CONF, CAS_READ_4(sc, CAS_MAC_RX_CONF) & ~CAS_MAC_RX_CONF_EN); CAS_BARRIER(sc, CAS_MAC_RX_CONF, 4, BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE); if (cas_bitwait(sc, CAS_MAC_RX_CONF, CAS_MAC_RX_CONF_EN, 0)) return (1); if (bootverbose) device_printf(sc->sc_dev, "cannot disable RX MAC\n"); return (0); } static int cas_disable_tx(struct cas_softc *sc) { CAS_WRITE_4(sc, CAS_MAC_TX_CONF, CAS_READ_4(sc, CAS_MAC_TX_CONF) & ~CAS_MAC_TX_CONF_EN); CAS_BARRIER(sc, CAS_MAC_TX_CONF, 4, BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE); if (cas_bitwait(sc, CAS_MAC_TX_CONF, CAS_MAC_TX_CONF_EN, 0)) return (1); if (bootverbose) device_printf(sc->sc_dev, "cannot disable TX MAC\n"); return (0); } static inline void cas_rxcompinit(struct cas_rx_comp *rxcomp) { rxcomp->crc_word1 = 0; rxcomp->crc_word2 = 0; rxcomp->crc_word3 = htole64(CAS_SET(ETHER_HDR_LEN + sizeof(struct ip), CAS_RC3_CSO)); rxcomp->crc_word4 = htole64(CAS_RC4_ZERO); } static void cas_meminit(struct cas_softc *sc) { int i; CAS_LOCK_ASSERT(sc, MA_OWNED); /* * Initialize the transmit descriptor ring. */ for (i = 0; i < CAS_NTXDESC; i++) { sc->sc_txdescs[i].cd_flags = 0; sc->sc_txdescs[i].cd_buf_ptr = 0; } sc->sc_txfree = CAS_MAXTXFREE; sc->sc_txnext = 0; sc->sc_txwin = 0; /* * Initialize the receive completion ring. */ for (i = 0; i < CAS_NRXCOMP; i++) cas_rxcompinit(&sc->sc_rxcomps[i]); sc->sc_rxcptr = 0; /* * Initialize the first receive descriptor ring. We leave * the second one zeroed as we don't actually use it. */ for (i = 0; i < CAS_NRXDESC; i++) CAS_INIT_RXDESC(sc, i, i); sc->sc_rxdptr = 0; CAS_CDSYNC(sc, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); } static u_int cas_descsize(u_int sz) { switch (sz) { case 32: return (CAS_DESC_32); case 64: return (CAS_DESC_64); case 128: return (CAS_DESC_128); case 256: return (CAS_DESC_256); case 512: return (CAS_DESC_512); case 1024: return (CAS_DESC_1K); case 2048: return (CAS_DESC_2K); case 4096: return (CAS_DESC_4K); case 8192: return (CAS_DESC_8K); default: printf("%s: invalid descriptor ring size %d\n", __func__, sz); return (CAS_DESC_32); } } static u_int cas_rxcompsize(u_int sz) { switch (sz) { case 128: return (CAS_RX_CONF_COMP_128); case 256: return (CAS_RX_CONF_COMP_256); case 512: return (CAS_RX_CONF_COMP_512); case 1024: return (CAS_RX_CONF_COMP_1K); case 2048: return (CAS_RX_CONF_COMP_2K); case 4096: return (CAS_RX_CONF_COMP_4K); case 8192: return (CAS_RX_CONF_COMP_8K); case 16384: return (CAS_RX_CONF_COMP_16K); case 32768: return (CAS_RX_CONF_COMP_32K); default: printf("%s: invalid completion ring size %d\n", __func__, sz); return (CAS_RX_CONF_COMP_128); } } static void cas_init(void *xsc) { struct cas_softc *sc = xsc; CAS_LOCK(sc); cas_init_locked(sc); CAS_UNLOCK(sc); } /* * Initialization of interface; set up initialization block * and transmit/receive descriptor rings. */ static void cas_init_locked(struct cas_softc *sc) { if_t ifp = sc->sc_ifp; uint32_t v; CAS_LOCK_ASSERT(sc, MA_OWNED); if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0) return; #ifdef CAS_DEBUG CTR2(KTR_CAS, "%s: %s: calling stop", device_get_name(sc->sc_dev), __func__); #endif /* * Initialization sequence.
The numbered steps below correspond * to the sequence outlined in section 6.3.5.1 in the Ethernet * Channel Engine manual (part of the PCIO manual). * See also the STP2002-STQ document from Sun Microsystems. */ /* step 1 & 2. Reset the Ethernet Channel. */ cas_stop(ifp); cas_reset(sc); #ifdef CAS_DEBUG CTR2(KTR_CAS, "%s: %s: restarting", device_get_name(sc->sc_dev), __func__); #endif if ((sc->sc_flags & CAS_SERDES) == 0) /* Re-initialize the MIF. */ cas_mifinit(sc); /* step 3. Setup data structures in host memory. */ cas_meminit(sc); /* step 4. TX MAC registers & counters */ cas_init_regs(sc); /* step 5. RX MAC registers & counters */ /* step 6 & 7. Program Ring Base Addresses. */ CAS_WRITE_4(sc, CAS_TX_DESC3_BASE_HI, (((uint64_t)CAS_CDTXDADDR(sc, 0)) >> 32)); CAS_WRITE_4(sc, CAS_TX_DESC3_BASE_LO, CAS_CDTXDADDR(sc, 0) & 0xffffffff); CAS_WRITE_4(sc, CAS_RX_COMP_BASE_HI, (((uint64_t)CAS_CDRXCADDR(sc, 0)) >> 32)); CAS_WRITE_4(sc, CAS_RX_COMP_BASE_LO, CAS_CDRXCADDR(sc, 0) & 0xffffffff); CAS_WRITE_4(sc, CAS_RX_DESC_BASE_HI, (((uint64_t)CAS_CDRXDADDR(sc, 0)) >> 32)); CAS_WRITE_4(sc, CAS_RX_DESC_BASE_LO, CAS_CDRXDADDR(sc, 0) & 0xffffffff); if ((sc->sc_flags & CAS_REG_PLUS) != 0) { CAS_WRITE_4(sc, CAS_RX_DESC2_BASE_HI, (((uint64_t)CAS_CDRXD2ADDR(sc, 0)) >> 32)); CAS_WRITE_4(sc, CAS_RX_DESC2_BASE_LO, CAS_CDRXD2ADDR(sc, 0) & 0xffffffff); } #ifdef CAS_DEBUG CTR5(KTR_CAS, "loading TXDR %lx, RXCR %lx, RXDR %lx, RXD2R %lx, cddma %lx", CAS_CDTXDADDR(sc, 0), CAS_CDRXCADDR(sc, 0), CAS_CDRXDADDR(sc, 0), CAS_CDRXD2ADDR(sc, 0), sc->sc_cddma); #endif /* step 8. Global Configuration & Interrupt Masks */ /* Disable weighted round robin. */ CAS_WRITE_4(sc, CAS_CAW, CAS_CAW_RR_DIS); /* * Enable infinite bursts for revisions without PCI issues if * applicable. Doing so greatly improves the TX performance. */ CAS_WRITE_4(sc, CAS_INF_BURST, (sc->sc_flags & CAS_TABORT) == 0 ? CAS_INF_BURST_EN : 0); /* Set up interrupts. */ CAS_WRITE_4(sc, CAS_INTMASK, ~(CAS_INTR_TX_INT_ME | CAS_INTR_TX_TAG_ERR | CAS_INTR_RX_DONE | CAS_INTR_RX_BUF_NA | CAS_INTR_RX_TAG_ERR | CAS_INTR_RX_COMP_FULL | CAS_INTR_RX_BUF_AEMPTY | CAS_INTR_RX_COMP_AFULL | CAS_INTR_RX_LEN_MMATCH | CAS_INTR_PCI_ERROR_INT #ifdef CAS_DEBUG | CAS_INTR_PCS_INT | CAS_INTR_MIF #endif )); /* Don't clear top level interrupts when CAS_STATUS_ALIAS is read. */ CAS_WRITE_4(sc, CAS_CLEAR_ALIAS, 0); CAS_WRITE_4(sc, CAS_MAC_RX_MASK, ~CAS_MAC_RX_OVERFLOW); CAS_WRITE_4(sc, CAS_MAC_TX_MASK, ~(CAS_MAC_TX_UNDERRUN | CAS_MAC_TX_MAX_PKT_ERR)); #ifdef CAS_DEBUG CAS_WRITE_4(sc, CAS_MAC_CTRL_MASK, ~(CAS_MAC_CTRL_PAUSE_RCVD | CAS_MAC_CTRL_PAUSE | CAS_MAC_CTRL_NON_PAUSE)); #else CAS_WRITE_4(sc, CAS_MAC_CTRL_MASK, CAS_MAC_CTRL_PAUSE_RCVD | CAS_MAC_CTRL_PAUSE | CAS_MAC_CTRL_NON_PAUSE); #endif /* Enable PCI error interrupts. */ CAS_WRITE_4(sc, CAS_ERROR_MASK, ~(CAS_ERROR_DTRTO | CAS_ERROR_OTHER | CAS_ERROR_DMAW_ZERO | CAS_ERROR_DMAR_ZERO | CAS_ERROR_RTRTO)); /* Enable PCI error interrupts in BIM configuration. */ CAS_WRITE_4(sc, CAS_BIM_CONF, CAS_BIM_CONF_DPAR_EN | CAS_BIM_CONF_RMA_EN | CAS_BIM_CONF_RTA_EN); /* * step 9. ETX Configuration: encode receive descriptor ring size, * enable DMA and disable pre-interrupt writeback completion. */ v = cas_descsize(CAS_NTXDESC) << CAS_TX_CONF_DESC3_SHFT; CAS_WRITE_4(sc, CAS_TX_CONF, v | CAS_TX_CONF_TXDMA_EN | CAS_TX_CONF_RDPP_DIS | CAS_TX_CONF_PICWB_DIS); /* step 10. ERX Configuration */ /* * Encode receive completion and descriptor ring sizes, set the * swivel offset. 
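 * The swivel offset (ETHER_ALIGN, 2 bytes) shifts the received frame
 * so that the IP header following the 14-byte Ethernet header lands
 * on a 32-bit boundary.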
*/ v = cas_rxcompsize(CAS_NRXCOMP) << CAS_RX_CONF_COMP_SHFT; v |= cas_descsize(CAS_NRXDESC) << CAS_RX_CONF_DESC_SHFT; if ((sc->sc_flags & CAS_REG_PLUS) != 0) v |= cas_descsize(CAS_NRXDESC2) << CAS_RX_CONF_DESC2_SHFT; CAS_WRITE_4(sc, CAS_RX_CONF, v | (ETHER_ALIGN << CAS_RX_CONF_SOFF_SHFT)); /* Set the PAUSE thresholds. We use the maximum OFF threshold. */ CAS_WRITE_4(sc, CAS_RX_PTHRS, (111 << CAS_RX_PTHRS_XOFF_SHFT) | (15 << CAS_RX_PTHRS_XON_SHFT)); /* RX blanking */ CAS_WRITE_4(sc, CAS_RX_BLANK, (15 << CAS_RX_BLANK_TIME_SHFT) | (5 << CAS_RX_BLANK_PKTS_SHFT)); /* Set RX_COMP_AFULL threshold to half of the RX completions. */ CAS_WRITE_4(sc, CAS_RX_AEMPTY_THRS, (CAS_NRXCOMP / 2) << CAS_RX_AEMPTY_COMP_SHFT); /* Initialize the RX page size register as appropriate for 8k. */ CAS_WRITE_4(sc, CAS_RX_PSZ, (CAS_RX_PSZ_8K << CAS_RX_PSZ_SHFT) | (4 << CAS_RX_PSZ_MB_CNT_SHFT) | (CAS_RX_PSZ_MB_STRD_2K << CAS_RX_PSZ_MB_STRD_SHFT) | (CAS_RX_PSZ_MB_OFF_64 << CAS_RX_PSZ_MB_OFF_SHFT)); /* Disable RX random early detection. */ CAS_WRITE_4(sc, CAS_RX_RED, 0); /* Zero the RX reassembly DMA table. */ for (v = 0; v <= CAS_RX_REAS_DMA_ADDR_LC; v++) { CAS_WRITE_4(sc, CAS_RX_REAS_DMA_ADDR, v); CAS_WRITE_4(sc, CAS_RX_REAS_DMA_DATA_LO, 0); CAS_WRITE_4(sc, CAS_RX_REAS_DMA_DATA_MD, 0); CAS_WRITE_4(sc, CAS_RX_REAS_DMA_DATA_HI, 0); } /* Ensure the RX control FIFO and RX IPP FIFO addresses are zero. */ CAS_WRITE_4(sc, CAS_RX_CTRL_FIFO, 0); CAS_WRITE_4(sc, CAS_RX_IPP_ADDR, 0); /* Finally, enable RX DMA. */ CAS_WRITE_4(sc, CAS_RX_CONF, CAS_READ_4(sc, CAS_RX_CONF) | CAS_RX_CONF_RXDMA_EN); /* step 11. Configure Media. */ /* step 12. RX_MAC Configuration Register */ v = CAS_READ_4(sc, CAS_MAC_RX_CONF); v &= ~(CAS_MAC_RX_CONF_STRPPAD | CAS_MAC_RX_CONF_EN); v |= CAS_MAC_RX_CONF_STRPFCS; sc->sc_mac_rxcfg = v; /* * Clear the RX filter and reprogram it. This will also set the * current RX MAC configuration and enable it. */ cas_setladrf(sc); /* step 13. TX_MAC Configuration Register */ v = CAS_READ_4(sc, CAS_MAC_TX_CONF); v |= CAS_MAC_TX_CONF_EN; (void)cas_disable_tx(sc); CAS_WRITE_4(sc, CAS_MAC_TX_CONF, v); /* step 14. Issue Transmit Pending command. */ /* step 15. Give the receiver a swift kick. */ CAS_WRITE_4(sc, CAS_RX_KICK, CAS_NRXDESC - 4); CAS_WRITE_4(sc, CAS_RX_COMP_TAIL, 0); if ((sc->sc_flags & CAS_REG_PLUS) != 0) CAS_WRITE_4(sc, CAS_RX_KICK2, CAS_NRXDESC2 - 4); if_setdrvflagbits(ifp, IFF_DRV_RUNNING, 0); if_setdrvflagbits(ifp, 0, IFF_DRV_OACTIVE); mii_mediachg(sc->sc_mii); /* Start the one second timer. */ sc->sc_wdog_timer = 0; callout_reset(&sc->sc_tick_ch, hz, cas_tick, sc); } static int cas_load_txmbuf(struct cas_softc *sc, struct mbuf **m_head) { bus_dma_segment_t txsegs[CAS_NTXSEGS]; struct cas_txsoft *txs; struct ip *ip; struct mbuf *m; uint64_t cflags; int error, nexttx, nsegs, offset, seg; CAS_LOCK_ASSERT(sc, MA_OWNED); /* Get a work queue entry. */ if ((txs = STAILQ_FIRST(&sc->sc_txfreeq)) == NULL) { /* Ran out of descriptors. 
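 * (Each queued frame consumes up to CAS_NTXSEGS descriptors, and one
 * ring entry is additionally kept permanently unused so that a full
 * ring can be distinguished from an empty one; see the
 * "sc->sc_txfree - 1" test further below.)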
*/ return (ENOBUFS); } cflags = 0; if (((*m_head)->m_pkthdr.csum_flags & CAS_CSUM_FEATURES) != 0) { if (M_WRITABLE(*m_head) == 0) { m = m_dup(*m_head, M_NOWAIT); m_freem(*m_head); *m_head = m; if (m == NULL) return (ENOBUFS); } offset = sizeof(struct ether_header); m = m_pullup(*m_head, offset + sizeof(struct ip)); if (m == NULL) { *m_head = NULL; return (ENOBUFS); } ip = (struct ip *)(mtod(m, caddr_t) + offset); offset += (ip->ip_hl << 2); cflags = (offset << CAS_TD_CKSUM_START_SHFT) | ((offset + m->m_pkthdr.csum_data) << CAS_TD_CKSUM_STUFF_SHFT) | CAS_TD_CKSUM_EN; *m_head = m; } error = bus_dmamap_load_mbuf_sg(sc->sc_tdmatag, txs->txs_dmamap, *m_head, txsegs, &nsegs, BUS_DMA_NOWAIT); if (error == EFBIG) { m = m_collapse(*m_head, M_NOWAIT, CAS_NTXSEGS); if (m == NULL) { m_freem(*m_head); *m_head = NULL; return (ENOBUFS); } *m_head = m; error = bus_dmamap_load_mbuf_sg(sc->sc_tdmatag, txs->txs_dmamap, *m_head, txsegs, &nsegs, BUS_DMA_NOWAIT); if (error != 0) { m_freem(*m_head); *m_head = NULL; return (error); } } else if (error != 0) return (error); /* If nsegs is wrong then the stack is corrupt. */ KASSERT(nsegs <= CAS_NTXSEGS, ("%s: too many DMA segments (%d)", __func__, nsegs)); if (nsegs == 0) { m_freem(*m_head); *m_head = NULL; return (EIO); } /* * Ensure we have enough descriptors free to describe * the packet. Note, we always reserve one descriptor * at the end of the ring as a termination point, in * order to prevent wrap-around. */ if (nsegs > sc->sc_txfree - 1) { txs->txs_ndescs = 0; bus_dmamap_unload(sc->sc_tdmatag, txs->txs_dmamap); return (ENOBUFS); } txs->txs_ndescs = nsegs; txs->txs_firstdesc = sc->sc_txnext; nexttx = txs->txs_firstdesc; for (seg = 0; seg < nsegs; seg++, nexttx = CAS_NEXTTX(nexttx)) { #ifdef CAS_DEBUG CTR6(KTR_CAS, "%s: mapping seg %d (txd %d), len %lx, addr %#lx (%#lx)", __func__, seg, nexttx, txsegs[seg].ds_len, txsegs[seg].ds_addr, htole64(txsegs[seg].ds_addr)); #endif sc->sc_txdescs[nexttx].cd_buf_ptr = htole64(txsegs[seg].ds_addr); KASSERT(txsegs[seg].ds_len < CAS_TD_BUF_LEN_MASK >> CAS_TD_BUF_LEN_SHFT, ("%s: segment size too large!", __func__)); sc->sc_txdescs[nexttx].cd_flags = htole64(txsegs[seg].ds_len << CAS_TD_BUF_LEN_SHFT); txs->txs_lastdesc = nexttx; } /* Set EOF on the last descriptor. */ #ifdef CAS_DEBUG CTR3(KTR_CAS, "%s: end of frame at segment %d, TX %d", __func__, seg, nexttx); #endif sc->sc_txdescs[txs->txs_lastdesc].cd_flags |= htole64(CAS_TD_END_OF_FRAME); /* Lastly set SOF on the first descriptor. */ #ifdef CAS_DEBUG CTR3(KTR_CAS, "%s: start of frame at segment %d, TX %d", __func__, seg, nexttx); #endif if (sc->sc_txwin += nsegs > CAS_MAXTXFREE * 2 / 3) { sc->sc_txwin = 0; sc->sc_txdescs[txs->txs_firstdesc].cd_flags |= htole64(cflags | CAS_TD_START_OF_FRAME | CAS_TD_INT_ME); } else sc->sc_txdescs[txs->txs_firstdesc].cd_flags |= htole64(cflags | CAS_TD_START_OF_FRAME); /* Sync the DMA map. */ bus_dmamap_sync(sc->sc_tdmatag, txs->txs_dmamap, BUS_DMASYNC_PREWRITE); #ifdef CAS_DEBUG CTR4(KTR_CAS, "%s: setting firstdesc=%d, lastdesc=%d, ndescs=%d", __func__, txs->txs_firstdesc, txs->txs_lastdesc, txs->txs_ndescs); #endif STAILQ_REMOVE_HEAD(&sc->sc_txfreeq, txs_q); STAILQ_INSERT_TAIL(&sc->sc_txdirtyq, txs, txs_q); txs->txs_mbuf = *m_head; sc->sc_txnext = CAS_NEXTTX(txs->txs_lastdesc); sc->sc_txfree -= txs->txs_ndescs; return (0); } static void cas_init_regs(struct cas_softc *sc) { int i; const u_char *laddr = if_getlladdr(sc->sc_ifp); CAS_LOCK_ASSERT(sc, MA_OWNED); /* These registers are not cleared on reset. 
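 * They are therefore programmed only once per power-on, guarded by
 * the CAS_INITED flag; cas_resume() clears that flag so that a
 * resume runs the full sequence again.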
*/ if ((sc->sc_flags & CAS_INITED) == 0) { /* magic values */ CAS_WRITE_4(sc, CAS_MAC_IPG0, 0); CAS_WRITE_4(sc, CAS_MAC_IPG1, 8); CAS_WRITE_4(sc, CAS_MAC_IPG2, 4); /* min frame length */ CAS_WRITE_4(sc, CAS_MAC_MIN_FRAME, ETHER_MIN_LEN); /* max frame length and max burst size */ CAS_WRITE_4(sc, CAS_MAC_MAX_BF, ((ETHER_MAX_LEN_JUMBO + ETHER_VLAN_ENCAP_LEN) << CAS_MAC_MAX_BF_FRM_SHFT) | (0x2000 << CAS_MAC_MAX_BF_BST_SHFT)); /* more magic values */ CAS_WRITE_4(sc, CAS_MAC_PREAMBLE_LEN, 0x7); CAS_WRITE_4(sc, CAS_MAC_JAM_SIZE, 0x4); CAS_WRITE_4(sc, CAS_MAC_ATTEMPT_LIMIT, 0x10); CAS_WRITE_4(sc, CAS_MAC_CTRL_TYPE, 0x8808); /* random number seed */ CAS_WRITE_4(sc, CAS_MAC_RANDOM_SEED, ((laddr[5] << 8) | laddr[4]) & 0x3ff); /* secondary MAC addresses: 0:0:0:0:0:0 */ for (i = CAS_MAC_ADDR3; i <= CAS_MAC_ADDR41; i += CAS_MAC_ADDR4 - CAS_MAC_ADDR3) CAS_WRITE_4(sc, i, 0); /* MAC control address: 01:80:c2:00:00:01 */ CAS_WRITE_4(sc, CAS_MAC_ADDR42, 0x0001); CAS_WRITE_4(sc, CAS_MAC_ADDR43, 0xc200); CAS_WRITE_4(sc, CAS_MAC_ADDR44, 0x0180); /* MAC filter address: 0:0:0:0:0:0 */ CAS_WRITE_4(sc, CAS_MAC_AFILTER0, 0); CAS_WRITE_4(sc, CAS_MAC_AFILTER1, 0); CAS_WRITE_4(sc, CAS_MAC_AFILTER2, 0); CAS_WRITE_4(sc, CAS_MAC_AFILTER_MASK1_2, 0); CAS_WRITE_4(sc, CAS_MAC_AFILTER_MASK0, 0); /* Zero the hash table. */ for (i = CAS_MAC_HASH0; i <= CAS_MAC_HASH15; i += CAS_MAC_HASH1 - CAS_MAC_HASH0) CAS_WRITE_4(sc, i, 0); sc->sc_flags |= CAS_INITED; } /* Counters need to be zeroed. */ CAS_WRITE_4(sc, CAS_MAC_NORM_COLL_CNT, 0); CAS_WRITE_4(sc, CAS_MAC_FIRST_COLL_CNT, 0); CAS_WRITE_4(sc, CAS_MAC_EXCESS_COLL_CNT, 0); CAS_WRITE_4(sc, CAS_MAC_LATE_COLL_CNT, 0); CAS_WRITE_4(sc, CAS_MAC_DEFER_TMR_CNT, 0); CAS_WRITE_4(sc, CAS_MAC_PEAK_ATTEMPTS, 0); CAS_WRITE_4(sc, CAS_MAC_RX_FRAME_COUNT, 0); CAS_WRITE_4(sc, CAS_MAC_RX_LEN_ERR_CNT, 0); CAS_WRITE_4(sc, CAS_MAC_RX_ALIGN_ERR, 0); CAS_WRITE_4(sc, CAS_MAC_RX_CRC_ERR_CNT, 0); CAS_WRITE_4(sc, CAS_MAC_RX_CODE_VIOL, 0); /* Set XOFF PAUSE time. */ CAS_WRITE_4(sc, CAS_MAC_SPC, 0x1BF0 << CAS_MAC_SPC_TIME_SHFT); /* Set the station address. */ CAS_WRITE_4(sc, CAS_MAC_ADDR0, (laddr[4] << 8) | laddr[5]); CAS_WRITE_4(sc, CAS_MAC_ADDR1, (laddr[2] << 8) | laddr[3]); CAS_WRITE_4(sc, CAS_MAC_ADDR2, (laddr[0] << 8) | laddr[1]); /* Enable MII outputs. */ CAS_WRITE_4(sc, CAS_MAC_XIF_CONF, CAS_MAC_XIF_CONF_TX_OE); } static void cas_tx_task(void *arg, int pending __unused) { if_t ifp; ifp = (if_t)arg; cas_start(ifp); } static inline void cas_txkick(struct cas_softc *sc) { /* * Update the TX kick register. This register has to point to the * descriptor after the last valid one and for optimum performance * should be incremented in multiples of 4 (the DMA engine fetches/ * updates descriptors in batches of 4). 
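 *
 * cas_start() therefore kicks eagerly only when sc_txnext has just
 * reached a multiple of 4 and issues one final kick for any
 * remainder. For example, if queued frames advance sc_txnext
 * 0 -> 3 -> 8 -> 10, the register is written at 8 (aligned) and once
 * more at 10 when the send queue has drained.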
*/ #ifdef CAS_DEBUG CTR3(KTR_CAS, "%s: %s: kicking TX %d", device_get_name(sc->sc_dev), __func__, sc->sc_txnext); #endif CAS_CDSYNC(sc, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); CAS_WRITE_4(sc, CAS_TX_KICK3, sc->sc_txnext); } static void cas_start(if_t ifp) { struct cas_softc *sc = if_getsoftc(ifp); struct mbuf *m; int kicked, ntx; CAS_LOCK(sc); if ((if_getdrvflags(ifp) & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) != IFF_DRV_RUNNING || (sc->sc_flags & CAS_LINK) == 0) { CAS_UNLOCK(sc); return; } if (sc->sc_txfree < CAS_MAXTXFREE / 4) cas_tint(sc); #ifdef CAS_DEBUG CTR4(KTR_CAS, "%s: %s: txfree %d, txnext %d", device_get_name(sc->sc_dev), __func__, sc->sc_txfree, sc->sc_txnext); #endif ntx = 0; kicked = 0; for (; !if_sendq_empty(ifp) && sc->sc_txfree > 1;) { m = if_dequeue(ifp); if (m == NULL) break; if (cas_load_txmbuf(sc, &m) != 0) { if (m == NULL) break; if_setdrvflagbits(ifp, IFF_DRV_OACTIVE, 0); if_sendq_prepend(ifp, m); break; } if ((sc->sc_txnext % 4) == 0) { cas_txkick(sc); kicked = 1; } else kicked = 0; ntx++; BPF_MTAP(ifp, m); } if (ntx > 0) { if (kicked == 0) cas_txkick(sc); #ifdef CAS_DEBUG CTR2(KTR_CAS, "%s: packets enqueued, OWN on %d", device_get_name(sc->sc_dev), sc->sc_txnext); #endif /* Set a watchdog timer in case the chip flakes out. */ sc->sc_wdog_timer = 5; #ifdef CAS_DEBUG CTR3(KTR_CAS, "%s: %s: watchdog %d", device_get_name(sc->sc_dev), __func__, sc->sc_wdog_timer); #endif } CAS_UNLOCK(sc); } static void cas_tint(struct cas_softc *sc) { if_t ifp = sc->sc_ifp; struct cas_txsoft *txs; int progress; uint32_t txlast; #ifdef CAS_DEBUG int i; CAS_LOCK_ASSERT(sc, MA_OWNED); CTR2(KTR_CAS, "%s: %s", device_get_name(sc->sc_dev), __func__); #endif /* * Go through our TX list and free mbufs for those * frames that have been transmitted. */ progress = 0; CAS_CDSYNC(sc, BUS_DMASYNC_POSTREAD); while ((txs = STAILQ_FIRST(&sc->sc_txdirtyq)) != NULL) { #ifdef CAS_DEBUG if ((if_getflags(ifp) & IFF_DEBUG) != 0) { printf(" txsoft %p transmit chain:\n", txs); for (i = txs->txs_firstdesc;; i = CAS_NEXTTX(i)) { printf("descriptor %d: ", i); printf("cd_flags: 0x%016llx\t", (long long)le64toh( sc->sc_txdescs[i].cd_flags)); printf("cd_buf_ptr: 0x%016llx\n", (long long)le64toh( sc->sc_txdescs[i].cd_buf_ptr)); if (i == txs->txs_lastdesc) break; } } #endif /* * In theory, we could harvest some descriptors before * the ring is empty, but that's a bit complicated. * * CAS_TX_COMPn points to the last descriptor * processed + 1. */ txlast = CAS_READ_4(sc, CAS_TX_COMP3); #ifdef CAS_DEBUG CTR4(KTR_CAS, "%s: txs->txs_firstdesc = %d, " "txs->txs_lastdesc = %d, txlast = %d", __func__, txs->txs_firstdesc, txs->txs_lastdesc, txlast); #endif if (txs->txs_firstdesc <= txs->txs_lastdesc) { if ((txlast >= txs->txs_firstdesc) && (txlast <= txs->txs_lastdesc)) break; } else { /* Ick -- this command wraps. 
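 * For example, in a 64-descriptor ring a frame occupying slots
 * 60..2 is still in flight while txlast lies in 60..63 or in 0..2,
 * hence the disjunction below instead of a simple range test.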
*/ if ((txlast >= txs->txs_firstdesc) || (txlast <= txs->txs_lastdesc)) break; } #ifdef CAS_DEBUG CTR1(KTR_CAS, "%s: releasing a descriptor", __func__); #endif STAILQ_REMOVE_HEAD(&sc->sc_txdirtyq, txs_q); sc->sc_txfree += txs->txs_ndescs; bus_dmamap_sync(sc->sc_tdmatag, txs->txs_dmamap, BUS_DMASYNC_POSTWRITE); bus_dmamap_unload(sc->sc_tdmatag, txs->txs_dmamap); if (txs->txs_mbuf != NULL) { m_freem(txs->txs_mbuf); txs->txs_mbuf = NULL; } STAILQ_INSERT_TAIL(&sc->sc_txfreeq, txs, txs_q); if_inc_counter(ifp, IFCOUNTER_OPACKETS, 1); progress = 1; } #ifdef CAS_DEBUG CTR5(KTR_CAS, "%s: CAS_TX_SM1 %x CAS_TX_SM2 %x CAS_TX_DESC_BASE %llx " "CAS_TX_COMP3 %x", __func__, CAS_READ_4(sc, CAS_TX_SM1), CAS_READ_4(sc, CAS_TX_SM2), ((long long)CAS_READ_4(sc, CAS_TX_DESC3_BASE_HI) << 32) | CAS_READ_4(sc, CAS_TX_DESC3_BASE_LO), CAS_READ_4(sc, CAS_TX_COMP3)); #endif if (progress) { /* We freed some descriptors, so reset IFF_DRV_OACTIVE. */ if_setdrvflagbits(ifp, 0, IFF_DRV_OACTIVE); if (STAILQ_EMPTY(&sc->sc_txdirtyq)) sc->sc_wdog_timer = 0; } #ifdef CAS_DEBUG CTR3(KTR_CAS, "%s: %s: watchdog %d", device_get_name(sc->sc_dev), __func__, sc->sc_wdog_timer); #endif } static void cas_rint_timeout(void *arg) { struct epoch_tracker et; struct cas_softc *sc = arg; CAS_LOCK_ASSERT(sc, MA_OWNED); NET_EPOCH_ENTER(et); cas_rint(sc); NET_EPOCH_EXIT(et); } static void cas_rint(struct cas_softc *sc) { struct cas_rxdsoft *rxds, *rxds2; if_t ifp = sc->sc_ifp; struct mbuf *m, *m2; uint64_t word1, word2, word3 __unused, word4; uint32_t rxhead; u_int idx, idx2, len, off, skip; CAS_LOCK_ASSERT(sc, MA_OWNED); callout_stop(&sc->sc_rx_ch); #ifdef CAS_DEBUG CTR2(KTR_CAS, "%s: %s", device_get_name(sc->sc_dev), __func__); #endif #define PRINTWORD(n, delimiter) \ printf("word ## n: 0x%016llx%c", (long long)word ## n, delimiter) #define SKIPASSERT(n) \ KASSERT(sc->sc_rxcomps[sc->sc_rxcptr].crc_word ## n == 0, \ ("%s: word ## n not 0", __func__)) #define WORDTOH(n) \ word ## n = le64toh(sc->sc_rxcomps[sc->sc_rxcptr].crc_word ## n) /* * Read the completion head register once. This limits * how long the following loop can execute. */ rxhead = CAS_READ_4(sc, CAS_RX_COMP_HEAD); #ifdef CAS_DEBUG CTR4(KTR_CAS, "%s: sc->sc_rxcptr %d, sc->sc_rxdptr %d, head %d", __func__, sc->sc_rxcptr, sc->sc_rxdptr, rxhead); #endif skip = 0; CAS_CDSYNC(sc, BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); for (; sc->sc_rxcptr != rxhead; sc->sc_rxcptr = CAS_NEXTRXCOMP(sc->sc_rxcptr)) { if (skip != 0) { SKIPASSERT(1); SKIPASSERT(2); SKIPASSERT(3); --skip; goto skip; } WORDTOH(1); WORDTOH(2); WORDTOH(3); WORDTOH(4); #ifdef CAS_DEBUG if ((if_getflags(ifp) & IFF_DEBUG) != 0) { printf(" completion %d: ", sc->sc_rxcptr); PRINTWORD(1, '\t'); PRINTWORD(2, '\t'); PRINTWORD(3, '\t'); PRINTWORD(4, '\n'); } #endif if (__predict_false( (word1 & CAS_RC1_TYPE_MASK) == CAS_RC1_TYPE_HW || (word4 & CAS_RC4_ZERO) != 0)) { /* * The descriptor is still marked as owned, although * it is supposed to have completed. This has been * observed on some machines. Just exiting here * might leave the packet sitting around until another * one arrives to trigger a new interrupt, which is * generally undesirable, so set up a timeout. 
*/ callout_reset(&sc->sc_rx_ch, CAS_RXOWN_TICKS, cas_rint_timeout, sc); break; } if (__predict_false( (word4 & (CAS_RC4_BAD | CAS_RC4_LEN_MMATCH)) != 0)) { if_inc_counter(ifp, IFCOUNTER_IERRORS, 1); device_printf(sc->sc_dev, "receive error: CRC error\n"); continue; } KASSERT(CAS_GET(word1, CAS_RC1_DATA_SIZE) == 0 || CAS_GET(word2, CAS_RC2_HDR_SIZE) == 0, ("%s: data and header present", __func__)); KASSERT((word1 & CAS_RC1_SPLIT_PKT) == 0 || CAS_GET(word2, CAS_RC2_HDR_SIZE) == 0, ("%s: split and header present", __func__)); KASSERT(CAS_GET(word1, CAS_RC1_DATA_SIZE) == 0 || (word1 & CAS_RC1_RELEASE_HDR) == 0, ("%s: data present but header release", __func__)); KASSERT(CAS_GET(word2, CAS_RC2_HDR_SIZE) == 0 || (word1 & CAS_RC1_RELEASE_DATA) == 0, ("%s: header present but data release", __func__)); if ((len = CAS_GET(word2, CAS_RC2_HDR_SIZE)) != 0) { idx = CAS_GET(word2, CAS_RC2_HDR_INDEX); off = CAS_GET(word2, CAS_RC2_HDR_OFF); #ifdef CAS_DEBUG CTR4(KTR_CAS, "%s: hdr at idx %d, off %d, len %d", __func__, idx, off, len); #endif rxds = &sc->sc_rxdsoft[idx]; MGETHDR(m, M_NOWAIT, MT_DATA); if (m != NULL) { refcount_acquire(&rxds->rxds_refcount); bus_dmamap_sync(sc->sc_rdmatag, rxds->rxds_dmamap, BUS_DMASYNC_POSTREAD); m_extadd(m, (char *)rxds->rxds_buf + off * 256 + ETHER_ALIGN, len, cas_free, sc, (void *)(uintptr_t)idx, M_RDONLY, EXT_NET_DRV); if ((m->m_flags & M_EXT) == 0) { m_freem(m); m = NULL; } } if (m != NULL) { m->m_pkthdr.rcvif = ifp; m->m_pkthdr.len = m->m_len = len; if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1); if ((if_getcapenable(ifp) & IFCAP_RXCSUM) != 0) cas_rxcksum(m, CAS_GET(word4, CAS_RC4_TCP_CSUM)); /* Pass it on. */ CAS_UNLOCK(sc); if_input(ifp, m); CAS_LOCK(sc); } else if_inc_counter(ifp, IFCOUNTER_IQDROPS, 1); if ((word1 & CAS_RC1_RELEASE_HDR) != 0 && refcount_release(&rxds->rxds_refcount) != 0) cas_add_rxdesc(sc, idx); } else if ((len = CAS_GET(word1, CAS_RC1_DATA_SIZE)) != 0) { idx = CAS_GET(word1, CAS_RC1_DATA_INDEX); off = CAS_GET(word1, CAS_RC1_DATA_OFF); #ifdef CAS_DEBUG CTR4(KTR_CAS, "%s: data at idx %d, off %d, len %d", __func__, idx, off, len); #endif rxds = &sc->sc_rxdsoft[idx]; MGETHDR(m, M_NOWAIT, MT_DATA); if (m != NULL) { refcount_acquire(&rxds->rxds_refcount); off += ETHER_ALIGN; m->m_len = min(CAS_PAGE_SIZE - off, len); bus_dmamap_sync(sc->sc_rdmatag, rxds->rxds_dmamap, BUS_DMASYNC_POSTREAD); m_extadd(m, (char *)rxds->rxds_buf + off, m->m_len, cas_free, sc, (void *)(uintptr_t)idx, M_RDONLY, EXT_NET_DRV); if ((m->m_flags & M_EXT) == 0) { m_freem(m); m = NULL; } } idx2 = 0; m2 = NULL; rxds2 = NULL; if ((word1 & CAS_RC1_SPLIT_PKT) != 0) { KASSERT((word1 & CAS_RC1_RELEASE_NEXT) != 0, ("%s: split but no release next", __func__)); idx2 = CAS_GET(word2, CAS_RC2_NEXT_INDEX); #ifdef CAS_DEBUG CTR2(KTR_CAS, "%s: split at idx %d", __func__, idx2); #endif rxds2 = &sc->sc_rxdsoft[idx2]; if (m != NULL) { MGET(m2, M_NOWAIT, MT_DATA); if (m2 != NULL) { refcount_acquire( &rxds2->rxds_refcount); m2->m_len = len - m->m_len; bus_dmamap_sync( sc->sc_rdmatag, rxds2->rxds_dmamap, BUS_DMASYNC_POSTREAD); m_extadd(m2, (char *)rxds2->rxds_buf, m2->m_len, cas_free, sc, (void *)(uintptr_t)idx2, M_RDONLY, EXT_NET_DRV); if ((m2->m_flags & M_EXT) == 0) { m_freem(m2); m2 = NULL; } } } if (m2 != NULL) m->m_next = m2; else if (m != NULL) { m_freem(m); m = NULL; } } if (m != NULL) { m->m_pkthdr.rcvif = ifp; m->m_pkthdr.len = len; if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1); if ((if_getcapenable(ifp) & IFCAP_RXCSUM) != 0) cas_rxcksum(m, CAS_GET(word4, CAS_RC4_TCP_CSUM)); /* Pass it on. 
*/ CAS_UNLOCK(sc); if_input(ifp, m); CAS_LOCK(sc); } else if_inc_counter(ifp, IFCOUNTER_IQDROPS, 1); if ((word1 & CAS_RC1_RELEASE_DATA) != 0 && refcount_release(&rxds->rxds_refcount) != 0) cas_add_rxdesc(sc, idx); if ((word1 & CAS_RC1_SPLIT_PKT) != 0 && refcount_release(&rxds2->rxds_refcount) != 0) cas_add_rxdesc(sc, idx2); } skip = CAS_GET(word1, CAS_RC1_SKIP); skip: cas_rxcompinit(&sc->sc_rxcomps[sc->sc_rxcptr]); if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) == 0) break; } CAS_CDSYNC(sc, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); CAS_WRITE_4(sc, CAS_RX_COMP_TAIL, sc->sc_rxcptr); #undef PRINTWORD #undef SKIPASSERT #undef WORDTOH #ifdef CAS_DEBUG CTR4(KTR_CAS, "%s: done sc->sc_rxcptr %d, sc->sc_rxdptr %d, head %d", __func__, sc->sc_rxcptr, sc->sc_rxdptr, CAS_READ_4(sc, CAS_RX_COMP_HEAD)); #endif } static void cas_free(struct mbuf *m) { struct cas_rxdsoft *rxds; struct cas_softc *sc; u_int idx, locked; sc = m->m_ext.ext_arg1; idx = (uintptr_t)m->m_ext.ext_arg2; rxds = &sc->sc_rxdsoft[idx]; if (refcount_release(&rxds->rxds_refcount) == 0) return; /* * NB: this function can be called via m_freem(9) within * this driver! */ if ((locked = CAS_LOCK_OWNED(sc)) == 0) CAS_LOCK(sc); cas_add_rxdesc(sc, idx); if (locked == 0) CAS_UNLOCK(sc); } static inline void cas_add_rxdesc(struct cas_softc *sc, u_int idx) { CAS_LOCK_ASSERT(sc, MA_OWNED); bus_dmamap_sync(sc->sc_rdmatag, sc->sc_rxdsoft[idx].rxds_dmamap, BUS_DMASYNC_PREREAD); CAS_UPDATE_RXDESC(sc, sc->sc_rxdptr, idx); sc->sc_rxdptr = CAS_NEXTRXDESC(sc->sc_rxdptr); /* * Update the RX kick register. This register has to point to the * descriptor after the last valid one (before the current batch) * and for optimum performance should be incremented in multiples * of 4 (the DMA engine fetches/updates descriptors in batches of 4). */ if ((sc->sc_rxdptr % 4) == 0) { CAS_CDSYNC(sc, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); CAS_WRITE_4(sc, CAS_RX_KICK, (sc->sc_rxdptr + CAS_NRXDESC - 4) & CAS_NRXDESC_MASK); } } static void cas_eint(struct cas_softc *sc, u_int status) { if_t ifp = sc->sc_ifp; CAS_LOCK_ASSERT(sc, MA_OWNED); if_inc_counter(ifp, IFCOUNTER_IERRORS, 1); device_printf(sc->sc_dev, "%s: status 0x%x", __func__, status); if ((status & CAS_INTR_PCI_ERROR_INT) != 0) { status = CAS_READ_4(sc, CAS_ERROR_STATUS); printf(", PCI bus error 0x%x", status); if ((status & CAS_ERROR_OTHER) != 0) { status = pci_read_config(sc->sc_dev, PCIR_STATUS, 2); printf(", PCI status 0x%x", status); pci_write_config(sc->sc_dev, PCIR_STATUS, status, 2); } } printf("\n"); if_setdrvflagbits(ifp, 0, IFF_DRV_RUNNING); cas_init_locked(sc); if (!if_sendq_empty(ifp)) taskqueue_enqueue(sc->sc_tq, &sc->sc_tx_task); } static int cas_intr(void *v) { struct cas_softc *sc = v; if (__predict_false((CAS_READ_4(sc, CAS_STATUS_ALIAS) & CAS_INTR_SUMMARY) == 0)) return (FILTER_STRAY); /* Disable interrupts. */ CAS_WRITE_4(sc, CAS_INTMASK, 0xffffffff); taskqueue_enqueue(sc->sc_tq, &sc->sc_intr_task); return (FILTER_HANDLED); } static void cas_intr_task(void *arg, int pending __unused) { struct cas_softc *sc = arg; if_t ifp = sc->sc_ifp; uint32_t status, status2; CAS_LOCK_ASSERT(sc, MA_NOTOWNED); if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) == 0) return; status = CAS_READ_4(sc, CAS_STATUS); if (__predict_false((status & CAS_INTR_SUMMARY) == 0)) goto done; CAS_LOCK(sc); #ifdef CAS_DEBUG CTR4(KTR_CAS, "%s: %s: cplt %x, status %x", device_get_name(sc->sc_dev), __func__, (status >> CAS_STATUS_TX_COMP3_SHFT), (u_int)status); /* * PCS interrupts must be cleared, otherwise no traffic is passed! 
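* The PCS interrupt status register appears to be cleared by the * read itself; it is read twice below and the two values merged so * that a bit latching between the reads is not missed.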
*/ if ((status & CAS_INTR_PCS_INT) != 0) { status2 = CAS_READ_4(sc, CAS_PCS_INTR_STATUS) | CAS_READ_4(sc, CAS_PCS_INTR_STATUS); if ((status2 & CAS_PCS_INTR_LINK) != 0) device_printf(sc->sc_dev, "%s: PCS link status changed\n", __func__); } if ((status & CAS_MAC_CTRL_STATUS) != 0) { status2 = CAS_READ_4(sc, CAS_MAC_CTRL_STATUS); if ((status2 & CAS_MAC_CTRL_PAUSE) != 0) device_printf(sc->sc_dev, "%s: PAUSE received (PAUSE time %d slots)\n", __func__, (status2 & CAS_MAC_CTRL_STATUS_PT_MASK) >> CAS_MAC_CTRL_STATUS_PT_SHFT); if ((status2 & CAS_MAC_CTRL_PAUSE) != 0) device_printf(sc->sc_dev, "%s: transited to PAUSE state\n", __func__); if ((status2 & CAS_MAC_CTRL_NON_PAUSE) != 0) device_printf(sc->sc_dev, "%s: transited to non-PAUSE state\n", __func__); } if ((status & CAS_INTR_MIF) != 0) device_printf(sc->sc_dev, "%s: MIF interrupt\n", __func__); #endif if (__predict_false((status & (CAS_INTR_TX_TAG_ERR | CAS_INTR_RX_TAG_ERR | CAS_INTR_RX_LEN_MMATCH | CAS_INTR_PCI_ERROR_INT)) != 0)) { cas_eint(sc, status); CAS_UNLOCK(sc); return; } if (__predict_false(status & CAS_INTR_TX_MAC_INT)) { status2 = CAS_READ_4(sc, CAS_MAC_TX_STATUS); if ((status2 & (CAS_MAC_TX_UNDERRUN | CAS_MAC_TX_MAX_PKT_ERR)) != 0) if_inc_counter(ifp, IFCOUNTER_OERRORS, 1); else if ((status2 & ~CAS_MAC_TX_FRAME_XMTD) != 0) device_printf(sc->sc_dev, "MAC TX fault, status %x\n", status2); } if (__predict_false(status & CAS_INTR_RX_MAC_INT)) { status2 = CAS_READ_4(sc, CAS_MAC_RX_STATUS); if ((status2 & CAS_MAC_RX_OVERFLOW) != 0) if_inc_counter(ifp, IFCOUNTER_IERRORS, 1); else if ((status2 & ~CAS_MAC_RX_FRAME_RCVD) != 0) device_printf(sc->sc_dev, "MAC RX fault, status %x\n", status2); } if ((status & (CAS_INTR_RX_DONE | CAS_INTR_RX_BUF_NA | CAS_INTR_RX_COMP_FULL | CAS_INTR_RX_BUF_AEMPTY | CAS_INTR_RX_COMP_AFULL)) != 0) { cas_rint(sc); #ifdef CAS_DEBUG if (__predict_false((status & (CAS_INTR_RX_BUF_NA | CAS_INTR_RX_COMP_FULL | CAS_INTR_RX_BUF_AEMPTY | CAS_INTR_RX_COMP_AFULL)) != 0)) device_printf(sc->sc_dev, "RX fault, status %x\n", status); #endif } if ((status & (CAS_INTR_TX_INT_ME | CAS_INTR_TX_ALL | CAS_INTR_TX_DONE)) != 0) cas_tint(sc); if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) == 0) { CAS_UNLOCK(sc); return; } else if (!if_sendq_empty(ifp)) taskqueue_enqueue(sc->sc_tq, &sc->sc_tx_task); CAS_UNLOCK(sc); status = CAS_READ_4(sc, CAS_STATUS_ALIAS); if (__predict_false((status & CAS_INTR_SUMMARY) != 0)) { taskqueue_enqueue(sc->sc_tq, &sc->sc_intr_task); return; } done: /* Re-enable interrupts. 
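* In CAS_INTMASK a set bit masks the corresponding source (cas_intr() * wrote 0xffffffff to mask everything before scheduling this task), * so writing the complement of the wanted sources re-enables exactly * those.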
*/ CAS_WRITE_4(sc, CAS_INTMASK, ~(CAS_INTR_TX_INT_ME | CAS_INTR_TX_TAG_ERR | CAS_INTR_RX_DONE | CAS_INTR_RX_BUF_NA | CAS_INTR_RX_TAG_ERR | CAS_INTR_RX_COMP_FULL | CAS_INTR_RX_BUF_AEMPTY | CAS_INTR_RX_COMP_AFULL | CAS_INTR_RX_LEN_MMATCH | CAS_INTR_PCI_ERROR_INT #ifdef CAS_DEBUG | CAS_INTR_PCS_INT | CAS_INTR_MIF #endif )); } static void cas_watchdog(struct cas_softc *sc) { if_t ifp = sc->sc_ifp; CAS_LOCK_ASSERT(sc, MA_OWNED); #ifdef CAS_DEBUG CTR4(KTR_CAS, "%s: CAS_RX_CONF %x CAS_MAC_RX_STATUS %x CAS_MAC_RX_CONF %x", __func__, CAS_READ_4(sc, CAS_RX_CONF), CAS_READ_4(sc, CAS_MAC_RX_STATUS), CAS_READ_4(sc, CAS_MAC_RX_CONF)); CTR4(KTR_CAS, "%s: CAS_TX_CONF %x CAS_MAC_TX_STATUS %x CAS_MAC_TX_CONF %x", __func__, CAS_READ_4(sc, CAS_TX_CONF), CAS_READ_4(sc, CAS_MAC_TX_STATUS), CAS_READ_4(sc, CAS_MAC_TX_CONF)); #endif if (sc->sc_wdog_timer == 0 || --sc->sc_wdog_timer != 0) return; if ((sc->sc_flags & CAS_LINK) != 0) device_printf(sc->sc_dev, "device timeout\n"); else if (bootverbose) device_printf(sc->sc_dev, "device timeout (no link)\n"); if_inc_counter(ifp, IFCOUNTER_OERRORS, 1); /* Try to get more packets going. */ if_setdrvflagbits(ifp, 0, IFF_DRV_RUNNING); cas_init_locked(sc); if (!if_sendq_empty(ifp)) taskqueue_enqueue(sc->sc_tq, &sc->sc_tx_task); } static void cas_mifinit(struct cas_softc *sc) { /* Configure the MIF in frame mode. */ CAS_WRITE_4(sc, CAS_MIF_CONF, CAS_READ_4(sc, CAS_MIF_CONF) & ~CAS_MIF_CONF_BB_MODE); CAS_BARRIER(sc, CAS_MIF_CONF, 4, BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE); } /* * MII interface * * The MII interface supports at least three different operating modes: * * Bitbang mode is implemented using data, clock and output enable registers. * * Frame mode is implemented by loading a complete frame into the frame * register and polling the valid bit for completion. * * Polling mode uses the frame register but completion is indicated by * an interrupt. * */ static int cas_mii_readreg(device_t dev, int phy, int reg) { struct cas_softc *sc; int n; uint32_t v; #ifdef CAS_DEBUG_PHY printf("%s: phy %d reg %d\n", __func__, phy, reg); #endif sc = device_get_softc(dev); if ((sc->sc_flags & CAS_SERDES) != 0) { switch (reg) { case MII_BMCR: reg = CAS_PCS_CTRL; break; case MII_BMSR: reg = CAS_PCS_STATUS; break; case MII_PHYIDR1: case MII_PHYIDR2: return (0); case MII_ANAR: reg = CAS_PCS_ANAR; break; case MII_ANLPAR: reg = CAS_PCS_ANLPAR; break; case MII_EXTSR: return (EXTSR_1000XFDX | EXTSR_1000XHDX); default: device_printf(sc->sc_dev, "%s: unhandled register %d\n", __func__, reg); return (0); } return (CAS_READ_4(sc, reg)); } /* Construct the frame command. 
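* A frame-mode access packs the opcode, PHY address and register * number into a single register write; e.g. a read of register 2 on * PHY 1 (hypothetical values) would be issued as * *	v = CAS_MIF_FRAME_READ | (1 << CAS_MIF_FRAME_PHY_SHFT) | *	    (2 << CAS_MIF_FRAME_REG_SHFT); * * The loop below then polls the turnaround bit CAS_MIF_FRAME_TA_LSB * for up to ~100us and returns the result from CAS_MIF_FRAME_DATA.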
*/ v = CAS_MIF_FRAME_READ | (phy << CAS_MIF_FRAME_PHY_SHFT) | (reg << CAS_MIF_FRAME_REG_SHFT); CAS_WRITE_4(sc, CAS_MIF_FRAME, v); CAS_BARRIER(sc, CAS_MIF_FRAME, 4, BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE); for (n = 0; n < 100; n++) { DELAY(1); v = CAS_READ_4(sc, CAS_MIF_FRAME); if (v & CAS_MIF_FRAME_TA_LSB) return (v & CAS_MIF_FRAME_DATA); } device_printf(sc->sc_dev, "%s: timed out\n", __func__); return (0); } static int cas_mii_writereg(device_t dev, int phy, int reg, int val) { struct cas_softc *sc; int n; uint32_t v; #ifdef CAS_DEBUG_PHY printf("%s: phy %d reg %d val %x\n", __func__, phy, reg, val); #endif sc = device_get_softc(dev); if ((sc->sc_flags & CAS_SERDES) != 0) { switch (reg) { case MII_BMSR: reg = CAS_PCS_STATUS; break; case MII_BMCR: reg = CAS_PCS_CTRL; if ((val & CAS_PCS_CTRL_RESET) == 0) break; CAS_WRITE_4(sc, CAS_PCS_CTRL, val); CAS_BARRIER(sc, CAS_PCS_CTRL, 4, BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE); if (!cas_bitwait(sc, CAS_PCS_CTRL, CAS_PCS_CTRL_RESET, 0)) device_printf(sc->sc_dev, "cannot reset PCS\n"); /* FALLTHROUGH */ case MII_ANAR: CAS_WRITE_4(sc, CAS_PCS_CONF, 0); CAS_BARRIER(sc, CAS_PCS_CONF, 4, BUS_SPACE_BARRIER_WRITE); CAS_WRITE_4(sc, CAS_PCS_ANAR, val); CAS_BARRIER(sc, CAS_PCS_ANAR, 4, BUS_SPACE_BARRIER_WRITE); CAS_WRITE_4(sc, CAS_PCS_SERDES_CTRL, CAS_PCS_SERDES_CTRL_ESD); CAS_BARRIER(sc, CAS_PCS_CONF, 4, BUS_SPACE_BARRIER_WRITE); CAS_WRITE_4(sc, CAS_PCS_CONF, CAS_PCS_CONF_EN); CAS_BARRIER(sc, CAS_PCS_CONF, 4, BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE); return (0); case MII_ANLPAR: reg = CAS_PCS_ANLPAR; break; default: device_printf(sc->sc_dev, "%s: unhandled register %d\n", __func__, reg); return (0); } CAS_WRITE_4(sc, reg, val); CAS_BARRIER(sc, reg, 4, BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE); return (0); } /* Construct the frame command. */ v = CAS_MIF_FRAME_WRITE | (phy << CAS_MIF_FRAME_PHY_SHFT) | (reg << CAS_MIF_FRAME_REG_SHFT) | (val & CAS_MIF_FRAME_DATA); CAS_WRITE_4(sc, CAS_MIF_FRAME, v); CAS_BARRIER(sc, CAS_MIF_FRAME, 4, BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE); for (n = 0; n < 100; n++) { DELAY(1); v = CAS_READ_4(sc, CAS_MIF_FRAME); if (v & CAS_MIF_FRAME_TA_LSB) return (1); } device_printf(sc->sc_dev, "%s: timed out\n", __func__); return (0); } static void cas_mii_statchg(device_t dev) { struct cas_softc *sc; if_t ifp; int gigabit; uint32_t rxcfg, txcfg, v; sc = device_get_softc(dev); ifp = sc->sc_ifp; CAS_LOCK_ASSERT(sc, MA_OWNED); #ifdef CAS_DEBUG if ((if_getflags(ifp) & IFF_DEBUG) != 0) device_printf(sc->sc_dev, "%s: status change\n", __func__); #endif if ((sc->sc_mii->mii_media_status & IFM_ACTIVE) != 0 && IFM_SUBTYPE(sc->sc_mii->mii_media_active) != IFM_NONE) sc->sc_flags |= CAS_LINK; else sc->sc_flags &= ~CAS_LINK; switch (IFM_SUBTYPE(sc->sc_mii->mii_media_active)) { case IFM_1000_SX: case IFM_1000_LX: case IFM_1000_CX: case IFM_1000_T: gigabit = 1; break; default: gigabit = 0; } /* * The configuration done here corresponds to the steps F) and * G) and as far as enabling of RX and TX MAC goes also step H) * of the initialization sequence outlined in section 11.2.1 of * the Cassini+ ASIC Specification.
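* In short: both MACs are disabled while their configuration is * rewritten for the negotiated duplex, and they are only re-enabled * at the very end, once a link is actually up.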
*/ rxcfg = sc->sc_mac_rxcfg; rxcfg &= ~CAS_MAC_RX_CONF_CARR; txcfg = CAS_MAC_TX_CONF_EN_IPG0 | CAS_MAC_TX_CONF_NGU | CAS_MAC_TX_CONF_NGUL; if ((IFM_OPTIONS(sc->sc_mii->mii_media_active) & IFM_FDX) != 0) txcfg |= CAS_MAC_TX_CONF_ICARR | CAS_MAC_TX_CONF_ICOLLIS; else if (gigabit != 0) { rxcfg |= CAS_MAC_RX_CONF_CARR; txcfg |= CAS_MAC_TX_CONF_CARR; } (void)cas_disable_tx(sc); CAS_WRITE_4(sc, CAS_MAC_TX_CONF, txcfg); (void)cas_disable_rx(sc); CAS_WRITE_4(sc, CAS_MAC_RX_CONF, rxcfg); v = CAS_READ_4(sc, CAS_MAC_CTRL_CONF) & ~(CAS_MAC_CTRL_CONF_TXP | CAS_MAC_CTRL_CONF_RXP); if ((IFM_OPTIONS(sc->sc_mii->mii_media_active) & IFM_ETH_RXPAUSE) != 0) v |= CAS_MAC_CTRL_CONF_RXP; if ((IFM_OPTIONS(sc->sc_mii->mii_media_active) & IFM_ETH_TXPAUSE) != 0) v |= CAS_MAC_CTRL_CONF_TXP; CAS_WRITE_4(sc, CAS_MAC_CTRL_CONF, v); /* * All supported chips have a bug causing an incorrect checksum * to be calculated when letting them strip the FCS in half- * duplex mode. In theory we could disable FCS stripping and * manually adjust the checksum accordingly. It seems to make * more sense to optimize for the common case and just disable * hardware checksumming in half-duplex mode though. */ if ((IFM_OPTIONS(sc->sc_mii->mii_media_active) & IFM_FDX) == 0) { if_setcapenablebit(ifp, 0, IFCAP_HWCSUM); if_sethwassist(ifp, 0); } else if ((sc->sc_flags & CAS_NO_CSUM) == 0) { if_setcapenable(ifp, if_getcapabilities(ifp)); if_sethwassist(ifp, CAS_CSUM_FEATURES); } if (sc->sc_variant == CAS_SATURN) { if ((IFM_OPTIONS(sc->sc_mii->mii_media_active) & IFM_FDX) == 0) /* silicon bug workaround */ CAS_WRITE_4(sc, CAS_MAC_PREAMBLE_LEN, 0x41); else CAS_WRITE_4(sc, CAS_MAC_PREAMBLE_LEN, 0x7); } if ((IFM_OPTIONS(sc->sc_mii->mii_media_active) & IFM_FDX) == 0 && gigabit != 0) CAS_WRITE_4(sc, CAS_MAC_SLOT_TIME, CAS_MAC_SLOT_TIME_CARR); else CAS_WRITE_4(sc, CAS_MAC_SLOT_TIME, CAS_MAC_SLOT_TIME_NORM); /* XIF Configuration */ v = CAS_MAC_XIF_CONF_TX_OE | CAS_MAC_XIF_CONF_LNKLED; if ((sc->sc_flags & CAS_SERDES) == 0) { if ((IFM_OPTIONS(sc->sc_mii->mii_media_active) & IFM_FDX) == 0) v |= CAS_MAC_XIF_CONF_NOECHO; v |= CAS_MAC_XIF_CONF_BUF_OE; } if (gigabit != 0) v |= CAS_MAC_XIF_CONF_GMII; if ((IFM_OPTIONS(sc->sc_mii->mii_media_active) & IFM_FDX) != 0) v |= CAS_MAC_XIF_CONF_FDXLED; CAS_WRITE_4(sc, CAS_MAC_XIF_CONF, v); sc->sc_mac_rxcfg = rxcfg; if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0 && (sc->sc_flags & CAS_LINK) != 0) { CAS_WRITE_4(sc, CAS_MAC_TX_CONF, txcfg | CAS_MAC_TX_CONF_EN); CAS_WRITE_4(sc, CAS_MAC_RX_CONF, rxcfg | CAS_MAC_RX_CONF_EN); } } static int cas_mediachange(if_t ifp) { struct cas_softc *sc = if_getsoftc(ifp); int error; /* XXX add support for serial media.
*/ CAS_LOCK(sc); error = mii_mediachg(sc->sc_mii); CAS_UNLOCK(sc); return (error); } static void cas_mediastatus(if_t ifp, struct ifmediareq *ifmr) { struct cas_softc *sc = if_getsoftc(ifp); CAS_LOCK(sc); if ((if_getflags(ifp) & IFF_UP) == 0) { CAS_UNLOCK(sc); return; } mii_pollstat(sc->sc_mii); ifmr->ifm_active = sc->sc_mii->mii_media_active; ifmr->ifm_status = sc->sc_mii->mii_media_status; CAS_UNLOCK(sc); } static int cas_ioctl(if_t ifp, u_long cmd, caddr_t data) { struct cas_softc *sc = if_getsoftc(ifp); struct ifreq *ifr = (struct ifreq *)data; int error; error = 0; switch (cmd) { case SIOCSIFFLAGS: CAS_LOCK(sc); if ((if_getflags(ifp) & IFF_UP) != 0) { if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0 && ((if_getflags(ifp) ^ sc->sc_ifflags) & (IFF_ALLMULTI | IFF_PROMISC)) != 0) cas_setladrf(sc); else cas_init_locked(sc); } else if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0) cas_stop(ifp); sc->sc_ifflags = if_getflags(ifp); CAS_UNLOCK(sc); break; case SIOCSIFCAP: CAS_LOCK(sc); if ((sc->sc_flags & CAS_NO_CSUM) != 0) { error = EINVAL; CAS_UNLOCK(sc); break; } if_setcapenable(ifp, ifr->ifr_reqcap); if ((if_getcapenable(ifp) & IFCAP_TXCSUM) != 0) if_sethwassist(ifp, CAS_CSUM_FEATURES); else if_sethwassist(ifp, 0); CAS_UNLOCK(sc); break; case SIOCADDMULTI: case SIOCDELMULTI: CAS_LOCK(sc); if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0) cas_setladrf(sc); CAS_UNLOCK(sc); break; case SIOCSIFMTU: if ((ifr->ifr_mtu < ETHERMIN) || (ifr->ifr_mtu > ETHERMTU_JUMBO)) error = EINVAL; else if_setmtu(ifp, ifr->ifr_mtu); break; case SIOCGIFMEDIA: case SIOCSIFMEDIA: error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii->mii_media, cmd); break; default: error = ether_ioctl(ifp, cmd, data); break; } return (error); } static u_int cas_hash_maddr(void *arg, struct sockaddr_dl *sdl, u_int cnt) { uint32_t crc, *hash = arg; crc = ether_crc32_le(LLADDR(sdl), ETHER_ADDR_LEN); /* We just want the 8 most significant bits. */ crc >>= 24; /* Set the corresponding bit in the filter. */ hash[crc >> 4] |= 1 << (15 - (crc & 15)); return (1); } static void cas_setladrf(struct cas_softc *sc) { if_t ifp = sc->sc_ifp; int i; uint32_t hash[16]; uint32_t v; CAS_LOCK_ASSERT(sc, MA_OWNED); /* * Turn off the RX MAC and the hash filter as required by the Sun * Cassini programming restrictions. */ v = sc->sc_mac_rxcfg & ~(CAS_MAC_RX_CONF_HFILTER | CAS_MAC_RX_CONF_EN); CAS_WRITE_4(sc, CAS_MAC_RX_CONF, v); CAS_BARRIER(sc, CAS_MAC_RX_CONF, 4, BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE); if (!cas_bitwait(sc, CAS_MAC_RX_CONF, CAS_MAC_RX_CONF_HFILTER | CAS_MAC_RX_CONF_EN, 0)) device_printf(sc->sc_dev, "cannot disable RX MAC or hash filter\n"); v &= ~(CAS_MAC_RX_CONF_PROMISC | CAS_MAC_RX_CONF_PGRP); if ((if_getflags(ifp) & IFF_PROMISC) != 0) { v |= CAS_MAC_RX_CONF_PROMISC; goto chipit; } if ((if_getflags(ifp) & IFF_ALLMULTI) != 0) { v |= CAS_MAC_RX_CONF_PGRP; goto chipit; } /* * Set up multicast address filter by passing all multicast * addresses through a crc generator, and then using the high * order 8 bits as an index into the 256 bit logical address * filter. The high order 4 bits select the word, while the * other 4 bits select the bit within the word (where bit 0 * is the MSB). */ memset(hash, 0, sizeof(hash)); if_foreach_llmaddr(ifp, cas_hash_maddr, &hash); v |= CAS_MAC_RX_CONF_HFILTER; /* Now load the hash table into the chip (if we are using it).
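* For example, an address whose little-endian CRC32 has 0x6d in its * top byte selects word hash[0x6] and bit 15 - 0xd = 2, i.e. * cas_hash_maddr() above sets hash[6] |= 1 << 2.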
*/ for (i = 0; i < 16; i++) CAS_WRITE_4(sc, CAS_MAC_HASH0 + i * (CAS_MAC_HASH1 - CAS_MAC_HASH0), hash[i]); chipit: sc->sc_mac_rxcfg = v; CAS_WRITE_4(sc, CAS_MAC_RX_CONF, v | CAS_MAC_RX_CONF_EN); } static int cas_pci_attach(device_t dev); static int cas_pci_detach(device_t dev); static int cas_pci_probe(device_t dev); static int cas_pci_resume(device_t dev); static int cas_pci_suspend(device_t dev); static device_method_t cas_pci_methods[] = { /* Device interface */ DEVMETHOD(device_probe, cas_pci_probe), DEVMETHOD(device_attach, cas_pci_attach), DEVMETHOD(device_detach, cas_pci_detach), DEVMETHOD(device_suspend, cas_pci_suspend), DEVMETHOD(device_resume, cas_pci_resume), /* Use the suspend handler here, it is all that is required. */ DEVMETHOD(device_shutdown, cas_pci_suspend), /* MII interface */ DEVMETHOD(miibus_readreg, cas_mii_readreg), DEVMETHOD(miibus_writereg, cas_mii_writereg), DEVMETHOD(miibus_statchg, cas_mii_statchg), DEVMETHOD_END }; static driver_t cas_pci_driver = { "cas", cas_pci_methods, sizeof(struct cas_softc) }; static const struct cas_pci_dev { uint32_t cpd_devid; uint8_t cpd_revid; int cpd_variant; const char *cpd_desc; } cas_pci_devlist[] = { { 0x0035100b, 0x0, CAS_SATURN, "NS DP83065 Saturn Gigabit Ethernet" }, { 0xabba108e, 0x10, CAS_CASPLUS, "Sun Cassini+ Gigabit Ethernet" }, { 0xabba108e, 0x0, CAS_CAS, "Sun Cassini Gigabit Ethernet" }, { 0, 0, 0, NULL } }; DRIVER_MODULE(cas, pci, cas_pci_driver, 0, 0); MODULE_PNP_INFO("W32:vendor/device", pci, cas, cas_pci_devlist, nitems(cas_pci_devlist) - 1); DRIVER_MODULE(miibus, cas, miibus_driver, 0, 0); MODULE_DEPEND(cas, pci, 1, 1, 1); static int cas_pci_probe(device_t dev) { int i; for (i = 0; cas_pci_devlist[i].cpd_desc != NULL; i++) { if (pci_get_devid(dev) == cas_pci_devlist[i].cpd_devid && pci_get_revid(dev) >= cas_pci_devlist[i].cpd_revid) { device_set_desc(dev, cas_pci_devlist[i].cpd_desc); return (BUS_PROBE_DEFAULT); } } return (ENXIO); } static struct resource_spec cas_pci_res_spec[] = { { SYS_RES_IRQ, 0, RF_SHAREABLE | RF_ACTIVE }, /* CAS_RES_INTR */ { SYS_RES_MEMORY, PCIR_BAR(0), RF_ACTIVE }, /* CAS_RES_MEM */ { -1, 0 } }; #define CAS_LOCAL_MAC_ADDRESS "local-mac-address" #define CAS_PHY_INTERFACE "phy-interface" #define CAS_PHY_TYPE "phy-type" #define CAS_PHY_TYPE_PCS "pcs" static int cas_pci_attach(device_t dev) { char buf[sizeof(CAS_LOCAL_MAC_ADDRESS)]; struct cas_softc *sc; int i; #if !defined(__powerpc__) u_char enaddr[4][ETHER_ADDR_LEN]; u_int j, k, lma, pcs[4], phy; #endif sc = device_get_softc(dev); sc->sc_variant = CAS_UNKNOWN; for (i = 0; cas_pci_devlist[i].cpd_desc != NULL; i++) { if (pci_get_devid(dev) == cas_pci_devlist[i].cpd_devid && pci_get_revid(dev) >= cas_pci_devlist[i].cpd_revid) { sc->sc_variant = cas_pci_devlist[i].cpd_variant; break; } } if (sc->sc_variant == CAS_UNKNOWN) { device_printf(dev, "unknown adaptor\n"); return (ENXIO); } /* PCI configuration */ pci_write_config(dev, PCIR_COMMAND, pci_read_config(dev, PCIR_COMMAND, 2) | PCIM_CMD_BUSMASTEREN | PCIM_CMD_MWRICEN | PCIM_CMD_PERRESPEN | PCIM_CMD_SERRESPEN, 2); sc->sc_dev = dev; if (sc->sc_variant == CAS_CAS && pci_get_revid(dev) < 0x02) /* Hardware checksumming may hang TX.
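* (Per the check above only original Cassini revisions below 0x02 * are affected; later Cassini, Cassini+ and Saturn keep checksum * offload usable.)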
*/ sc->sc_flags |= CAS_NO_CSUM; if (sc->sc_variant == CAS_CASPLUS || sc->sc_variant == CAS_SATURN) sc->sc_flags |= CAS_REG_PLUS; if (sc->sc_variant == CAS_CAS || (sc->sc_variant == CAS_CASPLUS && pci_get_revid(dev) < 0x11)) sc->sc_flags |= CAS_TABORT; if (bootverbose) device_printf(dev, "flags=0x%x\n", sc->sc_flags); if (bus_alloc_resources(dev, cas_pci_res_spec, sc->sc_res)) { device_printf(dev, "failed to allocate resources\n"); bus_release_resources(dev, cas_pci_res_spec, sc->sc_res); return (ENXIO); } CAS_LOCK_INIT(sc, device_get_nameunit(dev)); #if defined(__powerpc__) OF_getetheraddr(dev, sc->sc_enaddr); if (OF_getprop(ofw_bus_get_node(dev), CAS_PHY_INTERFACE, buf, sizeof(buf)) > 0 || OF_getprop(ofw_bus_get_node(dev), CAS_PHY_TYPE, buf, sizeof(buf)) > 0) { buf[sizeof(buf) - 1] = '\0'; if (strcmp(buf, CAS_PHY_TYPE_PCS) == 0) sc->sc_flags |= CAS_SERDES; } #else /* * Dig out VPD (vital product data) and read the MAC address as well * as the PHY type. The VPD resides in the PCI Expansion ROM (PCI * FCode) and can't be accessed via the PCI capability pointer. * SUNW,pci-ce and SUNW,pci-qge use the Enhanced VPD format described * in the free US Patent 7149820. */ #define PCI_ROMHDR_SIZE 0x1c #define PCI_ROMHDR_SIG 0x00 #define PCI_ROMHDR_SIG_MAGIC 0xaa55 /* little endian */ #define PCI_ROMHDR_PTR_DATA 0x18 #define PCI_ROM_SIZE 0x18 #define PCI_ROM_SIG 0x00 #define PCI_ROM_SIG_MAGIC 0x52494350 /* "PCIR", endian */ /* reversed */ #define PCI_ROM_VENDOR 0x04 #define PCI_ROM_DEVICE 0x06 #define PCI_ROM_PTR_VPD 0x08 #define PCI_VPDRES_BYTE0 0x00 #define PCI_VPDRES_ISLARGE(x) ((x) & 0x80) #define PCI_VPDRES_LARGE_NAME(x) ((x) & 0x7f) #define PCI_VPDRES_LARGE_LEN_LSB 0x01 #define PCI_VPDRES_LARGE_LEN_MSB 0x02 #define PCI_VPDRES_LARGE_SIZE 0x03 #define PCI_VPDRES_TYPE_ID_STRING 0x02 /* large */ #define PCI_VPDRES_TYPE_VPD 0x10 /* large */ #define PCI_VPD_KEY0 0x00 #define PCI_VPD_KEY1 0x01 #define PCI_VPD_LEN 0x02 #define PCI_VPD_SIZE 0x03 #define CAS_ROM_READ_1(sc, offs) \ CAS_READ_1((sc), CAS_PCI_ROM_OFFSET + (offs)) #define CAS_ROM_READ_2(sc, offs) \ CAS_READ_2((sc), CAS_PCI_ROM_OFFSET + (offs)) #define CAS_ROM_READ_4(sc, offs) \ CAS_READ_4((sc), CAS_PCI_ROM_OFFSET + (offs)) lma = phy = 0; memset(enaddr, 0, sizeof(enaddr)); memset(pcs, 0, sizeof(pcs)); /* Enable PCI Expansion ROM access. */ CAS_WRITE_4(sc, CAS_BIM_LDEV_OEN, CAS_BIM_LDEV_OEN_PAD | CAS_BIM_LDEV_OEN_PROM); /* Read PCI Expansion ROM header. */ if (CAS_ROM_READ_2(sc, PCI_ROMHDR_SIG) != PCI_ROMHDR_SIG_MAGIC || (i = CAS_ROM_READ_2(sc, PCI_ROMHDR_PTR_DATA)) < PCI_ROMHDR_SIZE) { device_printf(dev, "unexpected PCI Expansion ROM header\n"); goto fail_prom; } /* Read PCI Expansion ROM data. */ if (CAS_ROM_READ_4(sc, i + PCI_ROM_SIG) != PCI_ROM_SIG_MAGIC || CAS_ROM_READ_2(sc, i + PCI_ROM_VENDOR) != pci_get_vendor(dev) || CAS_ROM_READ_2(sc, i + PCI_ROM_DEVICE) != pci_get_device(dev) || (j = CAS_ROM_READ_2(sc, i + PCI_ROM_PTR_VPD)) < i + PCI_ROM_SIZE) { device_printf(dev, "unexpected PCI Expansion ROM data\n"); goto fail_prom; } /* Read PCI VPD. */ next: if (PCI_VPDRES_ISLARGE(CAS_ROM_READ_1(sc, j + PCI_VPDRES_BYTE0)) == 0) { device_printf(dev, "no large PCI VPD\n"); goto fail_prom; } i = (CAS_ROM_READ_1(sc, j + PCI_VPDRES_LARGE_LEN_MSB) << 8) | CAS_ROM_READ_1(sc, j + PCI_VPDRES_LARGE_LEN_LSB); switch (PCI_VPDRES_LARGE_NAME(CAS_ROM_READ_1(sc, j + PCI_VPDRES_BYTE0))) { case PCI_VPDRES_TYPE_ID_STRING: /* Skip identifier string. 
*/ j += PCI_VPDRES_LARGE_SIZE + i; goto next; case PCI_VPDRES_TYPE_VPD: for (j += PCI_VPDRES_LARGE_SIZE; i > 0; i -= PCI_VPD_SIZE + CAS_ROM_READ_1(sc, j + PCI_VPD_LEN), j += PCI_VPD_SIZE + CAS_ROM_READ_1(sc, j + PCI_VPD_LEN)) { if (CAS_ROM_READ_1(sc, j + PCI_VPD_KEY0) != 'Z') /* no Enhanced VPD */ continue; if (CAS_ROM_READ_1(sc, j + PCI_VPD_SIZE) != 'I') /* no instance property */ continue; if (CAS_ROM_READ_1(sc, j + PCI_VPD_SIZE + 3) == 'B') { /* byte array */ if (CAS_ROM_READ_1(sc, j + PCI_VPD_SIZE + 4) != ETHER_ADDR_LEN) continue; bus_read_region_1(sc->sc_res[CAS_RES_MEM], CAS_PCI_ROM_OFFSET + j + PCI_VPD_SIZE + 5, buf, sizeof(buf)); buf[sizeof(buf) - 1] = '\0'; if (strcmp(buf, CAS_LOCAL_MAC_ADDRESS) != 0) continue; bus_read_region_1(sc->sc_res[CAS_RES_MEM], CAS_PCI_ROM_OFFSET + j + PCI_VPD_SIZE + 5 + sizeof(CAS_LOCAL_MAC_ADDRESS), enaddr[lma], sizeof(enaddr[lma])); lma++; if (lma == 4 && phy == 4) break; } else if (CAS_ROM_READ_1(sc, j + PCI_VPD_SIZE + 3) == 'S') { /* string */ if (CAS_ROM_READ_1(sc, j + PCI_VPD_SIZE + 4) != sizeof(CAS_PHY_TYPE_PCS)) continue; bus_read_region_1(sc->sc_res[CAS_RES_MEM], CAS_PCI_ROM_OFFSET + j + PCI_VPD_SIZE + 5, buf, sizeof(buf)); buf[sizeof(buf) - 1] = '\0'; if (strcmp(buf, CAS_PHY_INTERFACE) == 0) k = sizeof(CAS_PHY_INTERFACE); else if (strcmp(buf, CAS_PHY_TYPE) == 0) k = sizeof(CAS_PHY_TYPE); else continue; bus_read_region_1(sc->sc_res[CAS_RES_MEM], CAS_PCI_ROM_OFFSET + j + PCI_VPD_SIZE + 5 + k, buf, sizeof(buf)); buf[sizeof(buf) - 1] = '\0'; if (strcmp(buf, CAS_PHY_TYPE_PCS) == 0) pcs[phy] = 1; phy++; if (lma == 4 && phy == 4) break; } } break; default: device_printf(dev, "unexpected PCI VPD\n"); goto fail_prom; } fail_prom: CAS_WRITE_4(sc, CAS_BIM_LDEV_OEN, 0); if (lma == 0) { device_printf(dev, "could not determine Ethernet address\n"); goto fail; } i = 0; if (lma > 1 && pci_get_slot(dev) < nitems(enaddr)) i = pci_get_slot(dev); memcpy(sc->sc_enaddr, enaddr[i], ETHER_ADDR_LEN); if (phy == 0) { device_printf(dev, "could not determine PHY type\n"); goto fail; } i = 0; if (phy > 1 && pci_get_slot(dev) < nitems(pcs)) i = pci_get_slot(dev); if (pcs[i] != 0) sc->sc_flags |= CAS_SERDES; #endif if (cas_attach(sc) != 0) { device_printf(dev, "could not be attached\n"); goto fail; } if (bus_setup_intr(dev, sc->sc_res[CAS_RES_INTR], INTR_TYPE_NET | INTR_MPSAFE, cas_intr, NULL, sc, &sc->sc_ih) != 0) { device_printf(dev, "failed to set up interrupt\n"); cas_detach(sc); goto fail; } return (0); fail: CAS_LOCK_DESTROY(sc); bus_release_resources(dev, cas_pci_res_spec, sc->sc_res); return (ENXIO); } static int cas_pci_detach(device_t dev) { struct cas_softc *sc; sc = device_get_softc(dev); bus_teardown_intr(dev, sc->sc_res[CAS_RES_INTR], sc->sc_ih); cas_detach(sc); CAS_LOCK_DESTROY(sc); bus_release_resources(dev, cas_pci_res_spec, sc->sc_res); return (0); } static int cas_pci_suspend(device_t dev) { cas_suspend(device_get_softc(dev)); return (0); } static int cas_pci_resume(device_t dev) { cas_resume(device_get_softc(dev)); return (0); } diff --git a/sys/dev/enetc/if_enetc.c b/sys/dev/enetc/if_enetc.c index 3c0bc4723b05..3a5d6ec23282 100644 --- a/sys/dev/enetc/if_enetc.c +++ b/sys/dev/enetc/if_enetc.c @@ -1,1533 +1,1532 @@ /*- * SPDX-License-Identifier: BSD-2-Clause * * Copyright (c) 2021 Alstom Group. * Copyright (c) 2021 Semihalf. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. 
Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "ifdi_if.h" #include "miibus_if.h" static device_register_t enetc_register; static ifdi_attach_pre_t enetc_attach_pre; static ifdi_attach_post_t enetc_attach_post; static ifdi_detach_t enetc_detach; static ifdi_tx_queues_alloc_t enetc_tx_queues_alloc; static ifdi_rx_queues_alloc_t enetc_rx_queues_alloc; static ifdi_queues_free_t enetc_queues_free; static ifdi_init_t enetc_init; static ifdi_stop_t enetc_stop; static ifdi_msix_intr_assign_t enetc_msix_intr_assign; static ifdi_tx_queue_intr_enable_t enetc_tx_queue_intr_enable; static ifdi_rx_queue_intr_enable_t enetc_rx_queue_intr_enable; static ifdi_intr_enable_t enetc_intr_enable; static ifdi_intr_disable_t enetc_intr_disable; static int enetc_isc_txd_encap(void*, if_pkt_info_t); static void enetc_isc_txd_flush(void*, uint16_t, qidx_t); static int enetc_isc_txd_credits_update(void*, uint16_t, bool); static int enetc_isc_rxd_available(void*, uint16_t, qidx_t, qidx_t); static int enetc_isc_rxd_pkt_get(void*, if_rxd_info_t); static void enetc_isc_rxd_refill(void*, if_rxd_update_t); static void enetc_isc_rxd_flush(void*, uint16_t, uint8_t, qidx_t); static void enetc_vlan_register(if_ctx_t, uint16_t); static void enetc_vlan_unregister(if_ctx_t, uint16_t); static uint64_t enetc_get_counter(if_ctx_t, ift_counter); static int enetc_promisc_set(if_ctx_t, int); static int enetc_mtu_set(if_ctx_t, uint32_t); static void enetc_setup_multicast(if_ctx_t); static void enetc_timer(if_ctx_t, uint16_t); static void enetc_update_admin_status(if_ctx_t); static bool enetc_if_needs_restart(if_ctx_t, enum iflib_restart_event); static miibus_readreg_t enetc_miibus_readreg; static miibus_writereg_t enetc_miibus_writereg; static miibus_linkchg_t enetc_miibus_linkchg; static miibus_statchg_t enetc_miibus_statchg; static int enetc_media_change(if_t); static void enetc_media_status(if_t, struct ifmediareq*); static int enetc_fixed_media_change(if_t); static void enetc_fixed_media_status(if_t, struct ifmediareq*); static void enetc_max_nqueues(struct enetc_softc*, int*, int*); static int enetc_setup_phy(struct enetc_softc*); static void enetc_get_hwaddr(struct enetc_softc*); static void enetc_set_hwaddr(struct enetc_softc*); static int enetc_setup_rss(struct enetc_softc*); static void 
enetc_init_hw(struct enetc_softc*); static void enetc_init_ctrl(struct enetc_softc*); static void enetc_init_tx(struct enetc_softc*); static void enetc_init_rx(struct enetc_softc*); static int enetc_ctrl_send(struct enetc_softc*, uint16_t, uint16_t, iflib_dma_info_t); static const char enetc_driver_version[] = "1.0.0"; static const pci_vendor_info_t enetc_vendor_info_array[] = { PVID(PCI_VENDOR_FREESCALE, ENETC_DEV_ID_PF, "Freescale ENETC PCIe Gigabit Ethernet Controller"), PVID_END }; #define ENETC_IFCAPS (IFCAP_VLAN_MTU | IFCAP_RXCSUM | IFCAP_JUMBO_MTU | \ IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_HWFILTER) static device_method_t enetc_methods[] = { DEVMETHOD(device_register, enetc_register), DEVMETHOD(device_probe, iflib_device_probe), DEVMETHOD(device_attach, iflib_device_attach), DEVMETHOD(device_detach, iflib_device_detach), DEVMETHOD(device_shutdown, iflib_device_shutdown), DEVMETHOD(device_suspend, iflib_device_suspend), DEVMETHOD(device_resume, iflib_device_resume), DEVMETHOD(miibus_readreg, enetc_miibus_readreg), DEVMETHOD(miibus_writereg, enetc_miibus_writereg), DEVMETHOD(miibus_linkchg, enetc_miibus_linkchg), DEVMETHOD(miibus_statchg, enetc_miibus_statchg), DEVMETHOD(bus_setup_intr, bus_generic_setup_intr), DEVMETHOD(bus_teardown_intr, bus_generic_teardown_intr), DEVMETHOD(bus_release_resource, bus_generic_release_resource), DEVMETHOD(bus_activate_resource, bus_generic_activate_resource), DEVMETHOD(bus_deactivate_resource, bus_generic_deactivate_resource), DEVMETHOD(bus_adjust_resource, bus_generic_adjust_resource), DEVMETHOD(bus_alloc_resource, bus_generic_alloc_resource), DEVMETHOD_END }; static driver_t enetc_driver = { "enetc", enetc_methods, sizeof(struct enetc_softc) }; DRIVER_MODULE(miibus, enetc, miibus_fdt_driver, NULL, NULL); /* Make sure miibus gets processed first.
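* (Hence the plain DRIVER_MODULE() for miibus here, while enetc * itself is registered below with DRIVER_MODULE_ORDERED() and * SI_ORDER_ANY, i.e. as late as possible.)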
*/ DRIVER_MODULE_ORDERED(enetc, pci, enetc_driver, NULL, NULL, SI_ORDER_ANY); MODULE_VERSION(enetc, 1); IFLIB_PNP_INFO(pci, enetc, enetc_vendor_info_array); MODULE_DEPEND(enetc, ether, 1, 1, 1); MODULE_DEPEND(enetc, iflib, 1, 1, 1); MODULE_DEPEND(enetc, miibus, 1, 1, 1); static device_method_t enetc_iflib_methods[] = { DEVMETHOD(ifdi_attach_pre, enetc_attach_pre), DEVMETHOD(ifdi_attach_post, enetc_attach_post), DEVMETHOD(ifdi_detach, enetc_detach), DEVMETHOD(ifdi_init, enetc_init), DEVMETHOD(ifdi_stop, enetc_stop), DEVMETHOD(ifdi_tx_queues_alloc, enetc_tx_queues_alloc), DEVMETHOD(ifdi_rx_queues_alloc, enetc_rx_queues_alloc), DEVMETHOD(ifdi_queues_free, enetc_queues_free), DEVMETHOD(ifdi_msix_intr_assign, enetc_msix_intr_assign), DEVMETHOD(ifdi_tx_queue_intr_enable, enetc_tx_queue_intr_enable), DEVMETHOD(ifdi_rx_queue_intr_enable, enetc_rx_queue_intr_enable), DEVMETHOD(ifdi_intr_enable, enetc_intr_enable), DEVMETHOD(ifdi_intr_disable, enetc_intr_disable), DEVMETHOD(ifdi_vlan_register, enetc_vlan_register), DEVMETHOD(ifdi_vlan_unregister, enetc_vlan_unregister), DEVMETHOD(ifdi_get_counter, enetc_get_counter), DEVMETHOD(ifdi_mtu_set, enetc_mtu_set), DEVMETHOD(ifdi_multi_set, enetc_setup_multicast), DEVMETHOD(ifdi_promisc_set, enetc_promisc_set), DEVMETHOD(ifdi_timer, enetc_timer), DEVMETHOD(ifdi_update_admin_status, enetc_update_admin_status), DEVMETHOD(ifdi_needs_restart, enetc_if_needs_restart), DEVMETHOD_END }; static driver_t enetc_iflib_driver = { "enetc", enetc_iflib_methods, sizeof(struct enetc_softc) }; static struct if_txrx enetc_txrx = { .ift_txd_encap = enetc_isc_txd_encap, .ift_txd_flush = enetc_isc_txd_flush, .ift_txd_credits_update = enetc_isc_txd_credits_update, .ift_rxd_available = enetc_isc_rxd_available, .ift_rxd_pkt_get = enetc_isc_rxd_pkt_get, .ift_rxd_refill = enetc_isc_rxd_refill, .ift_rxd_flush = enetc_isc_rxd_flush }; static struct if_shared_ctx enetc_sctx_init = { .isc_magic = IFLIB_MAGIC, .isc_q_align = ENETC_RING_ALIGN, .isc_tx_maxsize = ENETC_MAX_FRAME_LEN, .isc_tx_maxsegsize = PAGE_SIZE, .isc_rx_maxsize = ENETC_MAX_FRAME_LEN, .isc_rx_maxsegsize = ENETC_MAX_FRAME_LEN, .isc_rx_nsegments = ENETC_MAX_SCATTER, .isc_admin_intrcnt = 0, .isc_nfl = 1, .isc_nrxqs = 1, .isc_ntxqs = 1, .isc_vendor_info = enetc_vendor_info_array, .isc_driver_version = enetc_driver_version, .isc_driver = &enetc_iflib_driver, .isc_flags = IFLIB_DRIVER_MEDIA | IFLIB_PRESERVE_TX_INDICES, .isc_ntxd_min = {ENETC_MIN_DESC}, .isc_ntxd_max = {ENETC_MAX_DESC}, .isc_ntxd_default = {ENETC_DEFAULT_DESC}, .isc_nrxd_min = {ENETC_MIN_DESC}, .isc_nrxd_max = {ENETC_MAX_DESC}, .isc_nrxd_default = {ENETC_DEFAULT_DESC} }; static void* enetc_register(device_t dev) { if (!ofw_bus_status_okay(dev)) return (NULL); return (&enetc_sctx_init); } static void enetc_max_nqueues(struct enetc_softc *sc, int *max_tx_nqueues, int *max_rx_nqueues) { uint32_t val; val = ENETC_PORT_RD4(sc, ENETC_PCAPR0); *max_tx_nqueues = MIN(ENETC_PCAPR0_TXBDR(val), ENETC_MAX_QUEUES); *max_rx_nqueues = MIN(ENETC_PCAPR0_RXBDR(val), ENETC_MAX_QUEUES); } static int enetc_setup_fixed(struct enetc_softc *sc, phandle_t node) { ssize_t size; int speed; size = OF_getencprop(node, "speed", &speed, sizeof(speed)); if (size <= 0) { device_printf(sc->dev, "Device has fixed-link node without link speed specified\n"); return (ENXIO); } switch (speed) { case 10: speed = IFM_10_T; break; case 100: speed = IFM_100_TX; break; case 1000: speed = IFM_1000_T; break; case 2500: speed = IFM_2500_T; break; default: device_printf(sc->dev, "Unsupported link speed value of 
%d\n", speed); return (ENXIO); } speed |= IFM_ETHER; if (OF_hasprop(node, "full-duplex")) speed |= IFM_FDX; else speed |= IFM_HDX; sc->fixed_link = true; ifmedia_init(&sc->fixed_ifmedia, 0, enetc_fixed_media_change, enetc_fixed_media_status); ifmedia_add(&sc->fixed_ifmedia, speed, 0, NULL); ifmedia_set(&sc->fixed_ifmedia, speed); sc->shared->isc_media = &sc->fixed_ifmedia; return (0); } static int enetc_setup_phy(struct enetc_softc *sc) { phandle_t node, fixed_link, phy_handle; struct mii_data *miid; int phy_addr, error; ssize_t size; node = ofw_bus_get_node(sc->dev); fixed_link = ofw_bus_find_child(node, "fixed-link"); if (fixed_link != 0) return (enetc_setup_fixed(sc, fixed_link)); size = OF_getencprop(node, "phy-handle", &phy_handle, sizeof(phy_handle)); if (size <= 0) { device_printf(sc->dev, "Failed to acquire PHY handle from FDT.\n"); return (ENXIO); } phy_handle = OF_node_from_xref(phy_handle); size = OF_getencprop(phy_handle, "reg", &phy_addr, sizeof(phy_addr)); if (size <= 0) { device_printf(sc->dev, "Failed to obtain PHY address\n"); return (ENXIO); } error = mii_attach(sc->dev, &sc->miibus, iflib_get_ifp(sc->ctx), enetc_media_change, enetc_media_status, BMSR_DEFCAPMASK, phy_addr, MII_OFFSET_ANY, MIIF_DOPAUSE); if (error != 0) { device_printf(sc->dev, "mii_attach failed\n"); return (error); } miid = device_get_softc(sc->miibus); sc->shared->isc_media = &miid->mii_media; return (0); } static int enetc_attach_pre(if_ctx_t ctx) { if_softc_ctx_t scctx; struct enetc_softc *sc; int error, rid; sc = iflib_get_softc(ctx); scctx = iflib_get_softc_ctx(ctx); sc->ctx = ctx; sc->dev = iflib_get_dev(ctx); sc->shared = scctx; mtx_init(&sc->mii_lock, "enetc_mdio", NULL, MTX_DEF); pci_save_state(sc->dev); pcie_flr(sc->dev, 1000, false); pci_restore_state(sc->dev); rid = PCIR_BAR(ENETC_BAR_REGS); sc->regs = bus_alloc_resource_any(sc->dev, SYS_RES_MEMORY, &rid, RF_ACTIVE); if (sc->regs == NULL) { device_printf(sc->dev, "Failed to allocate BAR %d\n", ENETC_BAR_REGS); return (ENXIO); } error = iflib_dma_alloc_align(ctx, ENETC_MIN_DESC * sizeof(struct enetc_cbd), ENETC_RING_ALIGN, &sc->ctrl_queue.dma, 0); if (error != 0) { device_printf(sc->dev, "Failed to allocate control ring\n"); goto fail; } sc->ctrl_queue.ring = (struct enetc_cbd*)sc->ctrl_queue.dma.idi_vaddr; scctx->isc_txrx = &enetc_txrx; scctx->isc_tx_nsegments = ENETC_MAX_SCATTER; enetc_max_nqueues(sc, &scctx->isc_nrxqsets_max, &scctx->isc_ntxqsets_max); if (scctx->isc_ntxd[0] % ENETC_DESC_ALIGN != 0) { device_printf(sc->dev, "The number of TX descriptors has to be a multiple of %d\n", ENETC_DESC_ALIGN); error = EINVAL; goto fail; } if (scctx->isc_nrxd[0] % ENETC_DESC_ALIGN != 0) { device_printf(sc->dev, "The number of RX descriptors has to be a multiple of %d\n", ENETC_DESC_ALIGN); error = EINVAL; goto fail; } scctx->isc_txqsizes[0] = scctx->isc_ntxd[0] * sizeof(union enetc_tx_bd); scctx->isc_rxqsizes[0] = scctx->isc_nrxd[0] * sizeof(union enetc_rx_bd); scctx->isc_txd_size[0] = sizeof(union enetc_tx_bd); scctx->isc_rxd_size[0] = sizeof(union enetc_rx_bd); scctx->isc_tx_csum_flags = 0; scctx->isc_capabilities = scctx->isc_capenable = ENETC_IFCAPS; error = enetc_mtu_set(ctx, ETHERMTU); if (error != 0) goto fail; scctx->isc_msix_bar = pci_msix_table_bar(sc->dev); error = enetc_setup_phy(sc); if (error != 0) goto fail; enetc_get_hwaddr(sc); return (0); fail: enetc_detach(ctx); return (error); } static int enetc_attach_post(if_ctx_t ctx) { enetc_init_hw(iflib_get_softc(ctx)); return (0); } static int enetc_detach(if_ctx_t ctx) { struct 
enetc_softc *sc; int error = 0, i; sc = iflib_get_softc(ctx); for (i = 0; i < sc->rx_num_queues; i++) iflib_irq_free(ctx, &sc->rx_queues[i].irq); - if (sc->miibus != NULL) - device_delete_child(sc->dev, sc->miibus); + bus_generic_detach(sc->dev); if (sc->regs != NULL) error = bus_release_resource(sc->dev, SYS_RES_MEMORY, rman_get_rid(sc->regs), sc->regs); if (sc->ctrl_queue.dma.idi_size != 0) iflib_dma_free(&sc->ctrl_queue.dma); mtx_destroy(&sc->mii_lock); return (error); } static int enetc_tx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs, int ntxqs, int ntxqsets) { struct enetc_softc *sc; struct enetc_tx_queue *queue; int i; sc = iflib_get_softc(ctx); MPASS(ntxqs == 1); sc->tx_queues = mallocarray(sc->tx_num_queues, sizeof(struct enetc_tx_queue), M_DEVBUF, M_NOWAIT | M_ZERO); if (sc->tx_queues == NULL) { device_printf(sc->dev, "Failed to allocate memory for TX queues.\n"); return (ENOMEM); } for (i = 0; i < sc->tx_num_queues; i++) { queue = &sc->tx_queues[i]; queue->sc = sc; queue->ring = (union enetc_tx_bd*)(vaddrs[i]); queue->ring_paddr = paddrs[i]; queue->cidx = 0; } return (0); } static int enetc_rx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs, int nrxqs, int nrxqsets) { struct enetc_softc *sc; struct enetc_rx_queue *queue; int i; sc = iflib_get_softc(ctx); MPASS(nrxqs == 1); sc->rx_queues = mallocarray(sc->rx_num_queues, sizeof(struct enetc_rx_queue), M_DEVBUF, M_NOWAIT | M_ZERO); if (sc->rx_queues == NULL) { device_printf(sc->dev, "Failed to allocate memory for RX queues.\n"); return (ENOMEM); } for (i = 0; i < sc->rx_num_queues; i++) { queue = &sc->rx_queues[i]; queue->sc = sc; queue->qid = i; queue->ring = (union enetc_rx_bd*)(vaddrs[i]); queue->ring_paddr = paddrs[i]; } return (0); } static void enetc_queues_free(if_ctx_t ctx) { struct enetc_softc *sc; sc = iflib_get_softc(ctx); if (sc->tx_queues != NULL) { free(sc->tx_queues, M_DEVBUF); sc->tx_queues = NULL; } if (sc->rx_queues != NULL) { free(sc->rx_queues, M_DEVBUF); sc->rx_queues = NULL; } } static void enetc_get_hwaddr(struct enetc_softc *sc) { struct ether_addr hwaddr; uint16_t high; uint32_t low; low = ENETC_PORT_RD4(sc, ENETC_PSIPMAR0(0)); high = ENETC_PORT_RD2(sc, ENETC_PSIPMAR1(0)); memcpy(&hwaddr.octet[0], &low, 4); memcpy(&hwaddr.octet[4], &high, 2); if (ETHER_IS_BROADCAST(hwaddr.octet) || ETHER_IS_MULTICAST(hwaddr.octet) || ETHER_IS_ZERO(hwaddr.octet)) { ether_gen_addr(iflib_get_ifp(sc->ctx), &hwaddr); device_printf(sc->dev, "Failed to obtain MAC address, using a random one\n"); memcpy(&low, &hwaddr.octet[0], 4); memcpy(&high, &hwaddr.octet[4], 2); } iflib_set_mac(sc->ctx, hwaddr.octet); } static void enetc_set_hwaddr(struct enetc_softc *sc) { if_t ifp; uint16_t high; uint32_t low; uint8_t *hwaddr; ifp = iflib_get_ifp(sc->ctx); hwaddr = (uint8_t*)if_getlladdr(ifp); low = *((uint32_t*)hwaddr); high = *((uint16_t*)(hwaddr+4)); ENETC_PORT_WR4(sc, ENETC_PSIPMAR0(0), low); ENETC_PORT_WR2(sc, ENETC_PSIPMAR1(0), high); } static int enetc_setup_rss(struct enetc_softc *sc) { struct iflib_dma_info dma; int error, i, buckets_num = 0; uint8_t *rss_table; uint32_t reg; reg = ENETC_RD4(sc, ENETC_SIPCAPR0); if (reg & ENETC_SIPCAPR0_RSS) { reg = ENETC_RD4(sc, ENETC_SIRSSCAPR); buckets_num = ENETC_SIRSSCAPR_GET_NUM_RSS(reg); } if (buckets_num == 0) return (ENOTSUP); for (i = 0; i < ENETC_RSSHASH_KEY_SIZE / sizeof(uint32_t); i++) { arc4rand((uint8_t *)&reg, sizeof(reg), 0); ENETC_PORT_WR4(sc, ENETC_PRSSK(i), reg); } ENETC_WR4(sc, ENETC_SIRBGCR, sc->rx_num_queues); error = iflib_dma_alloc_align(sc->ctx,
buckets_num * sizeof(*rss_table), ENETC_RING_ALIGN, &dma, 0); if (error != 0) { device_printf(sc->dev, "Failed to allocate DMA buffer for RSS\n"); return (error); } rss_table = (uint8_t *)dma.idi_vaddr; for (i = 0; i < buckets_num; i++) rss_table[i] = i % sc->rx_num_queues; error = enetc_ctrl_send(sc, (BDCR_CMD_RSS << 8) | BDCR_CMD_RSS_WRITE, buckets_num * sizeof(*rss_table), &dma); if (error != 0) device_printf(sc->dev, "Failed to setup RSS table\n"); iflib_dma_free(&dma); return (error); } static int enetc_ctrl_send(struct enetc_softc *sc, uint16_t cmd, uint16_t size, iflib_dma_info_t dma) { struct enetc_ctrl_queue *queue; struct enetc_cbd *desc; int timeout = 1000; queue = &sc->ctrl_queue; desc = &queue->ring[queue->pidx]; if (++queue->pidx == ENETC_MIN_DESC) queue->pidx = 0; desc->addr[0] = (uint32_t)dma->idi_paddr; desc->addr[1] = (uint32_t)(dma->idi_paddr >> 32); desc->index = 0; desc->length = (uint16_t)size; desc->cmd = (uint8_t)cmd; desc->cls = (uint8_t)(cmd >> 8); desc->status_flags = 0; /* Sync command packet, */ bus_dmamap_sync(dma->idi_tag, dma->idi_map, BUS_DMASYNC_PREWRITE); /* and the control ring. */ bus_dmamap_sync(queue->dma.idi_tag, queue->dma.idi_map, BUS_DMASYNC_PREWRITE); ENETC_WR4(sc, ENETC_SICBDRPIR, queue->pidx); while (--timeout != 0) { DELAY(20); if (ENETC_RD4(sc, ENETC_SICBDRCIR) == queue->pidx) break; } if (timeout == 0) return (ETIMEDOUT); bus_dmamap_sync(dma->idi_tag, dma->idi_map, BUS_DMASYNC_POSTREAD); return (0); } static void enetc_init_hw(struct enetc_softc *sc) { uint32_t val; int error; ENETC_PORT_WR4(sc, ENETC_PM0_CMD_CFG, ENETC_PM0_CMD_TXP | ENETC_PM0_PROMISC | ENETC_PM0_TX_EN | ENETC_PM0_RX_EN); ENETC_PORT_WR4(sc, ENETC_PM0_RX_FIFO, ENETC_PM0_RX_FIFO_VAL); val = ENETC_PSICFGR0_SET_TXBDR(sc->tx_num_queues); val |= ENETC_PSICFGR0_SET_RXBDR(sc->rx_num_queues); val |= ENETC_PSICFGR0_SIVC(ENETC_VLAN_TYPE_C | ENETC_VLAN_TYPE_S); ENETC_PORT_WR4(sc, ENETC_PSICFGR0(0), val); ENETC_PORT_WR4(sc, ENETC_PSIPVMR, ENETC_PSIPVMR_SET_VUTA(1)); ENETC_PORT_WR4(sc, ENETC_PVCLCTR, ENETC_VLAN_TYPE_C | ENETC_VLAN_TYPE_S); ENETC_PORT_WR4(sc, ENETC_PSIVLANFMR, ENETC_PSIVLANFMR_VS); ENETC_PORT_WR4(sc, ENETC_PAR_PORT_CFG, ENETC_PAR_PORT_L4CD); ENETC_PORT_WR4(sc, ENETC_PMR, ENETC_PMR_SI0EN | ENETC_PMR_PSPEED_1000M); ENETC_WR4(sc, ENETC_SICAR0, ENETC_SICAR_RD_COHERENT | ENETC_SICAR_WR_COHERENT); ENETC_WR4(sc, ENETC_SICAR1, ENETC_SICAR_MSI); ENETC_WR4(sc, ENETC_SICAR2, ENETC_SICAR_RD_COHERENT | ENETC_SICAR_WR_COHERENT); enetc_init_ctrl(sc); error = enetc_setup_rss(sc); if (error != 0) ENETC_WR4(sc, ENETC_SIMR, ENETC_SIMR_EN); else ENETC_WR4(sc, ENETC_SIMR, ENETC_SIMR_EN | ENETC_SIMR_RSSE); } static void enetc_init_ctrl(struct enetc_softc *sc) { struct enetc_ctrl_queue *queue = &sc->ctrl_queue; ENETC_WR4(sc, ENETC_SICBDRBAR0, (uint32_t)queue->dma.idi_paddr); ENETC_WR4(sc, ENETC_SICBDRBAR1, (uint32_t)(queue->dma.idi_paddr >> 32)); ENETC_WR4(sc, ENETC_SICBDRLENR, queue->dma.idi_size / sizeof(struct enetc_cbd)); queue->pidx = 0; ENETC_WR4(sc, ENETC_SICBDRPIR, queue->pidx); ENETC_WR4(sc, ENETC_SICBDRCIR, queue->pidx); ENETC_WR4(sc, ENETC_SICBDRMR, ENETC_SICBDRMR_EN); } static void enetc_init_tx(struct enetc_softc *sc) { struct enetc_tx_queue *queue; int i; for (i = 0; i < sc->tx_num_queues; i++) { queue = &sc->tx_queues[i]; ENETC_TXQ_WR4(sc, i, ENETC_TBBAR0, (uint32_t)queue->ring_paddr); ENETC_TXQ_WR4(sc, i, ENETC_TBBAR1, (uint32_t)(queue->ring_paddr >> 32)); ENETC_TXQ_WR4(sc, i, ENETC_TBLENR, sc->tx_queue_size); /* * Even though it is undocumented, resetting the TX ring * indices results in a TX hang. * Do the same as Linux and simply keep those unchanged * for the driver's lifetime. */ #if 0 ENETC_TXQ_WR4(sc, i, ENETC_TBPIR, 0); ENETC_TXQ_WR4(sc, i, ENETC_TBCIR, 0); #endif ENETC_TXQ_WR4(sc, i, ENETC_TBMR, ENETC_TBMR_EN); } } static void enetc_init_rx(struct enetc_softc *sc) { struct enetc_rx_queue *queue; uint32_t rx_buf_size; int i; rx_buf_size = iflib_get_rx_mbuf_sz(sc->ctx); for (i = 0; i < sc->rx_num_queues; i++) { queue = &sc->rx_queues[i]; ENETC_RXQ_WR4(sc, i, ENETC_RBBAR0, (uint32_t)queue->ring_paddr); ENETC_RXQ_WR4(sc, i, ENETC_RBBAR1, (uint32_t)(queue->ring_paddr >> 32)); ENETC_RXQ_WR4(sc, i, ENETC_RBLENR, sc->rx_queue_size); ENETC_RXQ_WR4(sc, i, ENETC_RBBSR, rx_buf_size); ENETC_RXQ_WR4(sc, i, ENETC_RBPIR, 0); ENETC_RXQ_WR4(sc, i, ENETC_RBCIR, 0); queue->enabled = false; } } static u_int enetc_hash_mac(void *arg, struct sockaddr_dl *sdl, u_int cnt) { uint64_t *bitmap = arg; uint64_t address = 0; uint8_t hash = 0; bool bit; int i, j; bcopy(LLADDR(sdl), &address, ETHER_ADDR_LEN); /* * The six bit hash is calculated by XORing every * 6th bit of the address. * It is then used as an index in a bitmap that is * written to the device. */ for (i = 0; i < 6; i++) { bit = 0; for (j = 0; j < 8; j++) bit ^= !!(address & BIT(i + j*6)); hash |= bit << i; } *bitmap |= (1 << hash); return (1); } static void enetc_setup_multicast(if_ctx_t ctx) { struct enetc_softc *sc; if_t ifp; uint64_t bitmap = 0; uint8_t revid; sc = iflib_get_softc(ctx); ifp = iflib_get_ifp(ctx); revid = pci_get_revid(sc->dev); if_foreach_llmaddr(ifp, enetc_hash_mac, &bitmap); /* * In revid 1 of this chip the positions of the multicast and unicast * hash filter registers are flipped. */ ENETC_PORT_WR4(sc, ENETC_PSIMMHFR0(0, revid == 1), bitmap & UINT32_MAX); ENETC_PORT_WR4(sc, ENETC_PSIMMHFR1(0), bitmap >> 32); } static uint8_t enetc_hash_vid(uint16_t vid) { uint8_t hash = 0; bool bit; int i; for (i = 0; i < 6; i++) { bit = vid & BIT(i); bit ^= !!(vid & BIT(i + 6)); hash |= bit << i; } return (hash); } static void enetc_vlan_register(if_ctx_t ctx, uint16_t vid) { struct enetc_softc *sc; uint8_t hash; uint64_t bitmap; sc = iflib_get_softc(ctx); hash = enetc_hash_vid(vid); /* Check if hash is already present in the bitmap. */ if (++sc->vlan_bitmap[hash] != 1) return; bitmap = ENETC_PORT_RD4(sc, ENETC_PSIVHFR0(0)); bitmap |= (uint64_t)ENETC_PORT_RD4(sc, ENETC_PSIVHFR1(0)) << 32; bitmap |= BIT(hash); ENETC_PORT_WR4(sc, ENETC_PSIVHFR0(0), bitmap & UINT32_MAX); ENETC_PORT_WR4(sc, ENETC_PSIVHFR1(0), bitmap >> 32); } static void enetc_vlan_unregister(if_ctx_t ctx, uint16_t vid) { struct enetc_softc *sc; uint8_t hash; uint64_t bitmap; sc = iflib_get_softc(ctx); hash = enetc_hash_vid(vid); MPASS(sc->vlan_bitmap[hash] > 0); if (--sc->vlan_bitmap[hash] != 0) return; bitmap = ENETC_PORT_RD4(sc, ENETC_PSIVHFR0(0)); bitmap |= (uint64_t)ENETC_PORT_RD4(sc, ENETC_PSIVHFR1(0)) << 32; bitmap &= ~BIT(hash); ENETC_PORT_WR4(sc, ENETC_PSIVHFR0(0), bitmap & UINT32_MAX); ENETC_PORT_WR4(sc, ENETC_PSIVHFR1(0), bitmap >> 32); } static void enetc_init(if_ctx_t ctx) { struct enetc_softc *sc; struct mii_data *miid; if_t ifp; uint16_t max_frame_length; int baudrate; sc = iflib_get_softc(ctx); ifp = iflib_get_ifp(ctx); max_frame_length = sc->shared->isc_max_frame_size; MPASS(max_frame_length < ENETC_MAX_FRAME_LEN); /* Set max RX and TX frame lengths.
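* The port TX buffer (ENETC_PTXMBAR) is sized to two maximum frames * below, presumably so that one frame can be staged while another * drains.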
*/ ENETC_PORT_WR4(sc, ENETC_PM0_MAXFRM, max_frame_length); ENETC_PORT_WR4(sc, ENETC_PTCMSDUR(0), max_frame_length); ENETC_PORT_WR4(sc, ENETC_PTXMBAR, 2 * max_frame_length); /* Set "VLAN promiscuous" mode if filtering is disabled. */ if ((if_getcapenable(ifp) & IFCAP_VLAN_HWFILTER) == 0) ENETC_PORT_WR4(sc, ENETC_PSIPVMR, ENETC_PSIPVMR_SET_VUTA(1) | ENETC_PSIPVMR_SET_VP(1)); else ENETC_PORT_WR4(sc, ENETC_PSIPVMR, ENETC_PSIPVMR_SET_VUTA(1)); sc->rbmr = ENETC_RBMR_EN; if (if_getcapenable(ifp) & IFCAP_VLAN_HWTAGGING) sc->rbmr |= ENETC_RBMR_VTE; /* Write MAC address to hardware. */ enetc_set_hwaddr(sc); enetc_init_tx(sc); enetc_init_rx(sc); if (sc->fixed_link) { baudrate = ifmedia_baudrate(sc->fixed_ifmedia.ifm_cur->ifm_media); iflib_link_state_change(sc->ctx, LINK_STATE_UP, baudrate); } else { /* * Can't return an error from this function, there is not much * we can do if this fails. */ miid = device_get_softc(sc->miibus); (void)mii_mediachg(miid); } enetc_promisc_set(ctx, if_getflags(ifp)); } static void enetc_disable_txq(struct enetc_softc *sc, int qid) { qidx_t cidx, pidx; int timeout = 10000; /* 10000 * DELAY(100) = 1s */ /* At this point iflib shouldn't be enqueuing any more frames. */ pidx = ENETC_TXQ_RD4(sc, qid, ENETC_TBPIR); cidx = ENETC_TXQ_RD4(sc, qid, ENETC_TBCIR); while (pidx != cidx && timeout--) { DELAY(100); cidx = ENETC_TXQ_RD4(sc, qid, ENETC_TBCIR); } if (timeout == 0) device_printf(sc->dev, "Timeout while waiting for txq%d to stop transmitting packets\n", qid); ENETC_TXQ_WR4(sc, qid, ENETC_TBMR, 0); } static void enetc_stop(if_ctx_t ctx) { struct enetc_softc *sc; int i; sc = iflib_get_softc(ctx); for (i = 0; i < sc->rx_num_queues; i++) ENETC_RXQ_WR4(sc, i, ENETC_RBMR, 0); for (i = 0; i < sc->tx_num_queues; i++) enetc_disable_txq(sc, i); } static int enetc_msix_intr_assign(if_ctx_t ctx, int msix) { struct enetc_softc *sc; struct enetc_rx_queue *rx_queue; struct enetc_tx_queue *tx_queue; int vector = 0, i, error; char irq_name[16]; sc = iflib_get_softc(ctx); MPASS(sc->rx_num_queues + 1 <= ENETC_MSIX_COUNT); MPASS(sc->rx_num_queues == sc->tx_num_queues); for (i = 0; i < sc->rx_num_queues; i++, vector++) { rx_queue = &sc->rx_queues[i]; snprintf(irq_name, sizeof(irq_name), "rxtxq%d", i); error = iflib_irq_alloc_generic(ctx, &rx_queue->irq, vector + 1, IFLIB_INTR_RXTX, NULL, rx_queue, i, irq_name); if (error != 0) goto fail; ENETC_WR4(sc, ENETC_SIMSIRRV(i), vector); ENETC_RXQ_WR4(sc, i, ENETC_RBICR1, ENETC_RX_INTR_TIME_THR); ENETC_RXQ_WR4(sc, i, ENETC_RBICR0, ENETC_RBICR0_ICEN | ENETC_RBICR0_SET_ICPT(ENETC_RX_INTR_PKT_THR)); } vector = 0; for (i = 0; i < sc->tx_num_queues; i++, vector++) { tx_queue = &sc->tx_queues[i]; snprintf(irq_name, sizeof(irq_name), "txq%d", i); iflib_softirq_alloc_generic(ctx, &tx_queue->irq, IFLIB_INTR_TX, tx_queue, i, irq_name); ENETC_WR4(sc, ENETC_SIMSITRV(i), vector); } return (0); fail: for (i = 0; i < sc->rx_num_queues; i++) { rx_queue = &sc->rx_queues[i]; iflib_irq_free(ctx, &rx_queue->irq); } return (error); } static int enetc_tx_queue_intr_enable(if_ctx_t ctx, uint16_t qid) { struct enetc_softc *sc; sc = iflib_get_softc(ctx); ENETC_TXQ_RD4(sc, qid, ENETC_TBIDR); return (0); } static int enetc_rx_queue_intr_enable(if_ctx_t ctx, uint16_t qid) { struct enetc_softc *sc; sc = iflib_get_softc(ctx); ENETC_RXQ_RD4(sc, qid, ENETC_RBIDR); return (0); } static void enetc_intr_enable(if_ctx_t ctx) { struct enetc_softc *sc; int i; sc = iflib_get_softc(ctx); for (i = 0; i < sc->rx_num_queues; i++) ENETC_RXQ_WR4(sc, i, ENETC_RBIER, ENETC_RBIER_RXTIE); for (i = 0; i <
sc->tx_num_queues; i++) ENETC_TXQ_WR4(sc, i, ENETC_TBIER, ENETC_TBIER_TXF); } static void enetc_intr_disable(if_ctx_t ctx) { struct enetc_softc *sc; int i; sc = iflib_get_softc(ctx); for (i = 0; i < sc->rx_num_queues; i++) ENETC_RXQ_WR4(sc, i, ENETC_RBIER, 0); for (i = 0; i < sc->tx_num_queues; i++) ENETC_TXQ_WR4(sc, i, ENETC_TBIER, 0); } static int enetc_isc_txd_encap(void *data, if_pkt_info_t ipi) { struct enetc_softc *sc = data; struct enetc_tx_queue *queue; union enetc_tx_bd *desc; bus_dma_segment_t *segs; qidx_t pidx, queue_len; qidx_t i = 0; queue = &sc->tx_queues[ipi->ipi_qsidx]; segs = ipi->ipi_segs; pidx = ipi->ipi_pidx; queue_len = sc->tx_queue_size; /* * First descriptor is special. We use it to set frame * related information and offloads, e.g. VLAN tag. */ desc = &queue->ring[pidx]; bzero(desc, sizeof(*desc)); desc->frm_len = ipi->ipi_len; desc->addr = segs[i].ds_addr; desc->buf_len = segs[i].ds_len; if (ipi->ipi_flags & IPI_TX_INTR) desc->flags = ENETC_TXBD_FLAGS_FI; i++; if (++pidx == queue_len) pidx = 0; if (ipi->ipi_mflags & M_VLANTAG) { /* VLAN tag is inserted in a separate descriptor. */ desc->flags |= ENETC_TXBD_FLAGS_EX; desc = &queue->ring[pidx]; bzero(desc, sizeof(*desc)); desc->ext.vid = ipi->ipi_vtag; desc->ext.e_flags = ENETC_TXBD_E_FLAGS_VLAN_INS; if (++pidx == queue_len) pidx = 0; } /* Now add remaining descriptors. */ for (;i < ipi->ipi_nsegs; i++) { desc = &queue->ring[pidx]; bzero(desc, sizeof(*desc)); desc->addr = segs[i].ds_addr; desc->buf_len = segs[i].ds_len; if (++pidx == queue_len) pidx = 0; } desc->flags |= ENETC_TXBD_FLAGS_F; ipi->ipi_new_pidx = pidx; return (0); } static void enetc_isc_txd_flush(void *data, uint16_t qid, qidx_t pidx) { struct enetc_softc *sc = data; ENETC_TXQ_WR4(sc, qid, ENETC_TBPIR, pidx); } static int enetc_isc_txd_credits_update(void *data, uint16_t qid, bool clear) { struct enetc_softc *sc = data; struct enetc_tx_queue *queue; int cidx, hw_cidx, count; queue = &sc->tx_queues[qid]; hw_cidx = ENETC_TXQ_RD4(sc, qid, ENETC_TBCIR) & ENETC_TBCIR_IDX_MASK; cidx = queue->cidx; /* * RM states that the ring can hold at most ring_size - 1 descriptors. * Thanks to that we can assume that the ring is empty if cidx == pidx. * This requirement is guaranteed implicitly by iflib as it will only * encap a new frame if we have at least nfrags + 2 descriptors available * on the ring. This driver uses at most one additional descriptor for * VLAN tag insertion. * Also RM states that the TBCIR register is only updated once all * descriptors in the chain have been processed. 
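* Example: with a 512-entry ring, cidx == 500 and hw_cidx == 10 give * hw_cidx - cidx == -490 below, which the ring size corrects to 22 * consumed descriptors.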
*/ if (cidx == hw_cidx) return (0); if (!clear) return (1); count = hw_cidx - cidx; if (count < 0) count += sc->tx_queue_size; queue->cidx = hw_cidx; return (count); } static int enetc_isc_rxd_available(void *data, uint16_t qid, qidx_t pidx, qidx_t budget) { struct enetc_softc *sc = data; struct enetc_rx_queue *queue; qidx_t hw_pidx, queue_len; union enetc_rx_bd *desc; int count = 0; queue = &sc->rx_queues[qid]; desc = &queue->ring[pidx]; queue_len = sc->rx_queue_size; if (desc->r.lstatus == 0) return (0); if (budget == 1) return (1); hw_pidx = ENETC_RXQ_RD4(sc, qid, ENETC_RBPIR); while (pidx != hw_pidx && count < budget) { desc = &queue->ring[pidx]; if (desc->r.lstatus & ENETC_RXBD_LSTATUS_F) count++; if (++pidx == queue_len) pidx = 0; } return (count); } static int enetc_isc_rxd_pkt_get(void *data, if_rxd_info_t ri) { struct enetc_softc *sc = data; struct enetc_rx_queue *queue; union enetc_rx_bd *desc; uint16_t buf_len, pkt_size = 0; qidx_t cidx, queue_len; uint32_t status; int i; cidx = ri->iri_cidx; queue = &sc->rx_queues[ri->iri_qsidx]; desc = &queue->ring[cidx]; status = desc->r.lstatus; queue_len = sc->rx_queue_size; /* * The ready bit will be set only when all descriptors * in the chain have been processed. */ if ((status & ENETC_RXBD_LSTATUS_R) == 0) return (EAGAIN); /* Pass the RSS hash. */ if (status & ENETC_RXBD_FLAG_RSSV) { ri->iri_flowid = desc->r.rss_hash; ri->iri_rsstype = M_HASHTYPE_OPAQUE_HASH; } /* Pass the IP checksum status. */ ri->iri_csum_flags = CSUM_IP_CHECKED; if ((desc->r.parse_summary & ENETC_RXBD_PARSER_ERROR) == 0) ri->iri_csum_flags |= CSUM_IP_VALID; /* Pass the extracted VLAN tag. */ if (status & ENETC_RXBD_FLAG_VLAN) { ri->iri_vtag = desc->r.vlan_opt; ri->iri_flags = M_VLANTAG; } for (i = 0; i < ENETC_MAX_SCATTER; i++) { buf_len = desc->r.buf_len; ri->iri_frags[i].irf_idx = cidx; ri->iri_frags[i].irf_len = buf_len; pkt_size += buf_len; if (desc->r.lstatus & ENETC_RXBD_LSTATUS_F) break; if (++cidx == queue_len) cidx = 0; desc = &queue->ring[cidx]; } ri->iri_nfrags = i + 1; ri->iri_len = pkt_size; MPASS(desc->r.lstatus & ENETC_RXBD_LSTATUS_F); if (status & ENETC_RXBD_LSTATUS(ENETC_RXBD_ERR_MASK)) return (EBADMSG); return (0); } static void enetc_isc_rxd_refill(void *data, if_rxd_update_t iru) { struct enetc_softc *sc = data; struct enetc_rx_queue *queue; union enetc_rx_bd *desc; qidx_t pidx, queue_len; uint64_t *paddrs; int i, count; queue = &sc->rx_queues[iru->iru_qsidx]; paddrs = iru->iru_paddrs; pidx = iru->iru_pidx; count = iru->iru_count; queue_len = sc->rx_queue_size; for (i = 0; i < count; i++) { desc = &queue->ring[pidx]; bzero(desc, sizeof(*desc)); desc->w.addr = paddrs[i]; if (++pidx == queue_len) pidx = 0; } /* * After enabling the queue, the NIC will prefetch the first * 8 descriptors. It probably assumes that the RX ring is fully * refilled when cidx == pidx. * Enable it only if we have enough descriptors ready on the ring. 
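* (The check against 8 below matches that prefetch depth: with fewer than 8 buffers posted, the prefetch could pick up descriptors that were never refilled, so arming the ring is deferred until pidx >= 8.) 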
*/ if (!queue->enabled && pidx >= 8) { ENETC_RXQ_WR4(sc, iru->iru_qsidx, ENETC_RBMR, sc->rbmr); queue->enabled = true; } } static void enetc_isc_rxd_flush(void *data, uint16_t qid, uint8_t flid, qidx_t pidx) { struct enetc_softc *sc = data; ENETC_RXQ_WR4(sc, qid, ENETC_RBCIR, pidx); } static uint64_t enetc_get_counter(if_ctx_t ctx, ift_counter cnt) { struct enetc_softc *sc; if_t ifp; sc = iflib_get_softc(ctx); ifp = iflib_get_ifp(ctx); switch (cnt) { case IFCOUNTER_IERRORS: return (ENETC_PORT_RD8(sc, ENETC_PM0_RERR)); case IFCOUNTER_OERRORS: return (ENETC_PORT_RD8(sc, ENETC_PM0_TERR)); default: return (if_get_counter_default(ifp, cnt)); } } static int enetc_mtu_set(if_ctx_t ctx, uint32_t mtu) { struct enetc_softc *sc = iflib_get_softc(ctx); uint32_t max_frame_size; max_frame_size = mtu + ETHER_HDR_LEN + ETHER_CRC_LEN + sizeof(struct ether_vlan_header); if (max_frame_size > ENETC_MAX_FRAME_LEN) return (EINVAL); sc->shared->isc_max_frame_size = max_frame_size; return (0); } static int enetc_promisc_set(if_ctx_t ctx, int flags) { struct enetc_softc *sc; uint32_t reg = 0; sc = iflib_get_softc(ctx); if (flags & IFF_PROMISC) reg = ENETC_PSIPMR_SET_UP(0) | ENETC_PSIPMR_SET_MP(0); else if (flags & IFF_ALLMULTI) reg = ENETC_PSIPMR_SET_MP(0); ENETC_PORT_WR4(sc, ENETC_PSIPMR, reg); return (0); } static void enetc_timer(if_ctx_t ctx, uint16_t qid) { /* * Poll PHY status. Do this only for qid 0 to save * some cycles. */ if (qid == 0) iflib_admin_intr_deferred(ctx); } static void enetc_update_admin_status(if_ctx_t ctx) { struct enetc_softc *sc; struct mii_data *miid; sc = iflib_get_softc(ctx); if (!sc->fixed_link) { miid = device_get_softc(sc->miibus); mii_tick(miid); } } static bool enetc_if_needs_restart(if_ctx_t ctx __unused, enum iflib_restart_event event) { switch (event) { case IFLIB_RESTART_VLAN_CONFIG: default: return (false); } } static int enetc_miibus_readreg(device_t dev, int phy, int reg) { struct enetc_softc *sc; int val; sc = iflib_get_softc(device_get_softc(dev)); mtx_lock(&sc->mii_lock); val = enetc_mdio_read(sc->regs, ENETC_PORT_BASE + ENETC_EMDIO_BASE, phy, reg); mtx_unlock(&sc->mii_lock); return (val); } static int enetc_miibus_writereg(device_t dev, int phy, int reg, int data) { struct enetc_softc *sc; int ret; sc = iflib_get_softc(device_get_softc(dev)); mtx_lock(&sc->mii_lock); ret = enetc_mdio_write(sc->regs, ENETC_PORT_BASE + ENETC_EMDIO_BASE, phy, reg, data); mtx_unlock(&sc->mii_lock); return (ret); } static void enetc_miibus_linkchg(device_t dev) { enetc_miibus_statchg(dev); } static void enetc_miibus_statchg(device_t dev) { struct enetc_softc *sc; struct mii_data *miid; int link_state, baudrate; sc = iflib_get_softc(device_get_softc(dev)); miid = device_get_softc(sc->miibus); baudrate = ifmedia_baudrate(miid->mii_media_active); if (miid->mii_media_status & IFM_AVALID) { if (miid->mii_media_status & IFM_ACTIVE) link_state = LINK_STATE_UP; else link_state = LINK_STATE_DOWN; } else { link_state = LINK_STATE_UNKNOWN; } iflib_link_state_change(sc->ctx, link_state, baudrate); } static int enetc_media_change(if_t ifp) { struct enetc_softc *sc; struct mii_data *miid; sc = iflib_get_softc(if_getsoftc(ifp)); miid = device_get_softc(sc->miibus); mii_mediachg(miid); return (0); } static void enetc_media_status(if_t ifp, struct ifmediareq* ifmr) { struct enetc_softc *sc; struct mii_data *miid; sc = iflib_get_softc(if_getsoftc(ifp)); miid = device_get_softc(sc->miibus); mii_pollstat(miid); ifmr->ifm_active = miid->mii_media_active; ifmr->ifm_status = miid->mii_media_status; } static int 
enetc_fixed_media_change(if_t ifp) { if_printf(ifp, "Can't change media in fixed-link mode.\n"); return (0); } static void enetc_fixed_media_status(if_t ifp, struct ifmediareq* ifmr) { struct enetc_softc *sc; sc = iflib_get_softc(if_getsoftc(ifp)); ifmr->ifm_status = IFM_AVALID | IFM_ACTIVE; ifmr->ifm_active = sc->fixed_ifmedia.ifm_cur->ifm_media; return; } diff --git a/sys/dev/gem/if_gem.c b/sys/dev/gem/if_gem.c index 434c5309d019..74504a950d31 100644 --- a/sys/dev/gem/if_gem.c +++ b/sys/dev/gem/if_gem.c @@ -1,2232 +1,2232 @@ /*- * SPDX-License-Identifier: BSD-2-Clause * * Copyright (C) 2001 Eduardo Horvath. * Copyright (c) 2001-2003 Thomas Moestl * Copyright (c) 2007 Marius Strobl * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * from: NetBSD: gem.c,v 1.21 2002/06/01 23:50:58 lukem Exp */ #include /* * Driver for Apple GMAC, Sun ERI and Sun GEM Ethernet controllers */ #if 0 #define GEM_DEBUG #endif #if 0 /* XXX: In case of emergency, re-enable this. */ #define GEM_RINT_TIMEOUT #endif #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include CTASSERT(powerof2(GEM_NRXDESC) && GEM_NRXDESC >= 32 && GEM_NRXDESC <= 8192); CTASSERT(powerof2(GEM_NTXDESC) && GEM_NTXDESC >= 32 && GEM_NTXDESC <= 8192); #define GEM_TRIES 10000 /* * The hardware supports basic TCP/UDP checksum offloading. However, * the hardware doesn't compensate the checksum for UDP datagrams, which * can yield 0x0. As a safeguard, UDP checksum offload is disabled * by default. It can be reactivated by setting the special link option * link0 with ifconfig(8). 
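* For example, on a hypothetical gem0 interface, "ifconfig gem0 link0" sets IFF_LINK0 and thereby re-enables CSUM_UDP (see the SIOCSIFFLAGS handling in gem_ioctl()), while "ifconfig gem0 -link0" disables it again; the change takes effect in the hardware-assist flags while IFCAP_TXCSUM is enabled. 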
*/ #define GEM_CSUM_FEATURES (CSUM_TCP) static int gem_add_rxbuf(struct gem_softc *sc, int idx); static int gem_bitwait(struct gem_softc *sc, bus_addr_t r, uint32_t clr, uint32_t set); static void gem_cddma_callback(void *xsc, bus_dma_segment_t *segs, int nsegs, int error); static int gem_disable_rx(struct gem_softc *sc); static int gem_disable_tx(struct gem_softc *sc); static void gem_eint(struct gem_softc *sc, u_int status); static void gem_init(void *xsc); static void gem_init_locked(struct gem_softc *sc); static void gem_init_regs(struct gem_softc *sc); static int gem_ioctl(if_t ifp, u_long cmd, caddr_t data); static int gem_load_txmbuf(struct gem_softc *sc, struct mbuf **m_head); static int gem_meminit(struct gem_softc *sc); static void gem_mifinit(struct gem_softc *sc); static void gem_reset(struct gem_softc *sc); static int gem_reset_rx(struct gem_softc *sc); static void gem_reset_rxdma(struct gem_softc *sc); static int gem_reset_tx(struct gem_softc *sc); static u_int gem_ringsize(u_int sz); static void gem_rint(struct gem_softc *sc); #ifdef GEM_RINT_TIMEOUT static void gem_rint_timeout(void *arg); #endif static inline void gem_rxcksum(struct mbuf *m, uint64_t flags); static void gem_rxdrain(struct gem_softc *sc); static void gem_setladrf(struct gem_softc *sc); static void gem_start(if_t ifp); static void gem_start_locked(if_t ifp); static void gem_stop(if_t ifp, int disable); static void gem_tick(void *arg); static void gem_tint(struct gem_softc *sc); static inline void gem_txkick(struct gem_softc *sc); static int gem_watchdog(struct gem_softc *sc); DRIVER_MODULE(miibus, gem, miibus_driver, 0, 0); MODULE_DEPEND(gem, miibus, 1, 1, 1); #ifdef GEM_DEBUG #include #define KTR_GEM KTR_SPARE2 #endif int gem_attach(struct gem_softc *sc) { struct gem_txsoft *txs; if_t ifp; int error, i, phy; uint32_t v; if (bootverbose) device_printf(sc->sc_dev, "flags=0x%x\n", sc->sc_flags); /* Set up ifnet structure. */ ifp = sc->sc_ifp = if_alloc(IFT_ETHER); sc->sc_csum_features = GEM_CSUM_FEATURES; if_setsoftc(ifp, sc); if_initname(ifp, device_get_name(sc->sc_dev), device_get_unit(sc->sc_dev)); if_setflags(ifp, IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST); if_setstartfn(ifp, gem_start); if_setioctlfn(ifp, gem_ioctl); if_setinitfn(ifp, gem_init); if_setsendqlen(ifp, GEM_TXQUEUELEN); if_setsendqready(ifp); callout_init_mtx(&sc->sc_tick_ch, &sc->sc_mtx, 0); #ifdef GEM_RINT_TIMEOUT callout_init_mtx(&sc->sc_rx_ch, &sc->sc_mtx, 0); #endif /* Make sure the chip is stopped. */ gem_reset(sc); error = bus_dma_tag_create(bus_get_dma_tag(sc->sc_dev), 1, 0, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, BUS_SPACE_MAXSIZE_32BIT, 0, BUS_SPACE_MAXSIZE_32BIT, 0, NULL, NULL, &sc->sc_pdmatag); if (error != 0) goto fail_ifnet; error = bus_dma_tag_create(sc->sc_pdmatag, 1, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL, MCLBYTES, 1, MCLBYTES, BUS_DMA_ALLOCNOW, NULL, NULL, &sc->sc_rdmatag); if (error != 0) goto fail_ptag; error = bus_dma_tag_create(sc->sc_pdmatag, 1, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL, MCLBYTES * GEM_NTXSEGS, GEM_NTXSEGS, MCLBYTES, BUS_DMA_ALLOCNOW, NULL, NULL, &sc->sc_tdmatag); if (error != 0) goto fail_rtag; error = bus_dma_tag_create(sc->sc_pdmatag, PAGE_SIZE, 0, BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL, sizeof(struct gem_control_data), 1, sizeof(struct gem_control_data), 0, NULL, NULL, &sc->sc_cdmatag); if (error != 0) goto fail_ttag; /* * Allocate the control data structures, create and load the * DMA map for it. 
*/ if ((error = bus_dmamem_alloc(sc->sc_cdmatag, (void **)&sc->sc_control_data, BUS_DMA_WAITOK | BUS_DMA_COHERENT | BUS_DMA_ZERO, &sc->sc_cddmamap)) != 0) { device_printf(sc->sc_dev, "unable to allocate control data, error = %d\n", error); goto fail_ctag; } sc->sc_cddma = 0; if ((error = bus_dmamap_load(sc->sc_cdmatag, sc->sc_cddmamap, sc->sc_control_data, sizeof(struct gem_control_data), gem_cddma_callback, sc, 0)) != 0 || sc->sc_cddma == 0) { device_printf(sc->sc_dev, "unable to load control data DMA map, error = %d\n", error); goto fail_cmem; } /* * Initialize the transmit job descriptors. */ STAILQ_INIT(&sc->sc_txfreeq); STAILQ_INIT(&sc->sc_txdirtyq); /* * Create the transmit buffer DMA maps. */ error = ENOMEM; for (i = 0; i < GEM_TXQUEUELEN; i++) { txs = &sc->sc_txsoft[i]; txs->txs_mbuf = NULL; txs->txs_ndescs = 0; if ((error = bus_dmamap_create(sc->sc_tdmatag, 0, &txs->txs_dmamap)) != 0) { device_printf(sc->sc_dev, "unable to create TX DMA map %d, error = %d\n", i, error); goto fail_txd; } STAILQ_INSERT_TAIL(&sc->sc_txfreeq, txs, txs_q); } /* * Create the receive buffer DMA maps. */ for (i = 0; i < GEM_NRXDESC; i++) { if ((error = bus_dmamap_create(sc->sc_rdmatag, 0, &sc->sc_rxsoft[i].rxs_dmamap)) != 0) { device_printf(sc->sc_dev, "unable to create RX DMA map %d, error = %d\n", i, error); goto fail_rxd; } sc->sc_rxsoft[i].rxs_mbuf = NULL; } /* Bypass probing PHYs if we already know for sure to use a SERDES. */ if ((sc->sc_flags & GEM_SERDES) != 0) goto serdes; GEM_WRITE_4(sc, GEM_MII_DATAPATH_MODE, GEM_MII_DATAPATH_MII); GEM_BARRIER(sc, GEM_MII_DATAPATH_MODE, 4, BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE); gem_mifinit(sc); /* * Look for an external PHY. */ error = ENXIO; v = GEM_READ_4(sc, GEM_MIF_CONFIG); if ((v & GEM_MIF_CONFIG_MDI1) != 0) { v |= GEM_MIF_CONFIG_PHY_SEL; GEM_WRITE_4(sc, GEM_MIF_CONFIG, v); GEM_BARRIER(sc, GEM_MIF_CONFIG, 4, BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE); error = mii_attach(sc->sc_dev, &sc->sc_miibus, ifp, gem_mediachange, gem_mediastatus, BMSR_DEFCAPMASK, MII_PHY_ANY, MII_OFFSET_ANY, MIIF_DOPAUSE); } /* * Fall back on an internal PHY if no external PHY was found. * Note that with Apple (K2) GMACs GEM_MIF_CONFIG_MDI0 can't be * trusted when the firmware has powered down the chip. */ if (error != 0 && ((v & GEM_MIF_CONFIG_MDI0) != 0 || GEM_IS_APPLE(sc))) { v &= ~GEM_MIF_CONFIG_PHY_SEL; GEM_WRITE_4(sc, GEM_MIF_CONFIG, v); GEM_BARRIER(sc, GEM_MIF_CONFIG, 4, BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE); switch (sc->sc_variant) { case GEM_APPLE_K2_GMAC: phy = GEM_PHYAD_INTERNAL; break; case GEM_APPLE_GMAC: phy = GEM_PHYAD_EXTERNAL; break; default: phy = MII_PHY_ANY; break; } error = mii_attach(sc->sc_dev, &sc->sc_miibus, ifp, gem_mediachange, gem_mediastatus, BMSR_DEFCAPMASK, phy, MII_OFFSET_ANY, MIIF_DOPAUSE); } /* * Try the external PCS SERDES if we didn't find any PHYs. 
*/ if (error != 0 && sc->sc_variant == GEM_SUN_GEM) { serdes: GEM_WRITE_4(sc, GEM_MII_DATAPATH_MODE, GEM_MII_DATAPATH_SERDES); GEM_BARRIER(sc, GEM_MII_DATAPATH_MODE, 4, BUS_SPACE_BARRIER_WRITE); GEM_WRITE_4(sc, GEM_MII_SLINK_CONTROL, GEM_MII_SLINK_LOOPBACK | GEM_MII_SLINK_EN_SYNC_D); GEM_BARRIER(sc, GEM_MII_SLINK_CONTROL, 4, BUS_SPACE_BARRIER_WRITE); GEM_WRITE_4(sc, GEM_MII_CONFIG, GEM_MII_CONFIG_ENABLE); GEM_BARRIER(sc, GEM_MII_CONFIG, 4, BUS_SPACE_BARRIER_WRITE); sc->sc_flags |= GEM_SERDES; error = mii_attach(sc->sc_dev, &sc->sc_miibus, ifp, gem_mediachange, gem_mediastatus, BMSR_DEFCAPMASK, GEM_PHYAD_EXTERNAL, MII_OFFSET_ANY, MIIF_DOPAUSE); } if (error != 0) { device_printf(sc->sc_dev, "attaching PHYs failed\n"); goto fail_rxd; } sc->sc_mii = device_get_softc(sc->sc_miibus); /* * From this point forward, the attachment cannot fail. A failure * before this point releases all resources that may have been * allocated. */ /* Get RX FIFO size. */ sc->sc_rxfifosize = 64 * GEM_READ_4(sc, GEM_RX_FIFO_SIZE); /* Get TX FIFO size. */ v = GEM_READ_4(sc, GEM_TX_FIFO_SIZE); device_printf(sc->sc_dev, "%ukB RX FIFO, %ukB TX FIFO\n", sc->sc_rxfifosize / 1024, v / 16); /* Attach the interface. */ ether_ifattach(ifp, sc->sc_enaddr); /* * Tell the upper layer(s) we support long frames/checksum offloads. */ if_setifheaderlen(ifp, sizeof(struct ether_vlan_header)); if_setcapabilitiesbit(ifp, IFCAP_VLAN_MTU | IFCAP_HWCSUM, 0); if_sethwassistbits(ifp, sc->sc_csum_features, 0); if_setcapenablebit(ifp, IFCAP_VLAN_MTU | IFCAP_HWCSUM, 0); return (0); /* * Free any resources we've allocated during the failed attach * attempt. Do this in reverse order and fall through. */ fail_rxd: for (i = 0; i < GEM_NRXDESC; i++) if (sc->sc_rxsoft[i].rxs_dmamap != NULL) bus_dmamap_destroy(sc->sc_rdmatag, sc->sc_rxsoft[i].rxs_dmamap); fail_txd: for (i = 0; i < GEM_TXQUEUELEN; i++) if (sc->sc_txsoft[i].txs_dmamap != NULL) bus_dmamap_destroy(sc->sc_tdmatag, sc->sc_txsoft[i].txs_dmamap); bus_dmamap_unload(sc->sc_cdmatag, sc->sc_cddmamap); fail_cmem: bus_dmamem_free(sc->sc_cdmatag, sc->sc_control_data, sc->sc_cddmamap); fail_ctag: bus_dma_tag_destroy(sc->sc_cdmatag); fail_ttag: bus_dma_tag_destroy(sc->sc_tdmatag); fail_rtag: bus_dma_tag_destroy(sc->sc_rdmatag); fail_ptag: bus_dma_tag_destroy(sc->sc_pdmatag); fail_ifnet: if_free(ifp); return (error); } void gem_detach(struct gem_softc *sc) { if_t ifp = sc->sc_ifp; int i; ether_ifdetach(ifp); GEM_LOCK(sc); gem_stop(ifp, 1); GEM_UNLOCK(sc); callout_drain(&sc->sc_tick_ch); #ifdef GEM_RINT_TIMEOUT callout_drain(&sc->sc_rx_ch); #endif if_free(ifp); - device_delete_child(sc->sc_dev, sc->sc_miibus); + bus_generic_detach(sc->sc_dev); for (i = 0; i < GEM_NRXDESC; i++) if (sc->sc_rxsoft[i].rxs_dmamap != NULL) bus_dmamap_destroy(sc->sc_rdmatag, sc->sc_rxsoft[i].rxs_dmamap); for (i = 0; i < GEM_TXQUEUELEN; i++) if (sc->sc_txsoft[i].txs_dmamap != NULL) bus_dmamap_destroy(sc->sc_tdmatag, sc->sc_txsoft[i].txs_dmamap); GEM_CDSYNC(sc, BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); bus_dmamap_unload(sc->sc_cdmatag, sc->sc_cddmamap); bus_dmamem_free(sc->sc_cdmatag, sc->sc_control_data, sc->sc_cddmamap); bus_dma_tag_destroy(sc->sc_cdmatag); bus_dma_tag_destroy(sc->sc_tdmatag); bus_dma_tag_destroy(sc->sc_rdmatag); bus_dma_tag_destroy(sc->sc_pdmatag); } void gem_suspend(struct gem_softc *sc) { if_t ifp = sc->sc_ifp; GEM_LOCK(sc); gem_stop(ifp, 0); GEM_UNLOCK(sc); } void gem_resume(struct gem_softc *sc) { if_t ifp = sc->sc_ifp; GEM_LOCK(sc); /* * On resume all registers have to be initialized again like * 
after power-on. */ sc->sc_flags &= ~GEM_INITED; if (if_getflags(ifp) & IFF_UP) gem_init_locked(sc); GEM_UNLOCK(sc); } static inline void gem_rxcksum(struct mbuf *m, uint64_t flags) { struct ether_header *eh; struct ip *ip; struct udphdr *uh; uint16_t *opts; int32_t hlen, len, pktlen; uint32_t temp32; uint16_t cksum; pktlen = m->m_pkthdr.len; if (pktlen < sizeof(struct ether_header) + sizeof(struct ip)) return; eh = mtod(m, struct ether_header *); if (eh->ether_type != htons(ETHERTYPE_IP)) return; ip = (struct ip *)(eh + 1); if (ip->ip_v != IPVERSION) return; hlen = ip->ip_hl << 2; pktlen -= sizeof(struct ether_header); if (hlen < sizeof(struct ip)) return; if (ntohs(ip->ip_len) < hlen) return; if (ntohs(ip->ip_len) != pktlen) return; if (ip->ip_off & htons(IP_MF | IP_OFFMASK)) return; /* Cannot handle fragmented packet. */ switch (ip->ip_p) { case IPPROTO_TCP: if (pktlen < (hlen + sizeof(struct tcphdr))) return; break; case IPPROTO_UDP: if (pktlen < (hlen + sizeof(struct udphdr))) return; uh = (struct udphdr *)((uint8_t *)ip + hlen); if (uh->uh_sum == 0) return; /* no checksum */ break; default: return; } cksum = ~(flags & GEM_RD_CHECKSUM); /* checksum fixup for IP options */ len = hlen - sizeof(struct ip); if (len > 0) { opts = (uint16_t *)(ip + 1); for (; len > 0; len -= sizeof(uint16_t), opts++) { temp32 = cksum - *opts; temp32 = (temp32 >> 16) + (temp32 & 65535); cksum = temp32 & 65535; } } m->m_pkthdr.csum_flags |= CSUM_DATA_VALID; m->m_pkthdr.csum_data = cksum; } static void gem_cddma_callback(void *xsc, bus_dma_segment_t *segs, int nsegs, int error) { struct gem_softc *sc = xsc; if (error != 0) return; if (nsegs != 1) panic("%s: bad control buffer segment count", __func__); sc->sc_cddma = segs[0].ds_addr; } static void gem_tick(void *arg) { struct gem_softc *sc = arg; if_t ifp = sc->sc_ifp; uint32_t v; GEM_LOCK_ASSERT(sc, MA_OWNED); /* * Unload collision and error counters. */ if_inc_counter(ifp, IFCOUNTER_COLLISIONS, GEM_READ_4(sc, GEM_MAC_NORM_COLL_CNT) + GEM_READ_4(sc, GEM_MAC_FIRST_COLL_CNT)); v = GEM_READ_4(sc, GEM_MAC_EXCESS_COLL_CNT) + GEM_READ_4(sc, GEM_MAC_LATE_COLL_CNT); if_inc_counter(ifp, IFCOUNTER_COLLISIONS, v); if_inc_counter(ifp, IFCOUNTER_OERRORS, v); if_inc_counter(ifp, IFCOUNTER_IERRORS, GEM_READ_4(sc, GEM_MAC_RX_LEN_ERR_CNT) + GEM_READ_4(sc, GEM_MAC_RX_ALIGN_ERR) + GEM_READ_4(sc, GEM_MAC_RX_CRC_ERR_CNT) + GEM_READ_4(sc, GEM_MAC_RX_CODE_VIOL)); /* * Then clear the hardware counters. */ GEM_WRITE_4(sc, GEM_MAC_NORM_COLL_CNT, 0); GEM_WRITE_4(sc, GEM_MAC_FIRST_COLL_CNT, 0); GEM_WRITE_4(sc, GEM_MAC_EXCESS_COLL_CNT, 0); GEM_WRITE_4(sc, GEM_MAC_LATE_COLL_CNT, 0); GEM_WRITE_4(sc, GEM_MAC_RX_LEN_ERR_CNT, 0); GEM_WRITE_4(sc, GEM_MAC_RX_ALIGN_ERR, 0); GEM_WRITE_4(sc, GEM_MAC_RX_CRC_ERR_CNT, 0); GEM_WRITE_4(sc, GEM_MAC_RX_CODE_VIOL, 0); mii_tick(sc->sc_mii); if (gem_watchdog(sc) == EJUSTRETURN) return; callout_reset(&sc->sc_tick_ch, hz, gem_tick, sc); } static int gem_bitwait(struct gem_softc *sc, bus_addr_t r, uint32_t clr, uint32_t set) { int i; uint32_t reg; for (i = GEM_TRIES; i--; DELAY(100)) { reg = GEM_READ_4(sc, r); if ((reg & clr) == 0 && (reg & set) == set) return (1); } return (0); } static void gem_reset(struct gem_softc *sc) { #ifdef GEM_DEBUG CTR2(KTR_GEM, "%s: %s", device_get_name(sc->sc_dev), __func__); #endif gem_reset_rx(sc); gem_reset_tx(sc); /* Do a full reset. 
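* That is, assert the RX and TX reset bits in a single write and poll with gem_bitwait() until the chip clears them again. 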
*/ GEM_WRITE_4(sc, GEM_RESET, GEM_RESET_RX | GEM_RESET_TX); GEM_BARRIER(sc, GEM_RESET, 4, BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE); if (!gem_bitwait(sc, GEM_RESET, GEM_RESET_RX | GEM_RESET_TX, 0)) device_printf(sc->sc_dev, "cannot reset device\n"); } static void gem_rxdrain(struct gem_softc *sc) { struct gem_rxsoft *rxs; int i; for (i = 0; i < GEM_NRXDESC; i++) { rxs = &sc->sc_rxsoft[i]; if (rxs->rxs_mbuf != NULL) { bus_dmamap_sync(sc->sc_rdmatag, rxs->rxs_dmamap, BUS_DMASYNC_POSTREAD); bus_dmamap_unload(sc->sc_rdmatag, rxs->rxs_dmamap); m_freem(rxs->rxs_mbuf); rxs->rxs_mbuf = NULL; } } } static void gem_stop(if_t ifp, int disable) { struct gem_softc *sc = if_getsoftc(ifp); struct gem_txsoft *txs; #ifdef GEM_DEBUG CTR2(KTR_GEM, "%s: %s", device_get_name(sc->sc_dev), __func__); #endif callout_stop(&sc->sc_tick_ch); #ifdef GEM_RINT_TIMEOUT callout_stop(&sc->sc_rx_ch); #endif gem_reset_tx(sc); gem_reset_rx(sc); /* * Release any queued transmit buffers. */ while ((txs = STAILQ_FIRST(&sc->sc_txdirtyq)) != NULL) { STAILQ_REMOVE_HEAD(&sc->sc_txdirtyq, txs_q); if (txs->txs_ndescs != 0) { bus_dmamap_sync(sc->sc_tdmatag, txs->txs_dmamap, BUS_DMASYNC_POSTWRITE); bus_dmamap_unload(sc->sc_tdmatag, txs->txs_dmamap); if (txs->txs_mbuf != NULL) { m_freem(txs->txs_mbuf); txs->txs_mbuf = NULL; } } STAILQ_INSERT_TAIL(&sc->sc_txfreeq, txs, txs_q); } if (disable) gem_rxdrain(sc); /* * Mark the interface down and cancel the watchdog timer. */ if_setdrvflagbits(ifp, 0, (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)); sc->sc_flags &= ~GEM_LINK; sc->sc_wdog_timer = 0; } static int gem_reset_rx(struct gem_softc *sc) { /* * Resetting while DMA is in progress can cause a bus hang, so we * disable DMA first. */ (void)gem_disable_rx(sc); GEM_WRITE_4(sc, GEM_RX_CONFIG, 0); GEM_BARRIER(sc, GEM_RX_CONFIG, 4, BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE); if (!gem_bitwait(sc, GEM_RX_CONFIG, GEM_RX_CONFIG_RXDMA_EN, 0)) device_printf(sc->sc_dev, "cannot disable RX DMA\n"); /* Wait 5ms extra. */ DELAY(5000); /* Reset the ERX. */ GEM_WRITE_4(sc, GEM_RESET, GEM_RESET_RX); GEM_BARRIER(sc, GEM_RESET, 4, BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE); if (!gem_bitwait(sc, GEM_RESET, GEM_RESET_RX, 0)) { device_printf(sc->sc_dev, "cannot reset receiver\n"); return (1); } /* Finally, reset RX MAC. */ GEM_WRITE_4(sc, GEM_MAC_RXRESET, 1); GEM_BARRIER(sc, GEM_MAC_RXRESET, 4, BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE); if (!gem_bitwait(sc, GEM_MAC_RXRESET, 1, 0)) { device_printf(sc->sc_dev, "cannot reset RX MAC\n"); return (1); } return (0); } /* * Reset the receiver DMA engine. * * Intended to be used in case of GEM_INTR_RX_TAG_ERR, GEM_MAC_RX_OVERFLOW * etc in order to reset the receiver DMA engine only and not do a full * reset which amongst others also downs the link and clears the FIFOs. */ static void gem_reset_rxdma(struct gem_softc *sc) { int i; if (gem_reset_rx(sc) != 0) { if_setdrvflagbits(sc->sc_ifp, 0, IFF_DRV_RUNNING); return (gem_init_locked(sc)); } for (i = 0; i < GEM_NRXDESC; i++) if (sc->sc_rxsoft[i].rxs_mbuf != NULL) GEM_UPDATE_RXDESC(sc, i); sc->sc_rxptr = 0; GEM_CDSYNC(sc, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); /* NOTE: we use only 32-bit DMA addresses here. 
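* This works because all DMA tags used here descend from sc_pdmatag, which gem_attach() created with a BUS_SPACE_MAXADDR_32BIT lowaddr, so descriptor ring addresses are guaranteed to fit in 32 bits and the _HI registers can simply be set to zero. 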
*/ GEM_WRITE_4(sc, GEM_RX_RING_PTR_HI, 0); GEM_WRITE_4(sc, GEM_RX_RING_PTR_LO, GEM_CDRXADDR(sc, 0)); GEM_WRITE_4(sc, GEM_RX_KICK, GEM_NRXDESC - 4); GEM_WRITE_4(sc, GEM_RX_CONFIG, gem_ringsize(GEM_NRXDESC /* XXX */) | ((ETHER_HDR_LEN + sizeof(struct ip)) << GEM_RX_CONFIG_CXM_START_SHFT) | (GEM_THRSH_1024 << GEM_RX_CONFIG_FIFO_THRS_SHIFT) | (ETHER_ALIGN << GEM_RX_CONFIG_FBOFF_SHFT)); GEM_WRITE_4(sc, GEM_RX_BLANKING, ((6 * ((sc->sc_flags & GEM_PCI66) != 0 ? 2 : 1)) << GEM_RX_BLANKING_TIME_SHIFT) | 6); GEM_WRITE_4(sc, GEM_RX_PAUSE_THRESH, (3 * sc->sc_rxfifosize / 256) | ((sc->sc_rxfifosize / 256) << 12)); GEM_WRITE_4(sc, GEM_RX_CONFIG, GEM_READ_4(sc, GEM_RX_CONFIG) | GEM_RX_CONFIG_RXDMA_EN); GEM_WRITE_4(sc, GEM_MAC_RX_MASK, GEM_MAC_RX_DONE | GEM_MAC_RX_FRAME_CNT); /* * Clear the RX filter and reprogram it. This will also set the * current RX MAC configuration and enable it. */ gem_setladrf(sc); } static int gem_reset_tx(struct gem_softc *sc) { /* * Resetting while DMA is in progress can cause a bus hang, so we * disable DMA first. */ (void)gem_disable_tx(sc); GEM_WRITE_4(sc, GEM_TX_CONFIG, 0); GEM_BARRIER(sc, GEM_TX_CONFIG, 4, BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE); if (!gem_bitwait(sc, GEM_TX_CONFIG, GEM_TX_CONFIG_TXDMA_EN, 0)) device_printf(sc->sc_dev, "cannot disable TX DMA\n"); /* Wait 5ms extra. */ DELAY(5000); /* Finally, reset the ETX. */ GEM_WRITE_4(sc, GEM_RESET, GEM_RESET_TX); GEM_BARRIER(sc, GEM_RESET, 4, BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE); if (!gem_bitwait(sc, GEM_RESET, GEM_RESET_TX, 0)) { device_printf(sc->sc_dev, "cannot reset transmitter\n"); return (1); } return (0); } static int gem_disable_rx(struct gem_softc *sc) { GEM_WRITE_4(sc, GEM_MAC_RX_CONFIG, GEM_READ_4(sc, GEM_MAC_RX_CONFIG) & ~GEM_MAC_RX_ENABLE); GEM_BARRIER(sc, GEM_MAC_RX_CONFIG, 4, BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE); if (gem_bitwait(sc, GEM_MAC_RX_CONFIG, GEM_MAC_RX_ENABLE, 0)) return (1); device_printf(sc->sc_dev, "cannot disable RX MAC\n"); return (0); } static int gem_disable_tx(struct gem_softc *sc) { GEM_WRITE_4(sc, GEM_MAC_TX_CONFIG, GEM_READ_4(sc, GEM_MAC_TX_CONFIG) & ~GEM_MAC_TX_ENABLE); GEM_BARRIER(sc, GEM_MAC_TX_CONFIG, 4, BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE); if (gem_bitwait(sc, GEM_MAC_TX_CONFIG, GEM_MAC_TX_ENABLE, 0)) return (1); device_printf(sc->sc_dev, "cannot disable TX MAC\n"); return (0); } static int gem_meminit(struct gem_softc *sc) { struct gem_rxsoft *rxs; int error, i; GEM_LOCK_ASSERT(sc, MA_OWNED); /* * Initialize the transmit descriptor ring. */ for (i = 0; i < GEM_NTXDESC; i++) { sc->sc_txdescs[i].gd_flags = 0; sc->sc_txdescs[i].gd_addr = 0; } sc->sc_txfree = GEM_MAXTXFREE; sc->sc_txnext = 0; sc->sc_txwin = 0; /* * Initialize the receive descriptor and receive job * descriptor rings. */ for (i = 0; i < GEM_NRXDESC; i++) { rxs = &sc->sc_rxsoft[i]; if (rxs->rxs_mbuf == NULL) { if ((error = gem_add_rxbuf(sc, i)) != 0) { device_printf(sc->sc_dev, "unable to allocate or map RX buffer %d, " "error = %d\n", i, error); /* * XXX we should attempt to run with fewer * receive buffers instead of just failing. 
*/ gem_rxdrain(sc); return (1); } } else GEM_INIT_RXDESC(sc, i); } sc->sc_rxptr = 0; GEM_CDSYNC(sc, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); return (0); } static u_int gem_ringsize(u_int sz) { switch (sz) { case 32: return (GEM_RING_SZ_32); case 64: return (GEM_RING_SZ_64); case 128: return (GEM_RING_SZ_128); case 256: return (GEM_RING_SZ_256); case 512: return (GEM_RING_SZ_512); case 1024: return (GEM_RING_SZ_1024); case 2048: return (GEM_RING_SZ_2048); case 4096: return (GEM_RING_SZ_4096); case 8192: return (GEM_RING_SZ_8192); default: printf("%s: invalid ring size %d\n", __func__, sz); return (GEM_RING_SZ_32); } } static void gem_init(void *xsc) { struct gem_softc *sc = xsc; GEM_LOCK(sc); gem_init_locked(sc); GEM_UNLOCK(sc); } /* * Initialization of interface; set up initialization block * and transmit/receive descriptor rings. */ static void gem_init_locked(struct gem_softc *sc) { if_t ifp = sc->sc_ifp; uint32_t v; GEM_LOCK_ASSERT(sc, MA_OWNED); if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0) return; #ifdef GEM_DEBUG CTR2(KTR_GEM, "%s: %s: calling stop", device_get_name(sc->sc_dev), __func__); #endif /* * Initialization sequence. The numbered steps below correspond * to the sequence outlined in section 6.3.5.1 in the Ethernet * Channel Engine manual (part of the PCIO manual). * See also the STP2002-STQ document from Sun Microsystems. */ /* step 1 & 2. Reset the Ethernet Channel. */ gem_stop(ifp, 0); gem_reset(sc); #ifdef GEM_DEBUG CTR2(KTR_GEM, "%s: %s: restarting", device_get_name(sc->sc_dev), __func__); #endif if ((sc->sc_flags & GEM_SERDES) == 0) /* Re-initialize the MIF. */ gem_mifinit(sc); /* step 3. Setup data structures in host memory. */ if (gem_meminit(sc) != 0) return; /* step 4. TX MAC registers & counters */ gem_init_regs(sc); /* step 5. RX MAC registers & counters */ /* step 6 & 7. Program Descriptor Ring Base Addresses. */ /* NOTE: we use only 32-bit DMA addresses here. */ GEM_WRITE_4(sc, GEM_TX_RING_PTR_HI, 0); GEM_WRITE_4(sc, GEM_TX_RING_PTR_LO, GEM_CDTXADDR(sc, 0)); GEM_WRITE_4(sc, GEM_RX_RING_PTR_HI, 0); GEM_WRITE_4(sc, GEM_RX_RING_PTR_LO, GEM_CDRXADDR(sc, 0)); #ifdef GEM_DEBUG CTR3(KTR_GEM, "loading RX ring %lx, TX ring %lx, cddma %lx", GEM_CDRXADDR(sc, 0), GEM_CDTXADDR(sc, 0), sc->sc_cddma); #endif /* step 8. Global Configuration & Interrupt Mask */ /* * Set the internal arbitration to "infinite" bursts of the * maximum length of 31 * 64 bytes so DMA transfers aren't * split up in cache line size chunks. This greatly improves * RX performance. * Enable silicon bug workarounds for the Apple variants. */ GEM_WRITE_4(sc, GEM_CONFIG, GEM_CONFIG_TXDMA_LIMIT | GEM_CONFIG_RXDMA_LIMIT | GEM_CONFIG_BURST_INF | (GEM_IS_APPLE(sc) ? GEM_CONFIG_RONPAULBIT | GEM_CONFIG_BUG2FIX : 0)); GEM_WRITE_4(sc, GEM_INTMASK, ~(GEM_INTR_TX_INTME | GEM_INTR_TX_EMPTY | GEM_INTR_RX_DONE | GEM_INTR_RX_NOBUF | GEM_INTR_RX_TAG_ERR | GEM_INTR_PERR | GEM_INTR_BERR #ifdef GEM_DEBUG | GEM_INTR_PCS | GEM_INTR_MIF #endif )); GEM_WRITE_4(sc, GEM_MAC_RX_MASK, GEM_MAC_RX_DONE | GEM_MAC_RX_FRAME_CNT); GEM_WRITE_4(sc, GEM_MAC_TX_MASK, GEM_MAC_TX_XMIT_DONE | GEM_MAC_TX_DEFER_EXP | GEM_MAC_TX_PEAK_EXP); #ifdef GEM_DEBUG GEM_WRITE_4(sc, GEM_MAC_CONTROL_MASK, ~(GEM_MAC_PAUSED | GEM_MAC_PAUSE | GEM_MAC_RESUME)); #else GEM_WRITE_4(sc, GEM_MAC_CONTROL_MASK, GEM_MAC_PAUSED | GEM_MAC_PAUSE | GEM_MAC_RESUME); #endif /* step 9. ETX Configuration: use mostly default values. */ /* Enable DMA. */ v = gem_ringsize(GEM_NTXDESC); /* Set TX FIFO threshold and enable DMA. 
*/ v |= (0x4ff << 10) & GEM_TX_CONFIG_TXFIFO_TH; GEM_WRITE_4(sc, GEM_TX_CONFIG, v | GEM_TX_CONFIG_TXDMA_EN); /* step 10. ERX Configuration */ /* Encode Receive Descriptor ring size. */ v = gem_ringsize(GEM_NRXDESC /* XXX */); /* RX TCP/UDP checksum offset */ v |= ((ETHER_HDR_LEN + sizeof(struct ip)) << GEM_RX_CONFIG_CXM_START_SHFT); /* Set RX FIFO threshold, set first byte offset and enable DMA. */ GEM_WRITE_4(sc, GEM_RX_CONFIG, v | (GEM_THRSH_1024 << GEM_RX_CONFIG_FIFO_THRS_SHIFT) | (ETHER_ALIGN << GEM_RX_CONFIG_FBOFF_SHFT) | GEM_RX_CONFIG_RXDMA_EN); GEM_WRITE_4(sc, GEM_RX_BLANKING, ((6 * ((sc->sc_flags & GEM_PCI66) != 0 ? 2 : 1)) << GEM_RX_BLANKING_TIME_SHIFT) | 6); /* * The following value is for an OFF Threshold of about 3/4 full * and an ON Threshold of 1/4 full. */ GEM_WRITE_4(sc, GEM_RX_PAUSE_THRESH, (3 * sc->sc_rxfifosize / 256) | ((sc->sc_rxfifosize / 256) << 12)); /* step 11. Configure Media. */ /* step 12. RX_MAC Configuration Register */ v = GEM_READ_4(sc, GEM_MAC_RX_CONFIG); v &= ~GEM_MAC_RX_ENABLE; v |= GEM_MAC_RX_STRIP_CRC; sc->sc_mac_rxcfg = v; /* * Clear the RX filter and reprogram it. This will also set the * current RX MAC configuration and enable it. */ gem_setladrf(sc); /* step 13. TX_MAC Configuration Register */ v = GEM_READ_4(sc, GEM_MAC_TX_CONFIG); v |= GEM_MAC_TX_ENABLE; (void)gem_disable_tx(sc); GEM_WRITE_4(sc, GEM_MAC_TX_CONFIG, v); /* step 14. Issue Transmit Pending command. */ /* step 15. Give the receiver a swift kick. */ GEM_WRITE_4(sc, GEM_RX_KICK, GEM_NRXDESC - 4); if_setdrvflagbits(ifp, IFF_DRV_RUNNING, 0); if_setdrvflagbits(ifp, 0, IFF_DRV_OACTIVE); mii_mediachg(sc->sc_mii); /* Start the one second timer. */ sc->sc_wdog_timer = 0; callout_reset(&sc->sc_tick_ch, hz, gem_tick, sc); } static int gem_load_txmbuf(struct gem_softc *sc, struct mbuf **m_head) { bus_dma_segment_t txsegs[GEM_NTXSEGS]; struct gem_txsoft *txs; struct ip *ip; struct mbuf *m; uint64_t cflags, flags; int error, nexttx, nsegs, offset, seg; GEM_LOCK_ASSERT(sc, MA_OWNED); /* Get a work queue entry. */ if ((txs = STAILQ_FIRST(&sc->sc_txfreeq)) == NULL) { /* Ran out of descriptors. */ return (ENOBUFS); } cflags = 0; if (((*m_head)->m_pkthdr.csum_flags & sc->sc_csum_features) != 0) { if (M_WRITABLE(*m_head) == 0) { m = m_dup(*m_head, M_NOWAIT); m_freem(*m_head); *m_head = m; if (m == NULL) return (ENOBUFS); } offset = sizeof(struct ether_header); m = m_pullup(*m_head, offset + sizeof(struct ip)); if (m == NULL) { *m_head = NULL; return (ENOBUFS); } ip = (struct ip *)(mtod(m, caddr_t) + offset); offset += (ip->ip_hl << 2); cflags = offset << GEM_TD_CXSUM_STARTSHFT | ((offset + m->m_pkthdr.csum_data) << GEM_TD_CXSUM_STUFFSHFT) | GEM_TD_CXSUM_ENABLE; *m_head = m; } error = bus_dmamap_load_mbuf_sg(sc->sc_tdmatag, txs->txs_dmamap, *m_head, txsegs, &nsegs, BUS_DMA_NOWAIT); if (error == EFBIG) { m = m_collapse(*m_head, M_NOWAIT, GEM_NTXSEGS); if (m == NULL) { m_freem(*m_head); *m_head = NULL; return (ENOBUFS); } *m_head = m; error = bus_dmamap_load_mbuf_sg(sc->sc_tdmatag, txs->txs_dmamap, *m_head, txsegs, &nsegs, BUS_DMA_NOWAIT); if (error != 0) { m_freem(*m_head); *m_head = NULL; return (error); } } else if (error != 0) return (error); /* If nsegs is wrong then the stack is corrupt. */ KASSERT(nsegs <= GEM_NTXSEGS, ("%s: too many DMA segments (%d)", __func__, nsegs)); if (nsegs == 0) { m_freem(*m_head); *m_head = NULL; return (EIO); } /* * Ensure we have enough descriptors free to describe * the packet. 
Note, we always reserve one descriptor * at the end of the ring as a termination point, in * order to prevent wrap-around. */ if (nsegs > sc->sc_txfree - 1) { txs->txs_ndescs = 0; bus_dmamap_unload(sc->sc_tdmatag, txs->txs_dmamap); return (ENOBUFS); } txs->txs_ndescs = nsegs; txs->txs_firstdesc = sc->sc_txnext; nexttx = txs->txs_firstdesc; for (seg = 0; seg < nsegs; seg++, nexttx = GEM_NEXTTX(nexttx)) { #ifdef GEM_DEBUG CTR6(KTR_GEM, "%s: mapping seg %d (txd %d), len %lx, addr %#lx (%#lx)", __func__, seg, nexttx, txsegs[seg].ds_len, txsegs[seg].ds_addr, htole64(txsegs[seg].ds_addr)); #endif sc->sc_txdescs[nexttx].gd_addr = htole64(txsegs[seg].ds_addr); KASSERT(txsegs[seg].ds_len < GEM_TD_BUFSIZE, ("%s: segment size too large!", __func__)); flags = txsegs[seg].ds_len & GEM_TD_BUFSIZE; sc->sc_txdescs[nexttx].gd_flags = htole64(flags | cflags); txs->txs_lastdesc = nexttx; } /* Set EOP on the last descriptor. */ #ifdef GEM_DEBUG CTR3(KTR_GEM, "%s: end of packet at segment %d, TX %d", __func__, seg, nexttx); #endif sc->sc_txdescs[txs->txs_lastdesc].gd_flags |= htole64(GEM_TD_END_OF_PACKET); /* Lastly set SOP on the first descriptor. */ #ifdef GEM_DEBUG CTR3(KTR_GEM, "%s: start of packet at segment %d, TX %d", __func__, seg, nexttx); #endif if (++sc->sc_txwin > GEM_NTXSEGS * 2 / 3) { sc->sc_txwin = 0; sc->sc_txdescs[txs->txs_firstdesc].gd_flags |= htole64(GEM_TD_INTERRUPT_ME | GEM_TD_START_OF_PACKET); } else sc->sc_txdescs[txs->txs_firstdesc].gd_flags |= htole64(GEM_TD_START_OF_PACKET); /* Sync the DMA map. */ bus_dmamap_sync(sc->sc_tdmatag, txs->txs_dmamap, BUS_DMASYNC_PREWRITE); #ifdef GEM_DEBUG CTR4(KTR_GEM, "%s: setting firstdesc=%d, lastdesc=%d, ndescs=%d", __func__, txs->txs_firstdesc, txs->txs_lastdesc, txs->txs_ndescs); #endif STAILQ_REMOVE_HEAD(&sc->sc_txfreeq, txs_q); STAILQ_INSERT_TAIL(&sc->sc_txdirtyq, txs, txs_q); txs->txs_mbuf = *m_head; sc->sc_txnext = GEM_NEXTTX(txs->txs_lastdesc); sc->sc_txfree -= txs->txs_ndescs; return (0); } static void gem_init_regs(struct gem_softc *sc) { const u_char *laddr = if_getlladdr(sc->sc_ifp); GEM_LOCK_ASSERT(sc, MA_OWNED); /* These registers are not cleared on reset. */ if ((sc->sc_flags & GEM_INITED) == 0) { /* magic values */ GEM_WRITE_4(sc, GEM_MAC_IPG0, 0); GEM_WRITE_4(sc, GEM_MAC_IPG1, 8); GEM_WRITE_4(sc, GEM_MAC_IPG2, 4); /* min frame length */ GEM_WRITE_4(sc, GEM_MAC_MAC_MIN_FRAME, ETHER_MIN_LEN); /* max frame length and max burst size */ GEM_WRITE_4(sc, GEM_MAC_MAC_MAX_FRAME, (ETHER_MAX_LEN + ETHER_VLAN_ENCAP_LEN) | (0x2000 << 16)); /* more magic values */ GEM_WRITE_4(sc, GEM_MAC_PREAMBLE_LEN, 0x7); GEM_WRITE_4(sc, GEM_MAC_JAM_SIZE, 0x4); GEM_WRITE_4(sc, GEM_MAC_ATTEMPT_LIMIT, 0x10); GEM_WRITE_4(sc, GEM_MAC_CONTROL_TYPE, 0x8808); /* random number seed */ GEM_WRITE_4(sc, GEM_MAC_RANDOM_SEED, ((laddr[5] << 8) | laddr[4]) & 0x3ff); /* secondary MAC address: 0:0:0:0:0:0 */ GEM_WRITE_4(sc, GEM_MAC_ADDR3, 0); GEM_WRITE_4(sc, GEM_MAC_ADDR4, 0); GEM_WRITE_4(sc, GEM_MAC_ADDR5, 0); /* MAC control address: 01:80:c2:00:00:01 */ GEM_WRITE_4(sc, GEM_MAC_ADDR6, 0x0001); GEM_WRITE_4(sc, GEM_MAC_ADDR7, 0xc200); GEM_WRITE_4(sc, GEM_MAC_ADDR8, 0x0180); /* MAC filter address: 0:0:0:0:0:0 */ GEM_WRITE_4(sc, GEM_MAC_ADDR_FILTER0, 0); GEM_WRITE_4(sc, GEM_MAC_ADDR_FILTER1, 0); GEM_WRITE_4(sc, GEM_MAC_ADDR_FILTER2, 0); GEM_WRITE_4(sc, GEM_MAC_ADR_FLT_MASK1_2, 0); GEM_WRITE_4(sc, GEM_MAC_ADR_FLT_MASK0, 0); sc->sc_flags |= GEM_INITED; } /* Counters need to be zeroed. 
*/ GEM_WRITE_4(sc, GEM_MAC_NORM_COLL_CNT, 0); GEM_WRITE_4(sc, GEM_MAC_FIRST_COLL_CNT, 0); GEM_WRITE_4(sc, GEM_MAC_EXCESS_COLL_CNT, 0); GEM_WRITE_4(sc, GEM_MAC_LATE_COLL_CNT, 0); GEM_WRITE_4(sc, GEM_MAC_DEFER_TMR_CNT, 0); GEM_WRITE_4(sc, GEM_MAC_PEAK_ATTEMPTS, 0); GEM_WRITE_4(sc, GEM_MAC_RX_FRAME_COUNT, 0); GEM_WRITE_4(sc, GEM_MAC_RX_LEN_ERR_CNT, 0); GEM_WRITE_4(sc, GEM_MAC_RX_ALIGN_ERR, 0); GEM_WRITE_4(sc, GEM_MAC_RX_CRC_ERR_CNT, 0); GEM_WRITE_4(sc, GEM_MAC_RX_CODE_VIOL, 0); /* Set XOFF PAUSE time. */ GEM_WRITE_4(sc, GEM_MAC_SEND_PAUSE_CMD, 0x1BF0); /* Set the station address. */ GEM_WRITE_4(sc, GEM_MAC_ADDR0, (laddr[4] << 8) | laddr[5]); GEM_WRITE_4(sc, GEM_MAC_ADDR1, (laddr[2] << 8) | laddr[3]); GEM_WRITE_4(sc, GEM_MAC_ADDR2, (laddr[0] << 8) | laddr[1]); /* Enable MII outputs. */ GEM_WRITE_4(sc, GEM_MAC_XIF_CONFIG, GEM_MAC_XIF_TX_MII_ENA); } static void gem_start(if_t ifp) { struct gem_softc *sc = if_getsoftc(ifp); GEM_LOCK(sc); gem_start_locked(ifp); GEM_UNLOCK(sc); } static inline void gem_txkick(struct gem_softc *sc) { /* * Update the TX kick register. This register has to point to the * descriptor after the last valid one and for optimum performance * should be incremented in multiples of 4 (the DMA engine fetches/ * updates descriptors in batches of 4). */ #ifdef GEM_DEBUG CTR3(KTR_GEM, "%s: %s: kicking TX %d", device_get_name(sc->sc_dev), __func__, sc->sc_txnext); #endif GEM_CDSYNC(sc, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); GEM_WRITE_4(sc, GEM_TX_KICK, sc->sc_txnext); } static void gem_start_locked(if_t ifp) { struct gem_softc *sc = if_getsoftc(ifp); struct mbuf *m; int kicked, ntx; GEM_LOCK_ASSERT(sc, MA_OWNED); if ((if_getdrvflags(ifp) & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) != IFF_DRV_RUNNING || (sc->sc_flags & GEM_LINK) == 0) return; #ifdef GEM_DEBUG CTR4(KTR_GEM, "%s: %s: txfree %d, txnext %d", device_get_name(sc->sc_dev), __func__, sc->sc_txfree, sc->sc_txnext); #endif ntx = 0; kicked = 0; for (; !if_sendq_empty(ifp) && sc->sc_txfree > 1;) { m = if_dequeue(ifp); if (m == NULL) break; if (gem_load_txmbuf(sc, &m) != 0) { if (m == NULL) break; if_setdrvflagbits(ifp, IFF_DRV_OACTIVE, 0); if_sendq_prepend(ifp, m); break; } if ((sc->sc_txnext % 4) == 0) { gem_txkick(sc); kicked = 1; } else kicked = 0; ntx++; BPF_MTAP(ifp, m); } if (ntx > 0) { if (kicked == 0) gem_txkick(sc); #ifdef GEM_DEBUG CTR2(KTR_GEM, "%s: packets enqueued, OWN on %d", device_get_name(sc->sc_dev), sc->sc_txnext); #endif /* Set a watchdog timer in case the chip flakes out. */ sc->sc_wdog_timer = 5; #ifdef GEM_DEBUG CTR3(KTR_GEM, "%s: %s: watchdog %d", device_get_name(sc->sc_dev), __func__, sc->sc_wdog_timer); #endif } } static void gem_tint(struct gem_softc *sc) { if_t ifp = sc->sc_ifp; struct gem_txsoft *txs; int progress; uint32_t txlast; #ifdef GEM_DEBUG int i; GEM_LOCK_ASSERT(sc, MA_OWNED); CTR2(KTR_GEM, "%s: %s", device_get_name(sc->sc_dev), __func__); #endif /* * Go through our TX list and free mbufs for those * frames that have been transmitted. 
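* A worked wrap-around example (hypothetical indices): with txs_firstdesc = 1020 and txs_lastdesc = 3 on a 1024-descriptor ring, the job is still in flight while the completion index is >= 1020 or <= 3, which is exactly the wrapped comparison made below. 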
*/ progress = 0; GEM_CDSYNC(sc, BUS_DMASYNC_POSTREAD); while ((txs = STAILQ_FIRST(&sc->sc_txdirtyq)) != NULL) { #ifdef GEM_DEBUG if ((if_getflags(ifp) & IFF_DEBUG) != 0) { printf(" txsoft %p transmit chain:\n", txs); for (i = txs->txs_firstdesc;; i = GEM_NEXTTX(i)) { printf("descriptor %d: ", i); printf("gd_flags: 0x%016llx\t", (long long)le64toh( sc->sc_txdescs[i].gd_flags)); printf("gd_addr: 0x%016llx\n", (long long)le64toh( sc->sc_txdescs[i].gd_addr)); if (i == txs->txs_lastdesc) break; } } #endif /* * In theory, we could harvest some descriptors before * the ring is empty, but that's a bit complicated. * * GEM_TX_COMPLETION points to the last descriptor * processed + 1. */ txlast = GEM_READ_4(sc, GEM_TX_COMPLETION); #ifdef GEM_DEBUG CTR4(KTR_GEM, "%s: txs->txs_firstdesc = %d, " "txs->txs_lastdesc = %d, txlast = %d", __func__, txs->txs_firstdesc, txs->txs_lastdesc, txlast); #endif if (txs->txs_firstdesc <= txs->txs_lastdesc) { if ((txlast >= txs->txs_firstdesc) && (txlast <= txs->txs_lastdesc)) break; } else { /* Ick -- this command wraps. */ if ((txlast >= txs->txs_firstdesc) || (txlast <= txs->txs_lastdesc)) break; } #ifdef GEM_DEBUG CTR1(KTR_GEM, "%s: releasing a descriptor", __func__); #endif STAILQ_REMOVE_HEAD(&sc->sc_txdirtyq, txs_q); sc->sc_txfree += txs->txs_ndescs; bus_dmamap_sync(sc->sc_tdmatag, txs->txs_dmamap, BUS_DMASYNC_POSTWRITE); bus_dmamap_unload(sc->sc_tdmatag, txs->txs_dmamap); if (txs->txs_mbuf != NULL) { m_freem(txs->txs_mbuf); txs->txs_mbuf = NULL; } STAILQ_INSERT_TAIL(&sc->sc_txfreeq, txs, txs_q); if_inc_counter(ifp, IFCOUNTER_OPACKETS, 1); progress = 1; } #ifdef GEM_DEBUG CTR4(KTR_GEM, "%s: GEM_TX_STATE_MACHINE %x GEM_TX_DATA_PTR %llx " "GEM_TX_COMPLETION %x", __func__, GEM_READ_4(sc, GEM_TX_STATE_MACHINE), ((long long)GEM_READ_4(sc, GEM_TX_DATA_PTR_HI) << 32) | GEM_READ_4(sc, GEM_TX_DATA_PTR_LO), GEM_READ_4(sc, GEM_TX_COMPLETION)); #endif if (progress) { if (sc->sc_txfree == GEM_NTXDESC - 1) sc->sc_txwin = 0; /* * We freed some descriptors, so reset IFF_DRV_OACTIVE * and restart. */ if_setdrvflagbits(ifp, 0, IFF_DRV_OACTIVE); if (STAILQ_EMPTY(&sc->sc_txdirtyq)) sc->sc_wdog_timer = 0; gem_start_locked(ifp); } #ifdef GEM_DEBUG CTR3(KTR_GEM, "%s: %s: watchdog %d", device_get_name(sc->sc_dev), __func__, sc->sc_wdog_timer); #endif } #ifdef GEM_RINT_TIMEOUT static void gem_rint_timeout(void *arg) { struct gem_softc *sc = arg; GEM_LOCK_ASSERT(sc, MA_OWNED); gem_rint(sc); } #endif static void gem_rint(struct gem_softc *sc) { if_t ifp = sc->sc_ifp; struct mbuf *m; uint64_t rxstat; uint32_t rxcomp; GEM_LOCK_ASSERT(sc, MA_OWNED); #ifdef GEM_RINT_TIMEOUT callout_stop(&sc->sc_rx_ch); #endif #ifdef GEM_DEBUG CTR2(KTR_GEM, "%s: %s", device_get_name(sc->sc_dev), __func__); #endif /* * Read the completion register once. This limits * how long the following loop can execute. */ rxcomp = GEM_READ_4(sc, GEM_RX_COMPLETION); #ifdef GEM_DEBUG CTR3(KTR_GEM, "%s: sc->sc_rxptr %d, complete %d", __func__, sc->sc_rxptr, rxcomp); #endif GEM_CDSYNC(sc, BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); for (; sc->sc_rxptr != rxcomp;) { m = sc->sc_rxsoft[sc->sc_rxptr].rxs_mbuf; rxstat = le64toh(sc->sc_rxdescs[sc->sc_rxptr].gd_flags); if (rxstat & GEM_RD_OWN) { #ifdef GEM_RINT_TIMEOUT /* * The descriptor is still marked as owned, although * it is supposed to have completed. This has been * observed on some machines. Just exiting here * might leave the packet sitting around until another * one arrives to trigger a new interrupt, which is * generally undesirable, so set up a timeout. 
*/ callout_reset(&sc->sc_rx_ch, GEM_RXOWN_TICKS, gem_rint_timeout, sc); #endif m = NULL; goto kickit; } if (rxstat & GEM_RD_BAD_CRC) { if_inc_counter(ifp, IFCOUNTER_IERRORS, 1); device_printf(sc->sc_dev, "receive error: CRC error\n"); GEM_INIT_RXDESC(sc, sc->sc_rxptr); m = NULL; goto kickit; } #ifdef GEM_DEBUG if ((if_getflags(ifp) & IFF_DEBUG) != 0) { printf(" rxsoft %p descriptor %d: ", &sc->sc_rxsoft[sc->sc_rxptr], sc->sc_rxptr); printf("gd_flags: 0x%016llx\t", (long long)le64toh( sc->sc_rxdescs[sc->sc_rxptr].gd_flags)); printf("gd_addr: 0x%016llx\n", (long long)le64toh( sc->sc_rxdescs[sc->sc_rxptr].gd_addr)); } #endif /* * Allocate a new mbuf cluster. If that fails, we are * out of memory, and must drop the packet and recycle * the buffer that's already attached to this descriptor. */ if (gem_add_rxbuf(sc, sc->sc_rxptr) != 0) { if_inc_counter(ifp, IFCOUNTER_IQDROPS, 1); GEM_INIT_RXDESC(sc, sc->sc_rxptr); m = NULL; } kickit: /* * Update the RX kick register. This register has to point * to the descriptor after the last valid one (before the * current batch) and for optimum performance should be * incremented in multiples of 4 (the DMA engine fetches/ * updates descriptors in batches of 4). */ sc->sc_rxptr = GEM_NEXTRX(sc->sc_rxptr); if ((sc->sc_rxptr % 4) == 0) { GEM_CDSYNC(sc, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); GEM_WRITE_4(sc, GEM_RX_KICK, (sc->sc_rxptr + GEM_NRXDESC - 4) & GEM_NRXDESC_MASK); } if (m == NULL) { if (rxstat & GEM_RD_OWN) break; continue; } if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1); m->m_data += ETHER_ALIGN; /* first byte offset */ m->m_pkthdr.rcvif = ifp; m->m_pkthdr.len = m->m_len = GEM_RD_BUFLEN(rxstat); if ((if_getcapenable(ifp) & IFCAP_RXCSUM) != 0) gem_rxcksum(m, rxstat); /* Pass it on. */ GEM_UNLOCK(sc); if_input(ifp, m); GEM_LOCK(sc); } #ifdef GEM_DEBUG CTR3(KTR_GEM, "%s: done sc->sc_rxptr %d, complete %d", __func__, sc->sc_rxptr, GEM_READ_4(sc, GEM_RX_COMPLETION)); #endif } static int gem_add_rxbuf(struct gem_softc *sc, int idx) { struct gem_rxsoft *rxs = &sc->sc_rxsoft[idx]; struct mbuf *m; bus_dma_segment_t segs[1]; int error, nsegs; GEM_LOCK_ASSERT(sc, MA_OWNED); m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR); if (m == NULL) return (ENOBUFS); m->m_len = m->m_pkthdr.len = m->m_ext.ext_size; #ifdef GEM_DEBUG /* Zero the packet to check DMA. */ memset(m->m_ext.ext_buf, 0, m->m_ext.ext_size); #endif if (rxs->rxs_mbuf != NULL) { bus_dmamap_sync(sc->sc_rdmatag, rxs->rxs_dmamap, BUS_DMASYNC_POSTREAD); bus_dmamap_unload(sc->sc_rdmatag, rxs->rxs_dmamap); } error = bus_dmamap_load_mbuf_sg(sc->sc_rdmatag, rxs->rxs_dmamap, m, segs, &nsegs, BUS_DMA_NOWAIT); if (error != 0) { device_printf(sc->sc_dev, "cannot load RX DMA map %d, error = %d\n", idx, error); m_freem(m); return (error); } /* If nsegs is wrong then the stack is corrupt. 
*/ KASSERT(nsegs == 1, ("%s: too many DMA segments (%d)", __func__, nsegs)); rxs->rxs_mbuf = m; rxs->rxs_paddr = segs[0].ds_addr; bus_dmamap_sync(sc->sc_rdmatag, rxs->rxs_dmamap, BUS_DMASYNC_PREREAD); GEM_INIT_RXDESC(sc, idx); return (0); } static void gem_eint(struct gem_softc *sc, u_int status) { if_inc_counter(sc->sc_ifp, IFCOUNTER_IERRORS, 1); if ((status & GEM_INTR_RX_TAG_ERR) != 0) { gem_reset_rxdma(sc); return; } device_printf(sc->sc_dev, "%s: status 0x%x", __func__, status); if ((status & GEM_INTR_BERR) != 0) { printf(", PCI bus error 0x%x", GEM_READ_4(sc, GEM_PCI_ERROR_STATUS)); } printf("\n"); } void gem_intr(void *v) { struct gem_softc *sc = v; uint32_t status, status2; GEM_LOCK(sc); status = GEM_READ_4(sc, GEM_STATUS); #ifdef GEM_DEBUG CTR4(KTR_GEM, "%s: %s: cplt %x, status %x", device_get_name(sc->sc_dev), __func__, (status >> GEM_STATUS_TX_COMPLETION_SHFT), (u_int)status); /* * PCS interrupts must be cleared, otherwise no traffic is passed! */ if ((status & GEM_INTR_PCS) != 0) { status2 = GEM_READ_4(sc, GEM_MII_INTERRUP_STATUS) | GEM_READ_4(sc, GEM_MII_INTERRUP_STATUS); if ((status2 & GEM_MII_INTERRUP_LINK) != 0) device_printf(sc->sc_dev, "%s: PCS link status changed\n", __func__); } if ((status & GEM_MAC_CONTROL_STATUS) != 0) { status2 = GEM_READ_4(sc, GEM_MAC_CONTROL_STATUS); if ((status2 & GEM_MAC_PAUSED) != 0) device_printf(sc->sc_dev, "%s: PAUSE received (PAUSE time %d slots)\n", __func__, GEM_MAC_PAUSE_TIME(status2)); if ((status2 & GEM_MAC_PAUSE) != 0) device_printf(sc->sc_dev, "%s: transited to PAUSE state\n", __func__); if ((status2 & GEM_MAC_RESUME) != 0) device_printf(sc->sc_dev, "%s: transited to non-PAUSE state\n", __func__); } if ((status & GEM_INTR_MIF) != 0) device_printf(sc->sc_dev, "%s: MIF interrupt\n", __func__); #endif if (__predict_false(status & (GEM_INTR_RX_TAG_ERR | GEM_INTR_PERR | GEM_INTR_BERR)) != 0) gem_eint(sc, status); if ((status & (GEM_INTR_RX_DONE | GEM_INTR_RX_NOBUF)) != 0) gem_rint(sc); if ((status & (GEM_INTR_TX_EMPTY | GEM_INTR_TX_INTME)) != 0) gem_tint(sc); if (__predict_false((status & GEM_INTR_TX_MAC) != 0)) { status2 = GEM_READ_4(sc, GEM_MAC_TX_STATUS); if ((status2 & ~(GEM_MAC_TX_XMIT_DONE | GEM_MAC_TX_DEFER_EXP | GEM_MAC_TX_PEAK_EXP)) != 0) device_printf(sc->sc_dev, "MAC TX fault, status %x\n", status2); if ((status2 & (GEM_MAC_TX_UNDERRUN | GEM_MAC_TX_PKT_TOO_LONG)) != 0) { if_inc_counter(sc->sc_ifp, IFCOUNTER_OERRORS, 1); if_setdrvflagbits(sc->sc_ifp, 0, IFF_DRV_RUNNING); gem_init_locked(sc); } } if (__predict_false((status & GEM_INTR_RX_MAC) != 0)) { status2 = GEM_READ_4(sc, GEM_MAC_RX_STATUS); /* * At least with GEM_SUN_GEM revisions GEM_MAC_RX_OVERFLOW * happen often due to a silicon bug so handle them silently. * Moreover, it's likely that the receiver has hung so we * reset it. 
*/ if ((status2 & GEM_MAC_RX_OVERFLOW) != 0) { if_inc_counter(sc->sc_ifp, IFCOUNTER_IERRORS, 1); gem_reset_rxdma(sc); } else if ((status2 & ~(GEM_MAC_RX_DONE | GEM_MAC_RX_FRAME_CNT)) != 0) device_printf(sc->sc_dev, "MAC RX fault, status %x\n", status2); } GEM_UNLOCK(sc); } static int gem_watchdog(struct gem_softc *sc) { if_t ifp = sc->sc_ifp; GEM_LOCK_ASSERT(sc, MA_OWNED); #ifdef GEM_DEBUG CTR4(KTR_GEM, "%s: GEM_RX_CONFIG %x GEM_MAC_RX_STATUS %x GEM_MAC_RX_CONFIG %x", __func__, GEM_READ_4(sc, GEM_RX_CONFIG), GEM_READ_4(sc, GEM_MAC_RX_STATUS), GEM_READ_4(sc, GEM_MAC_RX_CONFIG)); CTR4(KTR_GEM, "%s: GEM_TX_CONFIG %x GEM_MAC_TX_STATUS %x GEM_MAC_TX_CONFIG %x", __func__, GEM_READ_4(sc, GEM_TX_CONFIG), GEM_READ_4(sc, GEM_MAC_TX_STATUS), GEM_READ_4(sc, GEM_MAC_TX_CONFIG)); #endif if (sc->sc_wdog_timer == 0 || --sc->sc_wdog_timer != 0) return (0); if ((sc->sc_flags & GEM_LINK) != 0) device_printf(sc->sc_dev, "device timeout\n"); else if (bootverbose) device_printf(sc->sc_dev, "device timeout (no link)\n"); if_inc_counter(ifp, IFCOUNTER_OERRORS, 1); /* Try to get more packets going. */ if_setdrvflagbits(ifp, 0, IFF_DRV_RUNNING); gem_init_locked(sc); gem_start_locked(ifp); return (EJUSTRETURN); } static void gem_mifinit(struct gem_softc *sc) { /* Configure the MIF in frame mode. */ GEM_WRITE_4(sc, GEM_MIF_CONFIG, GEM_READ_4(sc, GEM_MIF_CONFIG) & ~GEM_MIF_CONFIG_BB_ENA); GEM_BARRIER(sc, GEM_MIF_CONFIG, 4, BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE); } /* * MII interface * * The MII interface supports at least three different operating modes: * * Bitbang mode is implemented using data, clock and output enable registers. * * Frame mode is implemented by loading a complete frame into the frame * register and polling the valid bit for completion. * * Polling mode uses the frame register but completion is indicated by * an interrupt. * */ int gem_mii_readreg(device_t dev, int phy, int reg) { struct gem_softc *sc; int n; uint32_t v; #ifdef GEM_DEBUG_PHY printf("%s: phy %d reg %d\n", __func__, phy, reg); #endif sc = device_get_softc(dev); if ((sc->sc_flags & GEM_SERDES) != 0) { switch (reg) { case MII_BMCR: reg = GEM_MII_CONTROL; break; case MII_BMSR: reg = GEM_MII_STATUS; break; case MII_PHYIDR1: case MII_PHYIDR2: return (0); case MII_ANAR: reg = GEM_MII_ANAR; break; case MII_ANLPAR: reg = GEM_MII_ANLPAR; break; case MII_EXTSR: return (EXTSR_1000XFDX | EXTSR_1000XHDX); default: device_printf(sc->sc_dev, "%s: unhandled register %d\n", __func__, reg); return (0); } return (GEM_READ_4(sc, reg)); } /* Construct the frame command. 
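* For example, reading MII_BMSR (register 1) of PHY 0 sends GEM_MIF_FRAME_READ | (0 << GEM_MIF_PHY_SHIFT) | (1 << GEM_MIF_REG_SHIFT); the chip sets GEM_MIF_FRAME_TA0 in the frame register once the turnaround completes, and the result is extracted with the GEM_MIF_FRAME_DATA mask. 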
*/ v = GEM_MIF_FRAME_READ | (phy << GEM_MIF_PHY_SHIFT) | (reg << GEM_MIF_REG_SHIFT); GEM_WRITE_4(sc, GEM_MIF_FRAME, v); GEM_BARRIER(sc, GEM_MIF_FRAME, 4, BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE); for (n = 0; n < 100; n++) { DELAY(1); v = GEM_READ_4(sc, GEM_MIF_FRAME); if (v & GEM_MIF_FRAME_TA0) return (v & GEM_MIF_FRAME_DATA); } device_printf(sc->sc_dev, "%s: timed out\n", __func__); return (0); } int gem_mii_writereg(device_t dev, int phy, int reg, int val) { struct gem_softc *sc; int n; uint32_t v; #ifdef GEM_DEBUG_PHY printf("%s: phy %d reg %d val %x\n", __func__, phy, reg, val); #endif sc = device_get_softc(dev); if ((sc->sc_flags & GEM_SERDES) != 0) { switch (reg) { case MII_BMSR: reg = GEM_MII_STATUS; break; case MII_BMCR: reg = GEM_MII_CONTROL; if ((val & GEM_MII_CONTROL_RESET) == 0) break; GEM_WRITE_4(sc, GEM_MII_CONTROL, val); GEM_BARRIER(sc, GEM_MII_CONTROL, 4, BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE); if (!gem_bitwait(sc, GEM_MII_CONTROL, GEM_MII_CONTROL_RESET, 0)) device_printf(sc->sc_dev, "cannot reset PCS\n"); /* FALLTHROUGH */ case MII_ANAR: GEM_WRITE_4(sc, GEM_MII_CONFIG, 0); GEM_BARRIER(sc, GEM_MII_CONFIG, 4, BUS_SPACE_BARRIER_WRITE); GEM_WRITE_4(sc, GEM_MII_ANAR, val); GEM_BARRIER(sc, GEM_MII_ANAR, 4, BUS_SPACE_BARRIER_WRITE); GEM_WRITE_4(sc, GEM_MII_SLINK_CONTROL, GEM_MII_SLINK_LOOPBACK | GEM_MII_SLINK_EN_SYNC_D); GEM_BARRIER(sc, GEM_MII_SLINK_CONTROL, 4, BUS_SPACE_BARRIER_WRITE); GEM_WRITE_4(sc, GEM_MII_CONFIG, GEM_MII_CONFIG_ENABLE); GEM_BARRIER(sc, GEM_MII_CONFIG, 4, BUS_SPACE_BARRIER_WRITE); return (0); case MII_ANLPAR: reg = GEM_MII_ANLPAR; break; default: device_printf(sc->sc_dev, "%s: unhandled register %d\n", __func__, reg); return (0); } GEM_WRITE_4(sc, reg, val); GEM_BARRIER(sc, reg, 4, BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE); return (0); } /* Construct the frame command. */ v = GEM_MIF_FRAME_WRITE | (phy << GEM_MIF_PHY_SHIFT) | (reg << GEM_MIF_REG_SHIFT) | (val & GEM_MIF_FRAME_DATA); GEM_WRITE_4(sc, GEM_MIF_FRAME, v); GEM_BARRIER(sc, GEM_MIF_FRAME, 4, BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE); for (n = 0; n < 100; n++) { DELAY(1); v = GEM_READ_4(sc, GEM_MIF_FRAME); if (v & GEM_MIF_FRAME_TA0) return (1); } device_printf(sc->sc_dev, "%s: timed out\n", __func__); return (0); } void gem_mii_statchg(device_t dev) { struct gem_softc *sc; int gigabit; uint32_t rxcfg, txcfg, v; sc = device_get_softc(dev); GEM_LOCK_ASSERT(sc, MA_OWNED); #ifdef GEM_DEBUG if ((if_getflags(sc->sc_ifp) & IFF_DEBUG) != 0) device_printf(sc->sc_dev, "%s: status change\n", __func__); #endif if ((sc->sc_mii->mii_media_status & IFM_ACTIVE) != 0 && IFM_SUBTYPE(sc->sc_mii->mii_media_active) != IFM_NONE) sc->sc_flags |= GEM_LINK; else sc->sc_flags &= ~GEM_LINK; switch (IFM_SUBTYPE(sc->sc_mii->mii_media_active)) { case IFM_1000_SX: case IFM_1000_LX: case IFM_1000_CX: case IFM_1000_T: gigabit = 1; break; default: gigabit = 0; } /* * The configuration done here corresponds to steps F) and * G) and, as far as enabling of the RX and TX MACs goes, also to step H) * of the initialization sequence outlined in section 3.2.1 of * the GEM Gigabit Ethernet ASIC Specification. 
*/ rxcfg = sc->sc_mac_rxcfg; rxcfg &= ~GEM_MAC_RX_CARR_EXTEND; txcfg = GEM_MAC_TX_ENA_IPG0 | GEM_MAC_TX_NGU | GEM_MAC_TX_NGU_LIMIT; if ((IFM_OPTIONS(sc->sc_mii->mii_media_active) & IFM_FDX) != 0) txcfg |= GEM_MAC_TX_IGN_CARRIER | GEM_MAC_TX_IGN_COLLIS; else if (gigabit != 0) { rxcfg |= GEM_MAC_RX_CARR_EXTEND; txcfg |= GEM_MAC_TX_CARR_EXTEND; } (void)gem_disable_tx(sc); GEM_WRITE_4(sc, GEM_MAC_TX_CONFIG, txcfg); (void)gem_disable_rx(sc); GEM_WRITE_4(sc, GEM_MAC_RX_CONFIG, rxcfg); v = GEM_READ_4(sc, GEM_MAC_CONTROL_CONFIG) & ~(GEM_MAC_CC_RX_PAUSE | GEM_MAC_CC_TX_PAUSE); if ((IFM_OPTIONS(sc->sc_mii->mii_media_active) & IFM_ETH_RXPAUSE) != 0) v |= GEM_MAC_CC_RX_PAUSE; if ((IFM_OPTIONS(sc->sc_mii->mii_media_active) & IFM_ETH_TXPAUSE) != 0) v |= GEM_MAC_CC_TX_PAUSE; GEM_WRITE_4(sc, GEM_MAC_CONTROL_CONFIG, v); if ((IFM_OPTIONS(sc->sc_mii->mii_media_active) & IFM_FDX) == 0 && gigabit != 0) GEM_WRITE_4(sc, GEM_MAC_SLOT_TIME, GEM_MAC_SLOT_TIME_CARR_EXTEND); else GEM_WRITE_4(sc, GEM_MAC_SLOT_TIME, GEM_MAC_SLOT_TIME_NORMAL); /* XIF Configuration */ v = GEM_MAC_XIF_LINK_LED; v |= GEM_MAC_XIF_TX_MII_ENA; if ((sc->sc_flags & GEM_SERDES) == 0) { if ((GEM_READ_4(sc, GEM_MIF_CONFIG) & GEM_MIF_CONFIG_PHY_SEL) != 0) { /* External MII needs echo disable if half duplex. */ if ((IFM_OPTIONS(sc->sc_mii->mii_media_active) & IFM_FDX) == 0) v |= GEM_MAC_XIF_ECHO_DISABL; } else /* * Internal MII needs buffer enable. * XXX buffer enable makes only sense for an * external PHY. */ v |= GEM_MAC_XIF_MII_BUF_ENA; } if (gigabit != 0) v |= GEM_MAC_XIF_GMII_MODE; if ((IFM_OPTIONS(sc->sc_mii->mii_media_active) & IFM_FDX) != 0) v |= GEM_MAC_XIF_FDPLX_LED; GEM_WRITE_4(sc, GEM_MAC_XIF_CONFIG, v); sc->sc_mac_rxcfg = rxcfg; if ((if_getdrvflags(sc->sc_ifp) & IFF_DRV_RUNNING) != 0 && (sc->sc_flags & GEM_LINK) != 0) { GEM_WRITE_4(sc, GEM_MAC_TX_CONFIG, txcfg | GEM_MAC_TX_ENABLE); GEM_WRITE_4(sc, GEM_MAC_RX_CONFIG, rxcfg | GEM_MAC_RX_ENABLE); } } int gem_mediachange(if_t ifp) { struct gem_softc *sc = if_getsoftc(ifp); int error; /* XXX add support for serial media. 
*/ GEM_LOCK(sc); error = mii_mediachg(sc->sc_mii); GEM_UNLOCK(sc); return (error); } void gem_mediastatus(if_t ifp, struct ifmediareq *ifmr) { struct gem_softc *sc = if_getsoftc(ifp); GEM_LOCK(sc); if ((if_getflags(ifp) & IFF_UP) == 0) { GEM_UNLOCK(sc); return; } mii_pollstat(sc->sc_mii); ifmr->ifm_active = sc->sc_mii->mii_media_active; ifmr->ifm_status = sc->sc_mii->mii_media_status; GEM_UNLOCK(sc); } static int gem_ioctl(if_t ifp, u_long cmd, caddr_t data) { struct gem_softc *sc = if_getsoftc(ifp); struct ifreq *ifr = (struct ifreq *)data; int error; error = 0; switch (cmd) { case SIOCSIFFLAGS: GEM_LOCK(sc); if ((if_getflags(ifp) & IFF_UP) != 0) { if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0 && ((if_getflags(ifp) ^ sc->sc_ifflags) & (IFF_ALLMULTI | IFF_PROMISC)) != 0) gem_setladrf(sc); else gem_init_locked(sc); } else if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0) gem_stop(ifp, 0); if ((if_getflags(ifp) & IFF_LINK0) != 0) sc->sc_csum_features |= CSUM_UDP; else sc->sc_csum_features &= ~CSUM_UDP; if ((if_getcapenable(ifp) & IFCAP_TXCSUM) != 0) if_sethwassist(ifp, sc->sc_csum_features); sc->sc_ifflags = if_getflags(ifp); GEM_UNLOCK(sc); break; case SIOCADDMULTI: case SIOCDELMULTI: GEM_LOCK(sc); if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) != 0) gem_setladrf(sc); GEM_UNLOCK(sc); break; case SIOCGIFMEDIA: case SIOCSIFMEDIA: error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii->mii_media, cmd); break; case SIOCSIFCAP: GEM_LOCK(sc); if_setcapenable(ifp, ifr->ifr_reqcap); if ((if_getcapenable(ifp) & IFCAP_TXCSUM) != 0) if_sethwassist(ifp, sc->sc_csum_features); else if_sethwassist(ifp, 0); GEM_UNLOCK(sc); break; default: error = ether_ioctl(ifp, cmd, data); break; } return (error); } static u_int gem_hash_maddr(void *arg, struct sockaddr_dl *sdl, u_int cnt) { uint32_t crc, *hash = arg; crc = ether_crc32_le(LLADDR(sdl), ETHER_ADDR_LEN); /* We just want the 8 most significant bits. */ crc >>= 24; /* Set the corresponding bit in the filter. */ hash[crc >> 4] |= 1 << (15 - (crc & 15)); return (1); } static void gem_setladrf(struct gem_softc *sc) { if_t ifp = sc->sc_ifp; int i; uint32_t hash[16]; uint32_t v; GEM_LOCK_ASSERT(sc, MA_OWNED); /* * Turn off the RX MAC and the hash filter as required by the Sun GEM * programming restrictions. */ v = sc->sc_mac_rxcfg & ~GEM_MAC_RX_HASH_FILTER; GEM_WRITE_4(sc, GEM_MAC_RX_CONFIG, v); GEM_BARRIER(sc, GEM_MAC_RX_CONFIG, 4, BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE); if (!gem_bitwait(sc, GEM_MAC_RX_CONFIG, GEM_MAC_RX_HASH_FILTER | GEM_MAC_RX_ENABLE, 0)) device_printf(sc->sc_dev, "cannot disable RX MAC or hash filter\n"); v &= ~(GEM_MAC_RX_PROMISCUOUS | GEM_MAC_RX_PROMISC_GRP); if ((if_getflags(ifp) & IFF_PROMISC) != 0) { v |= GEM_MAC_RX_PROMISCUOUS; goto chipit; } if ((if_getflags(ifp) & IFF_ALLMULTI) != 0) { v |= GEM_MAC_RX_PROMISC_GRP; goto chipit; } /* * Set up multicast address filter by passing all multicast * addresses through a crc generator, and then using the high * order 8 bits as an index into the 256 bit logical address * filter. The high order 4 bits selects the word, while the * other 4 bits select the bit within the word (where bit 0 * is the MSB). */ memset(hash, 0, sizeof(hash)); if_foreach_llmaddr(ifp, gem_hash_maddr, hash); v |= GEM_MAC_RX_HASH_FILTER; /* Now load the hash table into the chip (if we are using it). 
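 *
 * (Worked example of the mapping done in gem_hash_maddr() above; the CRC
 * value is made up for illustration.  Suppose a multicast address hashes
 * to crc = 0xb6... after ether_crc32_le():
 *
 *	crc >>= 24;		crc  = 0xb6 = 182
 *	word = crc >> 4;	word = 11, i.e. the twelfth register
 *	bit  = 15 - (crc & 15);	bit  = 15 - 6 = 9
 *
 * so the filter update is hash[11] |= 1 << 9.  Note that bit 0 of each
 * 16-bit hash word corresponds to the MSB, hence the 15 - ... term.)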
*/ for (i = 0; i < 16; i++) GEM_WRITE_4(sc, GEM_MAC_HASH0 + i * (GEM_MAC_HASH1 - GEM_MAC_HASH0), hash[i]); chipit: sc->sc_mac_rxcfg = v; GEM_WRITE_4(sc, GEM_MAC_RX_CONFIG, v | GEM_MAC_RX_ENABLE); } diff --git a/sys/dev/mgb/if_mgb.c b/sys/dev/mgb/if_mgb.c index 05c4c242f739..1240d0f84415 100644 --- a/sys/dev/mgb/if_mgb.c +++ b/sys/dev/mgb/if_mgb.c @@ -1,1616 +1,1615 @@ /*- * SPDX-License-Identifier: BSD-2-Clause * * Copyright (c) 2019 The FreeBSD Foundation * * This driver was written by Gerald ND Aryeetey * under sponsorship from the FreeBSD Foundation. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include /* * Microchip LAN7430/LAN7431 PCIe to Gigabit Ethernet Controller driver. * * Product information: * LAN7430 https://www.microchip.com/en-us/product/LAN7430 * - Integrated IEEE 802.3 compliant PHY * LAN7431 https://www.microchip.com/en-us/product/LAN7431 * - RGMII Interface * * This driver uses the iflib interface and the default 'ukphy' PHY driver. * * UNIMPLEMENTED FEATURES * ---------------------- * A number of features supported by LAN743X device are not yet implemented in * this driver: * * - Multiple (up to 4) RX queues support * - Just needs to remove asserts and malloc multiple `rx_ring_data` * structs based on ncpus. * - RX/TX Checksum Offloading support * - VLAN support * - Receive Packet Filtering (Multicast Perfect/Hash Address) support * - Wake on LAN (WoL) support * - TX LSO support * - Receive Side Scaling (RSS) support * - Debugging Capabilities: * - Could include MAC statistics and * error status registers in sysctl. 
*/ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "ifdi_if.h" #include "miibus_if.h" static const pci_vendor_info_t mgb_vendor_info_array[] = { PVID(MGB_MICROCHIP_VENDOR_ID, MGB_LAN7430_DEVICE_ID, "Microchip LAN7430 PCIe Gigabit Ethernet Controller"), PVID(MGB_MICROCHIP_VENDOR_ID, MGB_LAN7431_DEVICE_ID, "Microchip LAN7431 PCIe Gigabit Ethernet Controller"), PVID_END }; /* Device methods */ static device_register_t mgb_register; /* IFLIB methods */ static ifdi_attach_pre_t mgb_attach_pre; static ifdi_attach_post_t mgb_attach_post; static ifdi_detach_t mgb_detach; static ifdi_tx_queues_alloc_t mgb_tx_queues_alloc; static ifdi_rx_queues_alloc_t mgb_rx_queues_alloc; static ifdi_queues_free_t mgb_queues_free; static ifdi_init_t mgb_init; static ifdi_stop_t mgb_stop; static ifdi_msix_intr_assign_t mgb_msix_intr_assign; static ifdi_tx_queue_intr_enable_t mgb_tx_queue_intr_enable; static ifdi_rx_queue_intr_enable_t mgb_rx_queue_intr_enable; static ifdi_intr_enable_t mgb_intr_enable_all; static ifdi_intr_disable_t mgb_intr_disable_all; /* IFLIB_TXRX methods */ static int mgb_isc_txd_encap(void *, if_pkt_info_t); static void mgb_isc_txd_flush(void *, uint16_t, qidx_t); static int mgb_isc_txd_credits_update(void *, uint16_t, bool); static int mgb_isc_rxd_available(void *, uint16_t, qidx_t, qidx_t); static int mgb_isc_rxd_pkt_get(void *, if_rxd_info_t); static void mgb_isc_rxd_refill(void *, if_rxd_update_t); static void mgb_isc_rxd_flush(void *, uint16_t, uint8_t, qidx_t); /* Interrupts */ static driver_filter_t mgb_legacy_intr; static driver_filter_t mgb_admin_intr; static driver_filter_t mgb_rxq_intr; static bool mgb_intr_test(struct mgb_softc *); /* MII methods */ static miibus_readreg_t mgb_miibus_readreg; static miibus_writereg_t mgb_miibus_writereg; static miibus_linkchg_t mgb_miibus_linkchg; static miibus_statchg_t mgb_miibus_statchg; static int mgb_media_change(if_t); static void mgb_media_status(if_t, struct ifmediareq *); /* Helper/Test functions */ static int mgb_test_bar(struct mgb_softc *); static int mgb_alloc_regs(struct mgb_softc *); static int mgb_release_regs(struct mgb_softc *); static void mgb_get_ethaddr(struct mgb_softc *, struct ether_addr *); static int mgb_wait_for_bits(struct mgb_softc *, int, int, int); /* H/W init, reset and teardown helpers */ static int mgb_hw_init(struct mgb_softc *); static int mgb_hw_teardown(struct mgb_softc *); static int mgb_hw_reset(struct mgb_softc *); static int mgb_mac_init(struct mgb_softc *); static int mgb_dmac_reset(struct mgb_softc *); static int mgb_phy_reset(struct mgb_softc *); static int mgb_dma_init(struct mgb_softc *); static int mgb_dma_tx_ring_init(struct mgb_softc *, int); static int mgb_dma_rx_ring_init(struct mgb_softc *, int); static int mgb_dmac_control(struct mgb_softc *, int, int, enum mgb_dmac_cmd); static int mgb_fct_control(struct mgb_softc *, int, int, enum mgb_fct_cmd); /********************************************************************* * FreeBSD Device Interface Entry Points *********************************************************************/ static device_method_t mgb_methods[] = { /* Device interface */ DEVMETHOD(device_register, mgb_register), DEVMETHOD(device_probe, iflib_device_probe), DEVMETHOD(device_attach, iflib_device_attach), DEVMETHOD(device_detach, iflib_device_detach), DEVMETHOD(device_shutdown, iflib_device_shutdown), 
DEVMETHOD(device_suspend, iflib_device_suspend), DEVMETHOD(device_resume, iflib_device_resume), /* MII Interface */ DEVMETHOD(miibus_readreg, mgb_miibus_readreg), DEVMETHOD(miibus_writereg, mgb_miibus_writereg), DEVMETHOD(miibus_linkchg, mgb_miibus_linkchg), DEVMETHOD(miibus_statchg, mgb_miibus_statchg), DEVMETHOD_END }; static driver_t mgb_driver = { "mgb", mgb_methods, sizeof(struct mgb_softc) }; DRIVER_MODULE(mgb, pci, mgb_driver, NULL, NULL); IFLIB_PNP_INFO(pci, mgb, mgb_vendor_info_array); MODULE_VERSION(mgb, 1); #if 0 /* MIIBUS_DEBUG */ /* If MIIBUS debug stuff is in attach then order matters. Use below instead. */ DRIVER_MODULE_ORDERED(miibus, mgb, miibus_driver, NULL, NULL, SI_ORDER_ANY); #endif /* MIIBUS_DEBUG */ DRIVER_MODULE(miibus, mgb, miibus_driver, NULL, NULL); MODULE_DEPEND(mgb, pci, 1, 1, 1); MODULE_DEPEND(mgb, ether, 1, 1, 1); MODULE_DEPEND(mgb, miibus, 1, 1, 1); MODULE_DEPEND(mgb, iflib, 1, 1, 1); static device_method_t mgb_iflib_methods[] = { DEVMETHOD(ifdi_attach_pre, mgb_attach_pre), DEVMETHOD(ifdi_attach_post, mgb_attach_post), DEVMETHOD(ifdi_detach, mgb_detach), DEVMETHOD(ifdi_init, mgb_init), DEVMETHOD(ifdi_stop, mgb_stop), DEVMETHOD(ifdi_tx_queues_alloc, mgb_tx_queues_alloc), DEVMETHOD(ifdi_rx_queues_alloc, mgb_rx_queues_alloc), DEVMETHOD(ifdi_queues_free, mgb_queues_free), DEVMETHOD(ifdi_msix_intr_assign, mgb_msix_intr_assign), DEVMETHOD(ifdi_tx_queue_intr_enable, mgb_tx_queue_intr_enable), DEVMETHOD(ifdi_rx_queue_intr_enable, mgb_rx_queue_intr_enable), DEVMETHOD(ifdi_intr_enable, mgb_intr_enable_all), DEVMETHOD(ifdi_intr_disable, mgb_intr_disable_all), #if 0 /* Not yet implemented IFLIB methods */ /* * Set multicast addresses, mtu and promiscuous mode */ DEVMETHOD(ifdi_multi_set, mgb_multi_set), DEVMETHOD(ifdi_mtu_set, mgb_mtu_set), DEVMETHOD(ifdi_promisc_set, mgb_promisc_set), /* * Needed for VLAN support */ DEVMETHOD(ifdi_vlan_register, mgb_vlan_register), DEVMETHOD(ifdi_vlan_unregister, mgb_vlan_unregister), DEVMETHOD(ifdi_needs_restart, mgb_if_needs_restart), /* * Needed for WOL support * at the very least. 
*/ DEVMETHOD(ifdi_shutdown, mgb_shutdown), DEVMETHOD(ifdi_suspend, mgb_suspend), DEVMETHOD(ifdi_resume, mgb_resume), #endif /* UNUSED_IFLIB_METHODS */ DEVMETHOD_END }; static driver_t mgb_iflib_driver = { "mgb", mgb_iflib_methods, sizeof(struct mgb_softc) }; static struct if_txrx mgb_txrx = { .ift_txd_encap = mgb_isc_txd_encap, .ift_txd_flush = mgb_isc_txd_flush, .ift_txd_credits_update = mgb_isc_txd_credits_update, .ift_rxd_available = mgb_isc_rxd_available, .ift_rxd_pkt_get = mgb_isc_rxd_pkt_get, .ift_rxd_refill = mgb_isc_rxd_refill, .ift_rxd_flush = mgb_isc_rxd_flush, .ift_legacy_intr = mgb_legacy_intr }; static struct if_shared_ctx mgb_sctx_init = { .isc_magic = IFLIB_MAGIC, .isc_q_align = PAGE_SIZE, .isc_admin_intrcnt = 1, .isc_flags = IFLIB_DRIVER_MEDIA /* | IFLIB_HAS_RXCQ | IFLIB_HAS_TXCQ*/, .isc_vendor_info = mgb_vendor_info_array, .isc_driver_version = "1", .isc_driver = &mgb_iflib_driver, /* 2 queues per set for TX and RX (ring queue, head writeback queue) */ .isc_ntxqs = 2, .isc_tx_maxsize = MGB_DMA_MAXSEGS * MCLBYTES, /* .isc_tx_nsegments = MGB_DMA_MAXSEGS, */ .isc_tx_maxsegsize = MCLBYTES, .isc_ntxd_min = {1, 1}, /* Will want to make this bigger */ .isc_ntxd_max = {MGB_DMA_RING_SIZE, 1}, .isc_ntxd_default = {MGB_DMA_RING_SIZE, 1}, .isc_nrxqs = 2, .isc_rx_maxsize = MCLBYTES, .isc_rx_nsegments = 1, .isc_rx_maxsegsize = MCLBYTES, .isc_nrxd_min = {1, 1}, /* Will want to make this bigger */ .isc_nrxd_max = {MGB_DMA_RING_SIZE, 1}, .isc_nrxd_default = {MGB_DMA_RING_SIZE, 1}, .isc_nfl = 1, /*one free list since there is only one queue */ #if 0 /* UNUSED_CTX */ .isc_tso_maxsize = MGB_TSO_MAXSIZE + sizeof(struct ether_vlan_header), .isc_tso_maxsegsize = MGB_TX_MAXSEGSIZE, #endif /* UNUSED_CTX */ }; /*********************************************************************/ static void * mgb_register(device_t dev) { return (&mgb_sctx_init); } static int mgb_attach_pre(if_ctx_t ctx) { struct mgb_softc *sc; if_softc_ctx_t scctx; int error, phyaddr, rid; struct ether_addr hwaddr; struct mii_data *miid; sc = iflib_get_softc(ctx); sc->ctx = ctx; sc->dev = iflib_get_dev(ctx); scctx = iflib_get_softc_ctx(ctx); /* IFLIB required setup */ scctx->isc_txrx = &mgb_txrx; scctx->isc_tx_nsegments = MGB_DMA_MAXSEGS; /* Ring desc queues */ scctx->isc_txqsizes[0] = sizeof(struct mgb_ring_desc) * scctx->isc_ntxd[0]; scctx->isc_rxqsizes[0] = sizeof(struct mgb_ring_desc) * scctx->isc_nrxd[0]; /* Head WB queues */ scctx->isc_txqsizes[1] = sizeof(uint32_t) * scctx->isc_ntxd[1]; scctx->isc_rxqsizes[1] = sizeof(uint32_t) * scctx->isc_nrxd[1]; /* XXX: Must have 1 txqset, but can have up to 4 rxqsets */ scctx->isc_nrxqsets = 1; scctx->isc_ntxqsets = 1; /* scctx->isc_tx_csum_flags = (CSUM_TCP | CSUM_UDP) | (CSUM_TCP_IPV6 | CSUM_UDP_IPV6) | CSUM_TSO */ scctx->isc_tx_csum_flags = 0; scctx->isc_capabilities = scctx->isc_capenable = 0; #if 0 /* * CSUM, TSO and VLAN support are TBD */ IFCAP_TXCSUM | IFCAP_TXCSUM_IPV6 | IFCAP_TSO4 | IFCAP_TSO6 | IFCAP_RXCSUM | IFCAP_RXCSUM_IPV6 | IFCAP_VLAN_MTU | IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_HWCSUM | IFCAP_VLAN_HWTSO | IFCAP_JUMBO_MTU; scctx->isc_capabilities |= IFCAP_LRO | IFCAP_VLAN_HWFILTER; #endif /* get the BAR */ error = mgb_alloc_regs(sc); if (error != 0) { device_printf(sc->dev, "Unable to allocate bus resource: registers.\n"); goto fail; } error = mgb_test_bar(sc); if (error != 0) goto fail; error = mgb_hw_init(sc); if (error != 0) { device_printf(sc->dev, "MGB device init failed. 
(err: %d)\n", error); goto fail; } switch (pci_get_device(sc->dev)) { case MGB_LAN7430_DEVICE_ID: phyaddr = 1; break; case MGB_LAN7431_DEVICE_ID: default: phyaddr = MII_PHY_ANY; break; } /* XXX: Would be nice(r) if locked methods were here */ error = mii_attach(sc->dev, &sc->miibus, iflib_get_ifp(ctx), mgb_media_change, mgb_media_status, BMSR_DEFCAPMASK, phyaddr, MII_OFFSET_ANY, MIIF_DOPAUSE); if (error != 0) { device_printf(sc->dev, "Failed to attach MII interface\n"); goto fail; } miid = device_get_softc(sc->miibus); scctx->isc_media = &miid->mii_media; scctx->isc_msix_bar = pci_msix_table_bar(sc->dev); /** Setup PBA BAR **/ rid = pci_msix_pba_bar(sc->dev); if (rid != scctx->isc_msix_bar) { sc->pba = bus_alloc_resource_any(sc->dev, SYS_RES_MEMORY, &rid, RF_ACTIVE); if (sc->pba == NULL) { error = ENXIO; device_printf(sc->dev, "Failed to setup PBA BAR\n"); goto fail; } } mgb_get_ethaddr(sc, &hwaddr); if (ETHER_IS_BROADCAST(hwaddr.octet) || ETHER_IS_MULTICAST(hwaddr.octet) || ETHER_IS_ZERO(hwaddr.octet)) ether_gen_addr(iflib_get_ifp(ctx), &hwaddr); /* * XXX: if the MAC address was generated the linux driver * writes it back to the device. */ iflib_set_mac(ctx, hwaddr.octet); /* Map all vectors to vector 0 (admin interrupts) by default. */ CSR_WRITE_REG(sc, MGB_INTR_VEC_RX_MAP, 0); CSR_WRITE_REG(sc, MGB_INTR_VEC_TX_MAP, 0); CSR_WRITE_REG(sc, MGB_INTR_VEC_OTHER_MAP, 0); return (0); fail: mgb_detach(ctx); return (error); } static int mgb_attach_post(if_ctx_t ctx) { struct mgb_softc *sc; sc = iflib_get_softc(ctx); device_printf(sc->dev, "Interrupt test: %s\n", (mgb_intr_test(sc) ? "PASS" : "FAIL")); return (0); } static int mgb_detach(if_ctx_t ctx) { struct mgb_softc *sc; int error; sc = iflib_get_softc(ctx); /* XXX: Should report errors but still detach everything. 
*/ error = mgb_hw_teardown(sc); /* Release IRQs */ iflib_irq_free(ctx, &sc->rx_irq); iflib_irq_free(ctx, &sc->admin_irq); - if (sc->miibus != NULL) - device_delete_child(sc->dev, sc->miibus); + bus_generic_detach(sc->dev); if (sc->pba != NULL) error = bus_release_resource(sc->dev, SYS_RES_MEMORY, rman_get_rid(sc->pba), sc->pba); sc->pba = NULL; error = mgb_release_regs(sc); return (error); } static int mgb_media_change(if_t ifp) { struct mii_data *miid; struct mii_softc *miisc; struct mgb_softc *sc; if_ctx_t ctx; int needs_reset; ctx = if_getsoftc(ifp); sc = iflib_get_softc(ctx); miid = device_get_softc(sc->miibus); LIST_FOREACH(miisc, &miid->mii_phys, mii_list) PHY_RESET(miisc); needs_reset = mii_mediachg(miid); if (needs_reset != 0) if_init(ifp, ctx); return (needs_reset); } static void mgb_media_status(if_t ifp, struct ifmediareq *ifmr) { struct mgb_softc *sc; struct mii_data *miid; sc = iflib_get_softc(if_getsoftc(ifp)); miid = device_get_softc(sc->miibus); if ((if_getflags(ifp) & IFF_UP) == 0) return; mii_pollstat(miid); ifmr->ifm_active = miid->mii_media_active; ifmr->ifm_status = miid->mii_media_status; } static int mgb_tx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs, int ntxqs, int ntxqsets) { struct mgb_softc *sc; struct mgb_ring_data *rdata; int q; sc = iflib_get_softc(ctx); KASSERT(ntxqsets == 1, ("ntxqsets = %d", ntxqsets)); rdata = &sc->tx_ring_data; for (q = 0; q < ntxqsets; q++) { KASSERT(ntxqs == 2, ("ntxqs = %d", ntxqs)); /* Ring */ rdata->ring = (struct mgb_ring_desc *) vaddrs[q * ntxqs + 0]; rdata->ring_bus_addr = paddrs[q * ntxqs + 0]; /* Head WB */ rdata->head_wb = (uint32_t *) vaddrs[q * ntxqs + 1]; rdata->head_wb_bus_addr = paddrs[q * ntxqs + 1]; } return (0); } static int mgb_rx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs, int nrxqs, int nrxqsets) { struct mgb_softc *sc; struct mgb_ring_data *rdata; int q; sc = iflib_get_softc(ctx); KASSERT(nrxqsets == 1, ("nrxqsets = %d", nrxqsets)); rdata = &sc->rx_ring_data; for (q = 0; q < nrxqsets; q++) { KASSERT(nrxqs == 2, ("nrxqs = %d", nrxqs)); /* Ring */ rdata->ring = (struct mgb_ring_desc *) vaddrs[q * nrxqs + 0]; rdata->ring_bus_addr = paddrs[q * nrxqs + 0]; /* Head WB */ rdata->head_wb = (uint32_t *) vaddrs[q * nrxqs + 1]; rdata->head_wb_bus_addr = paddrs[q * nrxqs + 1]; } return (0); } static void mgb_queues_free(if_ctx_t ctx) { struct mgb_softc *sc; sc = iflib_get_softc(ctx); memset(&sc->rx_ring_data, 0, sizeof(struct mgb_ring_data)); memset(&sc->tx_ring_data, 0, sizeof(struct mgb_ring_data)); } static void mgb_init(if_ctx_t ctx) { struct mgb_softc *sc; struct mii_data *miid; int error; sc = iflib_get_softc(ctx); miid = device_get_softc(sc->miibus); device_printf(sc->dev, "running init ...\n"); mgb_dma_init(sc); /* XXX: Turn off perfect filtering, turn on (broad|multi|uni)cast rx */ CSR_CLEAR_REG(sc, MGB_RFE_CTL, MGB_RFE_ALLOW_PERFECT_FILTER); CSR_UPDATE_REG(sc, MGB_RFE_CTL, MGB_RFE_ALLOW_BROADCAST | MGB_RFE_ALLOW_MULTICAST | MGB_RFE_ALLOW_UNICAST); error = mii_mediachg(miid); /* Not much we can do if this fails. 
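 *
 * (ifdi_init is a void method in iflib, so there is no way to propagate
 * a failure from here anyway; logging the mii_mediachg() result is the
 * best this function can do.)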
*/ if (error) device_printf(sc->dev, "%s: mii_mediachg returned %d", __func__, error); } #if 0 static void mgb_dump_some_stats(struct mgb_softc *sc) { int i; int first_stat = 0x1200; int last_stat = 0x12FC; for (i = first_stat; i <= last_stat; i += 4) if (CSR_READ_REG(sc, i) != 0) device_printf(sc->dev, "0x%04x: 0x%08x\n", i, CSR_READ_REG(sc, i)); char *stat_names[] = { "MAC_ERR_STS ", "FCT_INT_STS ", "DMAC_CFG ", "DMAC_CMD ", "DMAC_INT_STS ", "DMAC_INT_EN ", "DMAC_RX_ERR_STS0 ", "DMAC_RX_ERR_STS1 ", "DMAC_RX_ERR_STS2 ", "DMAC_RX_ERR_STS3 ", "INT_STS ", "INT_EN ", "INT_VEC_EN ", "INT_VEC_MAP0 ", "INT_VEC_MAP1 ", "INT_VEC_MAP2 ", "TX_HEAD0", "TX_TAIL0", "DMAC_TX_ERR_STS0 ", NULL }; int stats[] = { 0x114, 0xA0, 0xC00, 0xC0C, 0xC10, 0xC14, 0xC60, 0xCA0, 0xCE0, 0xD20, 0x780, 0x788, 0x794, 0x7A0, 0x7A4, 0x780, 0xD58, 0xD5C, 0xD60, 0x0 }; i = 0; printf("==============================\n"); while (stats[i++]) device_printf(sc->dev, "%s at offset 0x%04x = 0x%08x\n", stat_names[i - 1], stats[i - 1], CSR_READ_REG(sc, stats[i - 1])); printf("==== TX RING DESCS ====\n"); for (i = 0; i < MGB_DMA_RING_SIZE; i++) device_printf(sc->dev, "ring[%d].data0=0x%08x\n" "ring[%d].data1=0x%08x\n" "ring[%d].data2=0x%08x\n" "ring[%d].data3=0x%08x\n", i, sc->tx_ring_data.ring[i].ctl, i, sc->tx_ring_data.ring[i].addr.low, i, sc->tx_ring_data.ring[i].addr.high, i, sc->tx_ring_data.ring[i].sts); device_printf(sc->dev, "==== DUMP_TX_DMA_RAM ====\n"); CSR_WRITE_REG(sc, 0x24, 0xF); // DP_SEL & TX_RAM_0 for (i = 0; i < 128; i++) { CSR_WRITE_REG(sc, 0x2C, i); // DP_ADDR CSR_WRITE_REG(sc, 0x28, 0); // DP_CMD while ((CSR_READ_REG(sc, 0x24) & 0x80000000) == 0) // DP_SEL & READY DELAY(1000); device_printf(sc->dev, "DMAC_TX_RAM_0[%u]=%08x\n", i, CSR_READ_REG(sc, 0x30)); // DP_DATA } } #endif static void mgb_stop(if_ctx_t ctx) { struct mgb_softc *sc ; if_softc_ctx_t scctx; int i; sc = iflib_get_softc(ctx); scctx = iflib_get_softc_ctx(ctx); /* XXX: Could potentially timeout */ for (i = 0; i < scctx->isc_nrxqsets; i++) { mgb_dmac_control(sc, MGB_DMAC_RX_START, 0, DMAC_STOP); mgb_fct_control(sc, MGB_FCT_RX_CTL, 0, FCT_DISABLE); } for (i = 0; i < scctx->isc_ntxqsets; i++) { mgb_dmac_control(sc, MGB_DMAC_TX_START, 0, DMAC_STOP); mgb_fct_control(sc, MGB_FCT_TX_CTL, 0, FCT_DISABLE); } } static int mgb_legacy_intr(void *xsc) { struct mgb_softc *sc; sc = xsc; iflib_admin_intr_deferred(sc->ctx); return (FILTER_HANDLED); } static int mgb_rxq_intr(void *xsc) { struct mgb_softc *sc; if_softc_ctx_t scctx; uint32_t intr_sts, intr_en; int qidx; sc = xsc; scctx = iflib_get_softc_ctx(sc->ctx); intr_sts = CSR_READ_REG(sc, MGB_INTR_STS); intr_en = CSR_READ_REG(sc, MGB_INTR_ENBL_SET); intr_sts &= intr_en; for (qidx = 0; qidx < scctx->isc_nrxqsets; qidx++) { if ((intr_sts & MGB_INTR_STS_RX(qidx))){ CSR_WRITE_REG(sc, MGB_INTR_ENBL_CLR, MGB_INTR_STS_RX(qidx)); CSR_WRITE_REG(sc, MGB_INTR_STS, MGB_INTR_STS_RX(qidx)); } } return (FILTER_SCHEDULE_THREAD); } static int mgb_admin_intr(void *xsc) { struct mgb_softc *sc; if_softc_ctx_t scctx; uint32_t intr_sts, intr_en; int qidx; sc = xsc; scctx = iflib_get_softc_ctx(sc->ctx); intr_sts = CSR_READ_REG(sc, MGB_INTR_STS); intr_en = CSR_READ_REG(sc, MGB_INTR_ENBL_SET); intr_sts &= intr_en; /* TODO: shouldn't continue if suspended */ if ((intr_sts & MGB_INTR_STS_ANY) == 0) return (FILTER_STRAY); if ((intr_sts & MGB_INTR_STS_TEST) != 0) { sc->isr_test_flag = true; CSR_WRITE_REG(sc, MGB_INTR_STS, MGB_INTR_STS_TEST); return (FILTER_HANDLED); } if ((intr_sts & MGB_INTR_STS_RX_ANY) != 0) { for (qidx = 0; qidx < 
scctx->isc_nrxqsets; qidx++) { if ((intr_sts & MGB_INTR_STS_RX(qidx))) { iflib_rx_intr_deferred(sc->ctx, qidx); } } return (FILTER_HANDLED); } /* XXX: TX interrupts should not occur */ if ((intr_sts & MGB_INTR_STS_TX_ANY) != 0) { for (qidx = 0; qidx < scctx->isc_ntxqsets; qidx++) { if ((intr_sts & MGB_INTR_STS_TX(qidx))) { /* clear the interrupt sts and run handler */ CSR_WRITE_REG(sc, MGB_INTR_ENBL_CLR, MGB_INTR_STS_TX(qidx)); CSR_WRITE_REG(sc, MGB_INTR_STS, MGB_INTR_STS_TX(qidx)); iflib_tx_intr_deferred(sc->ctx, qidx); } } return (FILTER_HANDLED); } return (FILTER_SCHEDULE_THREAD); } static int mgb_msix_intr_assign(if_ctx_t ctx, int msix) { struct mgb_softc *sc; if_softc_ctx_t scctx; int error, i, vectorid; char irq_name[16]; sc = iflib_get_softc(ctx); scctx = iflib_get_softc_ctx(ctx); KASSERT(scctx->isc_nrxqsets == 1 && scctx->isc_ntxqsets == 1, ("num rxqsets/txqsets != 1 ")); /* * First vector should be admin interrupts, other vectors are TX/RX * * RIDs start at 1, and vector ids start at 0. */ vectorid = 0; error = iflib_irq_alloc_generic(ctx, &sc->admin_irq, vectorid + 1, IFLIB_INTR_ADMIN, mgb_admin_intr, sc, 0, "admin"); if (error) { device_printf(sc->dev, "Failed to register admin interrupt handler\n"); return (error); } for (i = 0; i < scctx->isc_nrxqsets; i++) { vectorid++; snprintf(irq_name, sizeof(irq_name), "rxq%d", i); error = iflib_irq_alloc_generic(ctx, &sc->rx_irq, vectorid + 1, IFLIB_INTR_RXTX, mgb_rxq_intr, sc, i, irq_name); if (error) { device_printf(sc->dev, "Failed to register rxq %d interrupt handler\n", i); return (error); } CSR_UPDATE_REG(sc, MGB_INTR_VEC_RX_MAP, MGB_INTR_VEC_MAP(vectorid, i)); } /* Not actually mapping hw TX interrupts ... */ for (i = 0; i < scctx->isc_ntxqsets; i++) { snprintf(irq_name, sizeof(irq_name), "txq%d", i); iflib_softirq_alloc_generic(ctx, NULL, IFLIB_INTR_TX, NULL, i, irq_name); } return (0); } static void mgb_intr_enable_all(if_ctx_t ctx) { struct mgb_softc *sc; if_softc_ctx_t scctx; int i, dmac_enable = 0, intr_sts = 0, vec_en = 0; sc = iflib_get_softc(ctx); scctx = iflib_get_softc_ctx(ctx); intr_sts |= MGB_INTR_STS_ANY; vec_en |= MGB_INTR_STS_ANY; for (i = 0; i < scctx->isc_nrxqsets; i++) { intr_sts |= MGB_INTR_STS_RX(i); dmac_enable |= MGB_DMAC_RX_INTR_ENBL(i); vec_en |= MGB_INTR_RX_VEC_STS(i); } /* TX interrupts aren't needed ...
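 * because TX reclamation is driven by the head pointer write-back that
 * mgb_isc_txd_credits_update() polls from iflib's taskqueue context, so
 * only the admin and RX sources get unmasked here.
 *
 * (For reference, with the single queue set this driver currently
 * supports, mgb_msix_intr_assign() above ends up with the layout:
 *
 *	vector 0 (rid 1): admin/other events -> mgb_admin_intr()
 *	vector 1 (rid 2): RX queue 0         -> mgb_rxq_intr()
 *
 * and MGB_INTR_VEC_RX_MAP steers RX queue 0 events to vector 1.)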
*/ CSR_WRITE_REG(sc, MGB_INTR_ENBL_SET, intr_sts); CSR_WRITE_REG(sc, MGB_INTR_VEC_ENBL_SET, vec_en); CSR_WRITE_REG(sc, MGB_DMAC_INTR_STS, dmac_enable); CSR_WRITE_REG(sc, MGB_DMAC_INTR_ENBL_SET, dmac_enable); } static void mgb_intr_disable_all(if_ctx_t ctx) { struct mgb_softc *sc; sc = iflib_get_softc(ctx); CSR_WRITE_REG(sc, MGB_INTR_ENBL_CLR, UINT32_MAX); CSR_WRITE_REG(sc, MGB_INTR_VEC_ENBL_CLR, UINT32_MAX); CSR_WRITE_REG(sc, MGB_INTR_STS, UINT32_MAX); CSR_WRITE_REG(sc, MGB_DMAC_INTR_ENBL_CLR, UINT32_MAX); CSR_WRITE_REG(sc, MGB_DMAC_INTR_STS, UINT32_MAX); } static int mgb_rx_queue_intr_enable(if_ctx_t ctx, uint16_t qid) { /* called after successful rx isr */ struct mgb_softc *sc; sc = iflib_get_softc(ctx); CSR_WRITE_REG(sc, MGB_INTR_VEC_ENBL_SET, MGB_INTR_RX_VEC_STS(qid)); CSR_WRITE_REG(sc, MGB_INTR_ENBL_SET, MGB_INTR_STS_RX(qid)); CSR_WRITE_REG(sc, MGB_DMAC_INTR_STS, MGB_DMAC_RX_INTR_ENBL(qid)); CSR_WRITE_REG(sc, MGB_DMAC_INTR_ENBL_SET, MGB_DMAC_RX_INTR_ENBL(qid)); return (0); } static int mgb_tx_queue_intr_enable(if_ctx_t ctx, uint16_t qid) { /* XXX: not called (since tx interrupts not used) */ struct mgb_softc *sc; sc = iflib_get_softc(ctx); CSR_WRITE_REG(sc, MGB_INTR_ENBL_SET, MGB_INTR_STS_TX(qid)); CSR_WRITE_REG(sc, MGB_DMAC_INTR_STS, MGB_DMAC_TX_INTR_ENBL(qid)); CSR_WRITE_REG(sc, MGB_DMAC_INTR_ENBL_SET, MGB_DMAC_TX_INTR_ENBL(qid)); return (0); } static bool mgb_intr_test(struct mgb_softc *sc) { int i; sc->isr_test_flag = false; CSR_WRITE_REG(sc, MGB_INTR_STS, MGB_INTR_STS_TEST); CSR_WRITE_REG(sc, MGB_INTR_VEC_ENBL_SET, MGB_INTR_STS_ANY); CSR_WRITE_REG(sc, MGB_INTR_ENBL_SET, MGB_INTR_STS_ANY | MGB_INTR_STS_TEST); CSR_WRITE_REG(sc, MGB_INTR_SET, MGB_INTR_STS_TEST); if (sc->isr_test_flag) return (true); for (i = 0; i < MGB_TIMEOUT; i++) { DELAY(10); if (sc->isr_test_flag) break; } CSR_WRITE_REG(sc, MGB_INTR_ENBL_CLR, MGB_INTR_STS_TEST); CSR_WRITE_REG(sc, MGB_INTR_STS, MGB_INTR_STS_TEST); return (sc->isr_test_flag); } static int mgb_isc_txd_encap(void *xsc , if_pkt_info_t ipi) { struct mgb_softc *sc; struct mgb_ring_data *rdata; struct mgb_ring_desc *txd; bus_dma_segment_t *segs; qidx_t pidx, nsegs; int i; KASSERT(ipi->ipi_qsidx == 0, ("tried to refill TX Channel %d.\n", ipi->ipi_qsidx)); sc = xsc; rdata = &sc->tx_ring_data; pidx = ipi->ipi_pidx; segs = ipi->ipi_segs; nsegs = ipi->ipi_nsegs; /* For each seg, create a descriptor */ for (i = 0; i < nsegs; ++i) { KASSERT(nsegs == 1, ("Multisegment packet !!!!!\n")); txd = &rdata->ring[pidx]; txd->ctl = htole32( (segs[i].ds_len & MGB_DESC_CTL_BUFLEN_MASK ) | /* * XXX: This will be wrong in the multipacket case * I suspect FS should be for the first packet and * LS should be for the last packet */ MGB_TX_DESC_CTL_FS | MGB_TX_DESC_CTL_LS | MGB_DESC_CTL_FCS); txd->addr.low = htole32(CSR_TRANSLATE_ADDR_LOW32( segs[i].ds_addr)); txd->addr.high = htole32(CSR_TRANSLATE_ADDR_HIGH32( segs[i].ds_addr)); txd->sts = htole32( (segs[i].ds_len << 16) & MGB_DESC_FRAME_LEN_MASK); pidx = MGB_NEXT_RING_IDX(pidx); } ipi->ipi_new_pidx = pidx; return (0); } static void mgb_isc_txd_flush(void *xsc, uint16_t txqid, qidx_t pidx) { struct mgb_softc *sc; struct mgb_ring_data *rdata; KASSERT(txqid == 0, ("tried to flush TX Channel %d.\n", txqid)); sc = xsc; rdata = &sc->tx_ring_data; if (rdata->last_tail != pidx) { rdata->last_tail = pidx; CSR_WRITE_REG(sc, MGB_DMA_TX_TAIL(txqid), rdata->last_tail); } } static int mgb_isc_txd_credits_update(void *xsc, uint16_t txqid, bool clear) { struct mgb_softc *sc; struct mgb_ring_desc *txd; struct mgb_ring_data *rdata; int 
processed = 0; /* * > If clear is true, we need to report the number of TX command ring * > descriptors that have been processed by the device. If clear is * > false, we just need to report whether or not at least one TX * > command ring descriptor has been processed by the device. * - vmx driver */ KASSERT(txqid == 0, ("tried to credits_update TX Channel %d.\n", txqid)); sc = xsc; rdata = &sc->tx_ring_data; while (*(rdata->head_wb) != rdata->last_head) { if (!clear) return (1); txd = &rdata->ring[rdata->last_head]; memset(txd, 0, sizeof(struct mgb_ring_desc)); rdata->last_head = MGB_NEXT_RING_IDX(rdata->last_head); processed++; } return (processed); } static int mgb_isc_rxd_available(void *xsc, uint16_t rxqid, qidx_t idx, qidx_t budget) { struct mgb_softc *sc; struct mgb_ring_data *rdata; int avail = 0; sc = xsc; KASSERT(rxqid == 0, ("tried to check availability in RX Channel %d.\n", rxqid)); rdata = &sc->rx_ring_data; for (; idx != *(rdata->head_wb); idx = MGB_NEXT_RING_IDX(idx)) { avail++; /* XXX: Could verify desc is device owned here */ if (avail == budget) break; } return (avail); } static int mgb_isc_rxd_pkt_get(void *xsc, if_rxd_info_t ri) { struct mgb_softc *sc; struct mgb_ring_data *rdata; struct mgb_ring_desc rxd; int total_len; KASSERT(ri->iri_qsidx == 0, ("tried to get packet in RX Channel %d\n", ri->iri_qsidx)); sc = xsc; total_len = 0; rdata = &sc->rx_ring_data; while (*(rdata->head_wb) != rdata->last_head) { /* copy ring desc and do swapping */ rxd = rdata->ring[rdata->last_head]; rxd.ctl = le32toh(rxd.ctl); rxd.addr.low = le32toh(rxd.addr.low); rxd.addr.high = le32toh(rxd.addr.high); rxd.sts = le32toh(rxd.sts); if ((rxd.ctl & MGB_DESC_CTL_OWN) != 0) { device_printf(sc->dev, "Tried to read descriptor ... " "found that it is still owned by the device\n"); return (EINVAL); } if ((rxd.ctl & MGB_RX_DESC_CTL_FS) == 0) { device_printf(sc->dev, "Tried to read descriptor ... " "found that FS is not set.\n"); return (EINVAL); } /* XXX: Multi-packet support */ if ((rxd.ctl & MGB_RX_DESC_CTL_LS) == 0) { device_printf(sc->dev, "Tried to read descriptor ... " "found that LS is not set.
(Multi-buffer packets not yet supported)\n"); return (EINVAL); } ri->iri_frags[0].irf_flid = 0; ri->iri_frags[0].irf_idx = rdata->last_head; ri->iri_frags[0].irf_len = MGB_DESC_GET_FRAME_LEN(&rxd); total_len += ri->iri_frags[0].irf_len; rdata->last_head = MGB_NEXT_RING_IDX(rdata->last_head); break; } ri->iri_nfrags = 1; ri->iri_len = total_len; return (0); } static void mgb_isc_rxd_refill(void *xsc, if_rxd_update_t iru) { struct mgb_softc *sc; struct mgb_ring_data *rdata; struct mgb_ring_desc *rxd; uint64_t *paddrs; qidx_t *idxs; qidx_t idx; int count, len; count = iru->iru_count; len = iru->iru_buf_size; idxs = iru->iru_idxs; paddrs = iru->iru_paddrs; KASSERT(iru->iru_qsidx == 0, ("tried to refill RX Channel %d.\n", iru->iru_qsidx)); sc = xsc; rdata = &sc->rx_ring_data; while (count > 0) { idx = idxs[--count]; rxd = &rdata->ring[idx]; rxd->sts = 0; rxd->addr.low = htole32(CSR_TRANSLATE_ADDR_LOW32(paddrs[count])); rxd->addr.high = htole32(CSR_TRANSLATE_ADDR_HIGH32(paddrs[count])); rxd->ctl = htole32(MGB_DESC_CTL_OWN | (len & MGB_DESC_CTL_BUFLEN_MASK)); } return; } static void mgb_isc_rxd_flush(void *xsc, uint16_t rxqid, uint8_t flid, qidx_t pidx) { struct mgb_softc *sc; sc = xsc; KASSERT(rxqid == 0, ("tried to flush RX Channel %d.\n", rxqid)); /* * According to the programming guide, last_tail must be set to * the last valid RX descriptor, rather than to the one past that. * Note that this is not true for the TX ring! */ sc->rx_ring_data.last_tail = MGB_PREV_RING_IDX(pidx); CSR_WRITE_REG(sc, MGB_DMA_RX_TAIL(rxqid), sc->rx_ring_data.last_tail); return; } static int mgb_test_bar(struct mgb_softc *sc) { uint32_t id_rev, dev_id; id_rev = CSR_READ_REG(sc, 0); dev_id = id_rev >> 16; if (dev_id == MGB_LAN7430_DEVICE_ID || dev_id == MGB_LAN7431_DEVICE_ID) { return (0); } else { device_printf(sc->dev, "ID check failed.\n"); return (ENXIO); } } static int mgb_alloc_regs(struct mgb_softc *sc) { int rid; rid = PCIR_BAR(MGB_BAR); pci_enable_busmaster(sc->dev); sc->regs = bus_alloc_resource_any(sc->dev, SYS_RES_MEMORY, &rid, RF_ACTIVE); if (sc->regs == NULL) return (ENXIO); return (0); } static int mgb_release_regs(struct mgb_softc *sc) { int error = 0; if (sc->regs != NULL) error = bus_release_resource(sc->dev, SYS_RES_MEMORY, rman_get_rid(sc->regs), sc->regs); sc->regs = NULL; pci_disable_busmaster(sc->dev); return (error); } static int mgb_dma_init(struct mgb_softc *sc) { if_softc_ctx_t scctx; int ch, error = 0; scctx = iflib_get_softc_ctx(sc->ctx); for (ch = 0; ch < scctx->isc_nrxqsets; ch++) if ((error = mgb_dma_rx_ring_init(sc, ch))) goto fail; for (ch = 0; ch < scctx->isc_ntxqsets; ch++) if ((error = mgb_dma_tx_ring_init(sc, ch))) goto fail; fail: return (error); } static int mgb_dma_rx_ring_init(struct mgb_softc *sc, int channel) { struct mgb_ring_data *rdata; int ring_config, error = 0; rdata = &sc->rx_ring_data; mgb_dmac_control(sc, MGB_DMAC_RX_START, channel, DMAC_RESET); KASSERT(MGB_DMAC_STATE_IS_INITIAL(sc, MGB_DMAC_RX_START, channel), ("Trying to init channels when not in init state\n")); /* write ring address */ if (rdata->ring_bus_addr == 0) { device_printf(sc->dev, "Invalid ring bus addr.\n"); error = ENXIO; goto fail; } CSR_WRITE_REG(sc, MGB_DMA_RX_BASE_H(channel), CSR_TRANSLATE_ADDR_HIGH32(rdata->ring_bus_addr)); CSR_WRITE_REG(sc, MGB_DMA_RX_BASE_L(channel), CSR_TRANSLATE_ADDR_LOW32(rdata->ring_bus_addr)); /* write head pointer writeback address */ if (rdata->head_wb_bus_addr == 0) { device_printf(sc->dev, "Invalid head wb bus addr.\n"); error = ENXIO; goto fail; } CSR_WRITE_REG(sc, MGB_DMA_RX_HEAD_WB_H(channel),
CSR_TRANSLATE_ADDR_HIGH32(rdata->head_wb_bus_addr)); CSR_WRITE_REG(sc, MGB_DMA_RX_HEAD_WB_L(channel), CSR_TRANSLATE_ADDR_LOW32(rdata->head_wb_bus_addr)); /* Enable head pointer writeback */ CSR_WRITE_REG(sc, MGB_DMA_RX_CONFIG0(channel), MGB_DMA_HEAD_WB_ENBL); ring_config = CSR_READ_REG(sc, MGB_DMA_RX_CONFIG1(channel)); /* ring size */ ring_config &= ~MGB_DMA_RING_LEN_MASK; ring_config |= (MGB_DMA_RING_SIZE & MGB_DMA_RING_LEN_MASK); /* packet padding (PAD_2 is better for IP header alignment ...) */ ring_config &= ~MGB_DMA_RING_PAD_MASK; ring_config |= (MGB_DMA_RING_PAD_0 & MGB_DMA_RING_PAD_MASK); CSR_WRITE_REG(sc, MGB_DMA_RX_CONFIG1(channel), ring_config); rdata->last_head = CSR_READ_REG(sc, MGB_DMA_RX_HEAD(channel)); error = mgb_fct_control(sc, MGB_FCT_RX_CTL, channel, FCT_RESET); if (error != 0) { device_printf(sc->dev, "Failed to reset RX FCT.\n"); goto fail; } error = mgb_fct_control(sc, MGB_FCT_RX_CTL, channel, FCT_ENABLE); if (error != 0) { device_printf(sc->dev, "Failed to enable RX FCT.\n"); goto fail; } error = mgb_dmac_control(sc, MGB_DMAC_RX_START, channel, DMAC_START); if (error != 0) device_printf(sc->dev, "Failed to start RX DMAC.\n"); fail: return (error); } static int mgb_dma_tx_ring_init(struct mgb_softc *sc, int channel) { struct mgb_ring_data *rdata; int ring_config, error = 0; rdata = &sc->tx_ring_data; if ((error = mgb_fct_control(sc, MGB_FCT_TX_CTL, channel, FCT_RESET))) { device_printf(sc->dev, "Failed to reset TX FCT.\n"); goto fail; } if ((error = mgb_fct_control(sc, MGB_FCT_TX_CTL, channel, FCT_ENABLE))) { device_printf(sc->dev, "Failed to enable TX FCT.\n"); goto fail; } if ((error = mgb_dmac_control(sc, MGB_DMAC_TX_START, channel, DMAC_RESET))) { device_printf(sc->dev, "Failed to reset TX DMAC.\n"); goto fail; } KASSERT(MGB_DMAC_STATE_IS_INITIAL(sc, MGB_DMAC_TX_START, channel), ("Trying to init channels when not in init state\n")); /* write ring address */ if (rdata->ring_bus_addr == 0) { device_printf(sc->dev, "Invalid ring bus addr.\n"); error = ENXIO; goto fail; } CSR_WRITE_REG(sc, MGB_DMA_TX_BASE_H(channel), CSR_TRANSLATE_ADDR_HIGH32(rdata->ring_bus_addr)); CSR_WRITE_REG(sc, MGB_DMA_TX_BASE_L(channel), CSR_TRANSLATE_ADDR_LOW32(rdata->ring_bus_addr)); /* write ring size */ ring_config = CSR_READ_REG(sc, MGB_DMA_TX_CONFIG1(channel)); ring_config &= ~MGB_DMA_RING_LEN_MASK; ring_config |= (MGB_DMA_RING_SIZE & MGB_DMA_RING_LEN_MASK); CSR_WRITE_REG(sc, MGB_DMA_TX_CONFIG1(channel), ring_config); /* Enable interrupt on completion and head pointer writeback */ ring_config = (MGB_DMA_HEAD_WB_LS_ENBL | MGB_DMA_HEAD_WB_ENBL); CSR_WRITE_REG(sc, MGB_DMA_TX_CONFIG0(channel), ring_config); /* write head pointer writeback address */ if (rdata->head_wb_bus_addr == 0) { device_printf(sc->dev, "Invalid head wb bus addr.\n"); error = ENXIO; goto fail; } CSR_WRITE_REG(sc, MGB_DMA_TX_HEAD_WB_H(channel), CSR_TRANSLATE_ADDR_HIGH32(rdata->head_wb_bus_addr)); CSR_WRITE_REG(sc, MGB_DMA_TX_HEAD_WB_L(channel), CSR_TRANSLATE_ADDR_LOW32(rdata->head_wb_bus_addr)); rdata->last_head = CSR_READ_REG(sc, MGB_DMA_TX_HEAD(channel)); KASSERT(rdata->last_head == 0, ("MGB_DMA_TX_HEAD was not reset.\n")); rdata->last_tail = 0; CSR_WRITE_REG(sc, MGB_DMA_TX_TAIL(channel), rdata->last_tail); if ((error = mgb_dmac_control(sc, MGB_DMAC_TX_START, channel, DMAC_START))) device_printf(sc->dev, "Failed to start TX DMAC.\n"); fail: return (error); } static int mgb_dmac_control(struct mgb_softc *sc, int start, int channel, enum mgb_dmac_cmd cmd) { int error = 0; switch (cmd) { case DMAC_RESET: CSR_WRITE_REG(sc, MGB_DMAC_CMD, MGB_DMAC_CMD_RESET(start, channel)); error =
mgb_wait_for_bits(sc, MGB_DMAC_CMD, 0, MGB_DMAC_CMD_RESET(start, channel)); break; case DMAC_START: /* * NOTE: this simplifies the logic, since it will never * try to start in STOP_PENDING, but it also increases work. */ error = mgb_dmac_control(sc, start, channel, DMAC_STOP); if (error != 0) return (error); CSR_WRITE_REG(sc, MGB_DMAC_CMD, MGB_DMAC_CMD_START(start, channel)); break; case DMAC_STOP: CSR_WRITE_REG(sc, MGB_DMAC_CMD, MGB_DMAC_CMD_STOP(start, channel)); error = mgb_wait_for_bits(sc, MGB_DMAC_CMD, MGB_DMAC_CMD_STOP(start, channel), MGB_DMAC_CMD_START(start, channel)); break; } return (error); } static int mgb_fct_control(struct mgb_softc *sc, int reg, int channel, enum mgb_fct_cmd cmd) { switch (cmd) { case FCT_RESET: CSR_WRITE_REG(sc, reg, MGB_FCT_RESET(channel)); return (mgb_wait_for_bits(sc, reg, 0, MGB_FCT_RESET(channel))); case FCT_ENABLE: CSR_WRITE_REG(sc, reg, MGB_FCT_ENBL(channel)); return (0); case FCT_DISABLE: CSR_WRITE_REG(sc, reg, MGB_FCT_DSBL(channel)); return (mgb_wait_for_bits(sc, reg, 0, MGB_FCT_ENBL(channel))); } } static int mgb_hw_teardown(struct mgb_softc *sc) { int err = 0; /* Stop MAC */ CSR_CLEAR_REG(sc, MGB_MAC_RX, MGB_MAC_ENBL); CSR_WRITE_REG(sc, MGB_MAC_TX, MGB_MAC_ENBL); if ((err = mgb_wait_for_bits(sc, MGB_MAC_RX, MGB_MAC_DSBL, 0))) return (err); if ((err = mgb_wait_for_bits(sc, MGB_MAC_TX, MGB_MAC_DSBL, 0))) return (err); return (err); } static int mgb_hw_init(struct mgb_softc *sc) { int error = 0; error = mgb_hw_reset(sc); if (error != 0) goto fail; mgb_mac_init(sc); error = mgb_phy_reset(sc); if (error != 0) goto fail; error = mgb_dmac_reset(sc); if (error != 0) goto fail; fail: return (error); } static int mgb_hw_reset(struct mgb_softc *sc) { CSR_UPDATE_REG(sc, MGB_HW_CFG, MGB_LITE_RESET); return (mgb_wait_for_bits(sc, MGB_HW_CFG, 0, MGB_LITE_RESET)); } static int mgb_mac_init(struct mgb_softc *sc) { /** * enable automatic duplex detection and * automatic speed detection */ CSR_UPDATE_REG(sc, MGB_MAC_CR, MGB_MAC_ADD_ENBL | MGB_MAC_ASD_ENBL); CSR_UPDATE_REG(sc, MGB_MAC_TX, MGB_MAC_ENBL); CSR_UPDATE_REG(sc, MGB_MAC_RX, MGB_MAC_ENBL); return (MGB_STS_OK); } static int mgb_phy_reset(struct mgb_softc *sc) { CSR_UPDATE_BYTE(sc, MGB_PMT_CTL, MGB_PHY_RESET); if (mgb_wait_for_bits(sc, MGB_PMT_CTL, 0, MGB_PHY_RESET) == MGB_STS_TIMEOUT) return (MGB_STS_TIMEOUT); return (mgb_wait_for_bits(sc, MGB_PMT_CTL, MGB_PHY_READY, 0)); } static int mgb_dmac_reset(struct mgb_softc *sc) { CSR_WRITE_REG(sc, MGB_DMAC_CMD, MGB_DMAC_RESET); return (mgb_wait_for_bits(sc, MGB_DMAC_CMD, 0, MGB_DMAC_RESET)); } static int mgb_wait_for_bits(struct mgb_softc *sc, int reg, int set_bits, int clear_bits) { int i, val; i = 0; do { /* * XXX: Datasheets states delay should be > 5 microseconds * for device reset. 
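 *
 * (mgb_wait_for_bits() polls in 100us steps, up to MGB_TIMEOUT
 * iterations, until every bit in set_bits reads as 1 and every bit in
 * clear_bits reads as 0, and returns MGB_STS_OK or MGB_STS_TIMEOUT.
 * Both idioms appear above:
 *
 *	mgb_wait_for_bits(sc, MGB_HW_CFG, 0, MGB_LITE_RESET)
 *		waits for a self-clearing reset bit, while
 *	mgb_wait_for_bits(sc, MGB_PMT_CTL, MGB_PHY_READY, 0)
 *		waits for a ready flag to come up.)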
*/ DELAY(100); val = CSR_READ_REG(sc, reg); if ((val & set_bits) == set_bits && (val & clear_bits) == 0) return (MGB_STS_OK); } while (i++ < MGB_TIMEOUT); return (MGB_STS_TIMEOUT); } static void mgb_get_ethaddr(struct mgb_softc *sc, struct ether_addr *dest) { CSR_READ_REG_BYTES(sc, MGB_MAC_ADDR_BASE_L, &dest->octet[0], 4); CSR_READ_REG_BYTES(sc, MGB_MAC_ADDR_BASE_H, &dest->octet[4], 2); } static int mgb_miibus_readreg(device_t dev, int phy, int reg) { struct mgb_softc *sc; int mii_access; sc = iflib_get_softc(device_get_softc(dev)); if (mgb_wait_for_bits(sc, MGB_MII_ACCESS, 0, MGB_MII_BUSY) == MGB_STS_TIMEOUT) return (EIO); mii_access = (phy & MGB_MII_PHY_ADDR_MASK) << MGB_MII_PHY_ADDR_SHIFT; mii_access |= (reg & MGB_MII_REG_ADDR_MASK) << MGB_MII_REG_ADDR_SHIFT; mii_access |= MGB_MII_BUSY | MGB_MII_READ; CSR_WRITE_REG(sc, MGB_MII_ACCESS, mii_access); if (mgb_wait_for_bits(sc, MGB_MII_ACCESS, 0, MGB_MII_BUSY) == MGB_STS_TIMEOUT) return (EIO); return (CSR_READ_2_BYTES(sc, MGB_MII_DATA)); } static int mgb_miibus_writereg(device_t dev, int phy, int reg, int data) { struct mgb_softc *sc; int mii_access; sc = iflib_get_softc(device_get_softc(dev)); if (mgb_wait_for_bits(sc, MGB_MII_ACCESS, 0, MGB_MII_BUSY) == MGB_STS_TIMEOUT) return (EIO); mii_access = (phy & MGB_MII_PHY_ADDR_MASK) << MGB_MII_PHY_ADDR_SHIFT; mii_access |= (reg & MGB_MII_REG_ADDR_MASK) << MGB_MII_REG_ADDR_SHIFT; mii_access |= MGB_MII_BUSY | MGB_MII_WRITE; CSR_WRITE_REG(sc, MGB_MII_DATA, data); CSR_WRITE_REG(sc, MGB_MII_ACCESS, mii_access); if (mgb_wait_for_bits(sc, MGB_MII_ACCESS, 0, MGB_MII_BUSY) == MGB_STS_TIMEOUT) return (EIO); return (0); } /* XXX: May need to lock these up */ static void mgb_miibus_statchg(device_t dev) { struct mgb_softc *sc; struct mii_data *miid; sc = iflib_get_softc(device_get_softc(dev)); miid = device_get_softc(sc->miibus); /* Update baudrate in iflib */ sc->baudrate = ifmedia_baudrate(miid->mii_media_active); iflib_link_state_change(sc->ctx, sc->link_state, sc->baudrate); } static void mgb_miibus_linkchg(device_t dev) { struct mgb_softc *sc; struct mii_data *miid; int link_state; sc = iflib_get_softc(device_get_softc(dev)); miid = device_get_softc(sc->miibus); /* XXX: copied from miibus_linkchg **/ if (miid->mii_media_status & IFM_AVALID) { if (miid->mii_media_status & IFM_ACTIVE) link_state = LINK_STATE_UP; else link_state = LINK_STATE_DOWN; } else link_state = LINK_STATE_UNKNOWN; sc->link_state = link_state; iflib_link_state_change(sc->ctx, sc->link_state, sc->baudrate); } diff --git a/sys/dev/mlx/mlx.c b/sys/dev/mlx/mlx.c index d9a2a1f5442b..f0c7591803e0 100644 --- a/sys/dev/mlx/mlx.c +++ b/sys/dev/mlx/mlx.c @@ -1,3071 +1,3065 @@ /*- * SPDX-License-Identifier: BSD-2-Clause * * Copyright (c) 1999 Michael Smith * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ /* * Driver for the Mylex DAC960 family of RAID controllers. */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include static struct cdevsw mlx_cdevsw = { .d_version = D_VERSION, .d_open = mlx_open, .d_close = mlx_close, .d_ioctl = mlx_ioctl, .d_name = "mlx", }; /* * Per-interface accessor methods */ static int mlx_v3_tryqueue(struct mlx_softc *sc, struct mlx_command *mc); static int mlx_v3_findcomplete(struct mlx_softc *sc, u_int8_t *slot, u_int16_t *status); static void mlx_v3_intaction(struct mlx_softc *sc, int action); static int mlx_v3_fw_handshake(struct mlx_softc *sc, int *error, int *param1, int *param2, int first); static int mlx_v4_tryqueue(struct mlx_softc *sc, struct mlx_command *mc); static int mlx_v4_findcomplete(struct mlx_softc *sc, u_int8_t *slot, u_int16_t *status); static void mlx_v4_intaction(struct mlx_softc *sc, int action); static int mlx_v4_fw_handshake(struct mlx_softc *sc, int *error, int *param1, int *param2, int first); static int mlx_v5_tryqueue(struct mlx_softc *sc, struct mlx_command *mc); static int mlx_v5_findcomplete(struct mlx_softc *sc, u_int8_t *slot, u_int16_t *status); static void mlx_v5_intaction(struct mlx_softc *sc, int action); static int mlx_v5_fw_handshake(struct mlx_softc *sc, int *error, int *param1, int *param2, int first); /* * Status monitoring */ static void mlx_periodic(void *data); static void mlx_periodic_enquiry(struct mlx_command *mc); static void mlx_periodic_eventlog_poll(struct mlx_softc *sc); static void mlx_periodic_eventlog_respond(struct mlx_command *mc); static void mlx_periodic_rebuild(struct mlx_command *mc); /* * Channel Pause */ static void mlx_pause_action(struct mlx_softc *sc); static void mlx_pause_done(struct mlx_command *mc); /* * Command submission. */ static void *mlx_enquire(struct mlx_softc *sc, int command, size_t bufsize, void (*complete)(struct mlx_command *mc)); static int mlx_flush(struct mlx_softc *sc); static int mlx_check(struct mlx_softc *sc, int drive); static int mlx_rebuild(struct mlx_softc *sc, int channel, int target); static int mlx_wait_command(struct mlx_command *mc); static int mlx_poll_command(struct mlx_command *mc); void mlx_startio_cb(void *arg, bus_dma_segment_t *segs, int nsegments, int error); static void mlx_startio(struct mlx_softc *sc); static void mlx_completeio(struct mlx_command *mc); static int mlx_user_command(struct mlx_softc *sc, struct mlx_usercommand *mu); void mlx_user_cb(void *arg, bus_dma_segment_t *segs, int nsegments, int error); /* * Command buffer allocation. */ static struct mlx_command *mlx_alloccmd(struct mlx_softc *sc); static void mlx_releasecmd(struct mlx_command *mc); static void mlx_freecmd(struct mlx_command *mc); /* * Command management. 
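 *
 * The rough life cycle of a command, as implemented by the helpers
 * declared here (sketch inferred from the code; steps may overlap):
 *
 *	mlx_alloccmd()		obtain a struct mlx_command
 *	mlx_getslot()		reserve a controller command slot
 *	mlx_start()		submit via the interface's tryqueue method
 *	mlx_done()		poll for completions via findcomplete
 *	mlx_complete()		run per-command completion handlers
 *	mlx_releasecmd()	return the command to the free list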
*/ static int mlx_getslot(struct mlx_command *mc); static void mlx_setup_dmamap(struct mlx_command *mc, bus_dma_segment_t *segs, int nsegments, int error); static void mlx_unmapcmd(struct mlx_command *mc); static int mlx_shutdown_locked(struct mlx_softc *sc); static int mlx_start(struct mlx_command *mc); static int mlx_done(struct mlx_softc *sc, int startio); static void mlx_complete(struct mlx_softc *sc); /* * Debugging. */ static char *mlx_diagnose_command(struct mlx_command *mc); static void mlx_describe_controller(struct mlx_softc *sc); static int mlx_fw_message(struct mlx_softc *sc, int status, int param1, int param2); /* * Utility functions. */ static struct mlx_sysdrive *mlx_findunit(struct mlx_softc *sc, int unit); /******************************************************************************** ******************************************************************************** Public Interfaces ******************************************************************************** ********************************************************************************/ /******************************************************************************** * Free all of the resources associated with (sc) * * Should not be called if the controller is active. */ void mlx_free(struct mlx_softc *sc) { struct mlx_command *mc; debug_called(1); /* destroy control device */ if (sc->mlx_dev_t != NULL) destroy_dev(sc->mlx_dev_t); if (sc->mlx_intr) bus_teardown_intr(sc->mlx_dev, sc->mlx_irq, sc->mlx_intr); /* cancel status timeout */ MLX_IO_LOCK(sc); callout_stop(&sc->mlx_timeout); /* throw away any command buffers */ while ((mc = TAILQ_FIRST(&sc->mlx_freecmds)) != NULL) { TAILQ_REMOVE(&sc->mlx_freecmds, mc, mc_link); mlx_freecmd(mc); } MLX_IO_UNLOCK(sc); callout_drain(&sc->mlx_timeout); /* destroy data-transfer DMA tag */ if (sc->mlx_buffer_dmat) bus_dma_tag_destroy(sc->mlx_buffer_dmat); /* free and destroy DMA memory and tag for s/g lists */ if (sc->mlx_sgbusaddr) bus_dmamap_unload(sc->mlx_sg_dmat, sc->mlx_sg_dmamap); if (sc->mlx_sgtable) bus_dmamem_free(sc->mlx_sg_dmat, sc->mlx_sgtable, sc->mlx_sg_dmamap); if (sc->mlx_sg_dmat) bus_dma_tag_destroy(sc->mlx_sg_dmat); /* disconnect the interrupt handler */ if (sc->mlx_irq != NULL) bus_release_resource(sc->mlx_dev, SYS_RES_IRQ, 0, sc->mlx_irq); /* destroy the parent DMA tag */ if (sc->mlx_parent_dmat) bus_dma_tag_destroy(sc->mlx_parent_dmat); /* release the register window mapping */ if (sc->mlx_mem != NULL) bus_release_resource(sc->mlx_dev, sc->mlx_mem_type, sc->mlx_mem_rid, sc->mlx_mem); /* free controller enquiry data */ if (sc->mlx_enq2 != NULL) free(sc->mlx_enq2, M_DEVBUF); sx_destroy(&sc->mlx_config_lock); mtx_destroy(&sc->mlx_io_lock); } /******************************************************************************** * Map the scatter/gather table into bus space */ static void mlx_dma_map_sg(void *arg, bus_dma_segment_t *segs, int nseg, int error) { struct mlx_softc *sc = (struct mlx_softc *)arg; debug_called(1); /* save base of s/g table's address in bus space */ sc->mlx_sgbusaddr = segs->ds_addr; } static int mlx_sglist_map(struct mlx_softc *sc) { size_t segsize; int error, ncmd; debug_called(1); /* destroy any existing mappings */ if (sc->mlx_sgbusaddr) bus_dmamap_unload(sc->mlx_sg_dmat, sc->mlx_sg_dmamap); if (sc->mlx_sgtable) bus_dmamem_free(sc->mlx_sg_dmat, sc->mlx_sgtable, sc->mlx_sg_dmamap); if (sc->mlx_sg_dmat) bus_dma_tag_destroy(sc->mlx_sg_dmat); sc->mlx_sgbusaddr = 0; sc->mlx_sgtable = NULL; sc->mlx_sg_dmat = NULL; /* * Create a single tag 
describing a region large enough to hold all of * the s/g lists we will need. If we're called early on, we don't know how * many commands we're going to be asked to support, so only allocate enough * for a couple. */ if (sc->mlx_enq2 == NULL) { ncmd = 2; } else { ncmd = sc->mlx_enq2->me_max_commands; } segsize = sizeof(struct mlx_sgentry) * MLX_NSEG * ncmd; error = bus_dma_tag_create(sc->mlx_parent_dmat, /* parent */ 1, 0, /* alignment,boundary */ BUS_SPACE_MAXADDR, /* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filter, filterarg */ segsize, 1, /* maxsize, nsegments */ BUS_SPACE_MAXSIZE_32BIT, /* maxsegsize */ 0, /* flags */ NULL, NULL, /* lockfunc, lockarg */ &sc->mlx_sg_dmat); if (error != 0) { device_printf(sc->mlx_dev, "can't allocate scatter/gather DMA tag\n"); return(ENOMEM); } /* * Allocate enough s/g maps for all commands and permanently map them into * controller-visible space. * * XXX this assumes we can get enough space for all the s/g maps in one * contiguous slab. We may need to switch to a more complex arrangement * where we allocate in smaller chunks and keep a lookup table from slot * to bus address. */ error = bus_dmamem_alloc(sc->mlx_sg_dmat, (void **)&sc->mlx_sgtable, BUS_DMA_NOWAIT, &sc->mlx_sg_dmamap); if (error) { device_printf(sc->mlx_dev, "can't allocate s/g table\n"); return(ENOMEM); } (void)bus_dmamap_load(sc->mlx_sg_dmat, sc->mlx_sg_dmamap, sc->mlx_sgtable, segsize, mlx_dma_map_sg, sc, 0); return(0); } /******************************************************************************** * Initialise the controller and softc */ int mlx_attach(struct mlx_softc *sc) { struct mlx_enquiry_old *meo; int rid, error, fwminor, hscode, hserror, hsparam1, hsparam2, hsmsg; debug_called(1); /* * Initialise per-controller queues. */ TAILQ_INIT(&sc->mlx_work); TAILQ_INIT(&sc->mlx_freecmds); bioq_init(&sc->mlx_bioq); /* * Select accessor methods based on controller interface type. */ switch(sc->mlx_iftype) { case MLX_IFTYPE_2: case MLX_IFTYPE_3: sc->mlx_tryqueue = mlx_v3_tryqueue; sc->mlx_findcomplete = mlx_v3_findcomplete; sc->mlx_intaction = mlx_v3_intaction; sc->mlx_fw_handshake = mlx_v3_fw_handshake; break; case MLX_IFTYPE_4: sc->mlx_tryqueue = mlx_v4_tryqueue; sc->mlx_findcomplete = mlx_v4_findcomplete; sc->mlx_intaction = mlx_v4_intaction; sc->mlx_fw_handshake = mlx_v4_fw_handshake; break; case MLX_IFTYPE_5: sc->mlx_tryqueue = mlx_v5_tryqueue; sc->mlx_findcomplete = mlx_v5_findcomplete; sc->mlx_intaction = mlx_v5_intaction; sc->mlx_fw_handshake = mlx_v5_fw_handshake; break; default: return(ENXIO); /* should never happen */ } /* disable interrupts before we start talking to the controller */ MLX_IO_LOCK(sc); sc->mlx_intaction(sc, MLX_INTACTION_DISABLE); MLX_IO_UNLOCK(sc); /* * Wait for the controller to come ready, handshake with the firmware if required. * This is typically only necessary on platforms where the controller BIOS does not * run. */ hsmsg = 0; DELAY(1000); while ((hscode = sc->mlx_fw_handshake(sc, &hserror, &hsparam1, &hsparam2, hsmsg == 0)) != 0) { /* report first time around... */ if (hsmsg == 0) { device_printf(sc->mlx_dev, "controller initialisation in progress...\n"); hsmsg = 1; } /* did we get a real message? */ if (hscode == 2) { hscode = mlx_fw_message(sc, hserror, hsparam1, hsparam2); /* fatal initialisation error? */ if (hscode != 0) { return(ENXIO); } } } if (hsmsg == 1) device_printf(sc->mlx_dev, "initialisation complete.\n"); /* * Allocate and connect our interrupt. 
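 *
 * (Note on the loop above: the interface-specific fw_handshake method
 * returns 0 once the controller is ready; a return of 2 means it
 * produced a real status/parameter tuple, which is handed to
 * mlx_fw_message(), and only a fatal result from that aborts the
 * attach.)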
*/ rid = 0; sc->mlx_irq = bus_alloc_resource_any(sc->mlx_dev, SYS_RES_IRQ, &rid, RF_SHAREABLE | RF_ACTIVE); if (sc->mlx_irq == NULL) { device_printf(sc->mlx_dev, "can't allocate interrupt\n"); return(ENXIO); } error = bus_setup_intr(sc->mlx_dev, sc->mlx_irq, INTR_TYPE_BIO | INTR_ENTROPY | INTR_MPSAFE, NULL, mlx_intr, sc, &sc->mlx_intr); if (error) { device_printf(sc->mlx_dev, "can't set up interrupt\n"); return(ENXIO); } /* * Create DMA tag for mapping buffers into controller-addressable space. */ error = bus_dma_tag_create(sc->mlx_parent_dmat, /* parent */ 1, 0, /* align, boundary */ BUS_SPACE_MAXADDR, /* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filter, filterarg */ MLX_MAXPHYS, /* maxsize */ MLX_NSEG, /* nsegments */ BUS_SPACE_MAXSIZE_32BIT, /* maxsegsize */ 0, /* flags */ busdma_lock_mutex, /* lockfunc */ &sc->mlx_io_lock, /* lockarg */ &sc->mlx_buffer_dmat); if (error != 0) { device_printf(sc->mlx_dev, "can't allocate buffer DMA tag\n"); return(ENOMEM); } /* * Create some initial scatter/gather mappings so we can run the probe * commands. */ error = mlx_sglist_map(sc); if (error != 0) { device_printf(sc->mlx_dev, "can't make initial s/g list mapping\n"); return(error); } /* * We don't (yet) know where the event log is up to. */ sc->mlx_currevent = -1; /* * Obtain controller feature information */ MLX_IO_LOCK(sc); if ((sc->mlx_enq2 = mlx_enquire(sc, MLX_CMD_ENQUIRY2, sizeof(struct mlx_enquiry2), NULL)) == NULL) { MLX_IO_UNLOCK(sc); device_printf(sc->mlx_dev, "ENQUIRY2 failed\n"); return(ENXIO); } /* * Do quirk/feature related things. */ fwminor = (sc->mlx_enq2->me_firmware_id >> 8) & 0xff; switch(sc->mlx_iftype) { case MLX_IFTYPE_2: /* These controllers don't report the firmware version in the ENQUIRY2 response */ if ((meo = mlx_enquire(sc, MLX_CMD_ENQUIRY_OLD, sizeof(struct mlx_enquiry_old), NULL)) == NULL) { MLX_IO_UNLOCK(sc); device_printf(sc->mlx_dev, "ENQUIRY_OLD failed\n"); return(ENXIO); } sc->mlx_enq2->me_firmware_id = ('0' << 24) | (0 << 16) | (meo->me_fwminor << 8) | meo->me_fwmajor; /* XXX require 2.42 or better (PCI) */ if (meo->me_fwminor < 42) { device_printf(sc->mlx_dev, " *** WARNING *** This firmware revision is not recommended\n"); device_printf(sc->mlx_dev, " *** WARNING *** Use revision 2.42 or later\n"); } free(meo, M_DEVBUF); break; case MLX_IFTYPE_3: /* XXX certify 3.52? */ if (fwminor < 51) { device_printf(sc->mlx_dev, " *** WARNING *** This firmware revision is not recommended\n"); device_printf(sc->mlx_dev, " *** WARNING *** Use revision 3.51 or later\n"); } break; case MLX_IFTYPE_4: /* XXX certify firmware versions? */ if (fwminor < 6) { device_printf(sc->mlx_dev, " *** WARNING *** This firmware revision is not recommended\n"); device_printf(sc->mlx_dev, " *** WARNING *** Use revision 4.06 or later\n"); } break; case MLX_IFTYPE_5: if (fwminor < 7) { device_printf(sc->mlx_dev, " *** WARNING *** This firmware revision is not recommended\n"); device_printf(sc->mlx_dev, " *** WARNING *** Use revision 5.07 or later\n"); } break; default: MLX_IO_UNLOCK(sc); return(ENXIO); /* should never happen */ } MLX_IO_UNLOCK(sc); /* * Create the final scatter/gather mappings now that we have characterised the controller. */ error = mlx_sglist_map(sc); if (error != 0) { device_printf(sc->mlx_dev, "can't make final s/g list mapping\n"); return(error); } /* * No user-requested background operation is in progress. */ sc->mlx_background = 0; sc->mlx_rebuildstat.rs_code = MLX_REBUILDSTAT_IDLE; /* * Create the control device. 
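* * The resulting /dev/mlxN node is the management path into mlx_ioctl() * below (typically driven by mlxcontrol(8)); si_drv1 carries the softc so * the cdev entry points can recover their controller.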
*/ sc->mlx_dev_t = make_dev(&mlx_cdevsw, 0, UID_ROOT, GID_OPERATOR, S_IRUSR | S_IWUSR, "mlx%d", device_get_unit(sc->mlx_dev)); sc->mlx_dev_t->si_drv1 = sc; /* * Start the timeout routine. */ callout_reset(&sc->mlx_timeout, hz, mlx_periodic, sc); /* print a little information about the controller */ mlx_describe_controller(sc); return(0); } /******************************************************************************** * Locate disk resources and attach children to them. */ void mlx_startup(struct mlx_softc *sc) { struct mlx_enq_sys_drive *mes; struct mlx_sysdrive *dr; int i; debug_called(1); /* * Scan all the system drives and attach children for those that * don't currently have them. */ MLX_IO_LOCK(sc); mes = mlx_enquire(sc, MLX_CMD_ENQSYSDRIVE, sizeof(*mes) * MLX_MAXDRIVES, NULL); MLX_IO_UNLOCK(sc); if (mes == NULL) { device_printf(sc->mlx_dev, "error fetching drive status\n"); return; } /* iterate over drives returned */ MLX_CONFIG_LOCK(sc); for (i = 0, dr = &sc->mlx_sysdrive[0]; (i < MLX_MAXDRIVES) && (mes[i].sd_size != 0xffffffff); i++, dr++) { /* are we already attached to this drive? */ if (dr->ms_disk == 0) { /* pick up drive information */ dr->ms_size = mes[i].sd_size; dr->ms_raidlevel = mes[i].sd_raidlevel & 0xf; dr->ms_state = mes[i].sd_state; /* generate geometry information */ if (sc->mlx_geom == MLX_GEOM_128_32) { dr->ms_heads = 128; dr->ms_sectors = 32; dr->ms_cylinders = dr->ms_size / (128 * 32); } else { /* MLX_GEOM_255/63 */ dr->ms_heads = 255; dr->ms_sectors = 63; dr->ms_cylinders = dr->ms_size / (255 * 63); } dr->ms_disk = device_add_child(sc->mlx_dev, /*"mlxd"*/NULL, DEVICE_UNIT_ANY); if (dr->ms_disk == 0) device_printf(sc->mlx_dev, "device_add_child failed\n"); device_set_ivars(dr->ms_disk, dr); } } free(mes, M_DEVBUF); bus_attach_children(sc->mlx_dev); /* mark controller back up */ MLX_IO_LOCK(sc); sc->mlx_state &= ~MLX_STATE_SHUTDOWN; /* enable interrupts */ sc->mlx_intaction(sc, MLX_INTACTION_ENABLE); MLX_IO_UNLOCK(sc); MLX_CONFIG_UNLOCK(sc); } /******************************************************************************** * Disconnect from the controller completely, in preparation for unload. */ int mlx_detach(device_t dev) { struct mlx_softc *sc = device_get_softc(dev); struct mlxd_softc *mlxd; int i, error; debug_called(1); error = EBUSY; MLX_CONFIG_LOCK(sc); if (sc->mlx_state & MLX_STATE_OPEN) goto out; for (i = 0; i < MLX_MAXDRIVES; i++) { if (sc->mlx_sysdrive[i].ms_disk != 0) { mlxd = device_get_softc(sc->mlx_sysdrive[i].ms_disk); if (mlxd->mlxd_flags & MLXD_OPEN) { /* drive is mounted, abort detach */ device_printf(sc->mlx_sysdrive[i].ms_disk, "still open, can't detach\n"); goto out; } } } if ((error = mlx_shutdown(dev))) goto out; MLX_CONFIG_UNLOCK(sc); mlx_free(sc); return (0); out: MLX_CONFIG_UNLOCK(sc); return(error); } /******************************************************************************** * Bring the controller down to a dormant state and detach all child devices. * * This function is called before detach, system shutdown, or before performing * an operation which may add or delete system disks. (Call mlx_startup to * resume normal operation.) * * Note that we can assume that the bioq on the controller is empty, as we won't * allow shutdown if any device is open. 
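* * Lock ordering note: the configuration (sx) lock is always taken before * the I/O mutex. mlx_shutdown() acquires the config lock itself, while * mlx_shutdown_locked() expects the caller to hold it.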
*/ int mlx_shutdown(device_t dev) { struct mlx_softc *sc = device_get_softc(dev); int error; MLX_CONFIG_LOCK(sc); error = mlx_shutdown_locked(sc); MLX_CONFIG_UNLOCK(sc); return (error); } static int mlx_shutdown_locked(struct mlx_softc *sc) { - int i, error; + int error; debug_called(1); MLX_CONFIG_ASSERT_LOCKED(sc); MLX_IO_LOCK(sc); sc->mlx_state |= MLX_STATE_SHUTDOWN; sc->mlx_intaction(sc, MLX_INTACTION_DISABLE); /* flush controller */ device_printf(sc->mlx_dev, "flushing cache..."); if (mlx_flush(sc)) { printf("failed\n"); } else { printf("done\n"); } MLX_IO_UNLOCK(sc); - + /* delete all our child devices */ - for (i = 0; i < MLX_MAXDRIVES; i++) { - if (sc->mlx_sysdrive[i].ms_disk != 0) { - if ((error = device_delete_child(sc->mlx_dev, sc->mlx_sysdrive[i].ms_disk)) != 0) - return (error); - sc->mlx_sysdrive[i].ms_disk = 0; - } - } + error = bus_generic_detach(sc->mlx_dev); - return (0); + return (error); } /******************************************************************************** * Bring the controller to a quiescent state, ready for system suspend. */ int mlx_suspend(device_t dev) { struct mlx_softc *sc = device_get_softc(dev); debug_called(1); MLX_IO_LOCK(sc); sc->mlx_state |= MLX_STATE_SUSPEND; /* flush controller */ device_printf(sc->mlx_dev, "flushing cache..."); printf("%s\n", mlx_flush(sc) ? "failed" : "done"); sc->mlx_intaction(sc, MLX_INTACTION_DISABLE); MLX_IO_UNLOCK(sc); return(0); } /******************************************************************************** * Bring the controller back to a state ready for operation. */ int mlx_resume(device_t dev) { struct mlx_softc *sc = device_get_softc(dev); debug_called(1); MLX_IO_LOCK(sc); sc->mlx_state &= ~MLX_STATE_SUSPEND; sc->mlx_intaction(sc, MLX_INTACTION_ENABLE); MLX_IO_UNLOCK(sc); return(0); } /******************************************************************************* * Take an interrupt, or be poked by other code to look for interrupt-worthy * status. */ void mlx_intr(void *arg) { struct mlx_softc *sc = (struct mlx_softc *)arg; debug_called(1); /* collect finished commands, queue anything waiting */ MLX_IO_LOCK(sc); mlx_done(sc, 1); MLX_IO_UNLOCK(sc); }; /******************************************************************************* * Receive a buf structure from a child device and queue it on a particular * disk resource, then poke the disk resource to start as much work as it can. */ int mlx_submit_buf(struct mlx_softc *sc, struct bio *bp) { debug_called(1); MLX_IO_ASSERT_LOCKED(sc); bioq_insert_tail(&sc->mlx_bioq, bp); sc->mlx_waitbufs++; mlx_startio(sc); return(0); } /******************************************************************************** * Accept an open operation on the control device. */ int mlx_open(struct cdev *dev, int flags, int fmt, struct thread *td) { struct mlx_softc *sc = dev->si_drv1; MLX_CONFIG_LOCK(sc); MLX_IO_LOCK(sc); sc->mlx_state |= MLX_STATE_OPEN; MLX_IO_UNLOCK(sc); MLX_CONFIG_UNLOCK(sc); return(0); } /******************************************************************************** * Accept the last close on the control device. */ int mlx_close(struct cdev *dev, int flags, int fmt, struct thread *td) { struct mlx_softc *sc = dev->si_drv1; MLX_CONFIG_LOCK(sc); MLX_IO_LOCK(sc); sc->mlx_state &= ~MLX_STATE_OPEN; MLX_IO_UNLOCK(sc); MLX_CONFIG_UNLOCK(sc); return (0); } /******************************************************************************** * Handle controller-specific control operations. 
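* * Requests handled here: MLX_NEXT_CHILD (enumerate attached system drives), * MLX_RESCAN_DRIVES, MLX_DETACH_DRIVE, MLX_PAUSE_CHANNEL, MLX_COMMAND (user * command passthrough), MLX_REBUILDASYNC, MLX_REBUILDSTAT and * MLX_GET_SYSDRIVE; anything else gets ENOTTY.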
*/ int mlx_ioctl(struct cdev *dev, u_long cmd, caddr_t addr, int32_t flag, struct thread *td) { struct mlx_softc *sc = dev->si_drv1; struct mlx_rebuild_request *rb = (struct mlx_rebuild_request *)addr; struct mlx_rebuild_status *rs = (struct mlx_rebuild_status *)addr; int *arg = (int *)addr; struct mlx_pause *mp; struct mlx_sysdrive *dr; struct mlxd_softc *mlxd; int i, error; switch(cmd) { /* * Enumerate connected system drives; returns the first system drive's * unit number if *arg is -1, or the next unit after *arg if it's * a valid unit on this controller. */ case MLX_NEXT_CHILD: /* search system drives */ MLX_CONFIG_LOCK(sc); for (i = 0; i < MLX_MAXDRIVES; i++) { /* is this one attached? */ if (sc->mlx_sysdrive[i].ms_disk != 0) { /* looking for the next one we come across? */ if (*arg == -1) { *arg = device_get_unit(sc->mlx_sysdrive[i].ms_disk); MLX_CONFIG_UNLOCK(sc); return(0); } /* we want the one after this one */ if (*arg == device_get_unit(sc->mlx_sysdrive[i].ms_disk)) *arg = -1; } } MLX_CONFIG_UNLOCK(sc); return(ENOENT); /* * Scan the controller to see whether new drives have appeared. */ case MLX_RESCAN_DRIVES: bus_topo_lock(); mlx_startup(sc); bus_topo_unlock(); return(0); /* * Disconnect from the specified drive; it may be about to go * away. */ case MLX_DETACH_DRIVE: /* detach one drive */ MLX_CONFIG_LOCK(sc); if (((dr = mlx_findunit(sc, *arg)) == NULL) || ((mlxd = device_get_softc(dr->ms_disk)) == NULL)) { MLX_CONFIG_UNLOCK(sc); return(ENOENT); } device_printf(dr->ms_disk, "detaching..."); error = 0; if (mlxd->mlxd_flags & MLXD_OPEN) { error = EBUSY; goto detach_out; } /* flush controller */ MLX_IO_LOCK(sc); if (mlx_flush(sc)) { MLX_IO_UNLOCK(sc); error = EBUSY; goto detach_out; } MLX_IO_UNLOCK(sc); /* nuke drive */ if ((error = device_delete_child(sc->mlx_dev, dr->ms_disk)) != 0) goto detach_out; dr->ms_disk = 0; detach_out: MLX_CONFIG_UNLOCK(sc); if (error) { printf("failed\n"); } else { printf("done\n"); } return(error); /* * Pause one or more SCSI channels for a period of time, to assist * in the process of hot-swapping devices. * * Note that at least the 3.51 firmware on the DAC960PL doesn't seem * to do this right. */ case MLX_PAUSE_CHANNEL: /* schedule a channel pause */ /* Does this command work on this firmware? */ if (!(sc->mlx_feature & MLX_FEAT_PAUSEWORKS)) return(EOPNOTSUPP); /* check time values */ mp = (struct mlx_pause *)addr; if ((mp->mp_when < 0) || (mp->mp_when > 3600)) return(EINVAL); if ((mp->mp_howlong < 1) || (mp->mp_howlong > (0xf * 30))) return(EINVAL); MLX_IO_LOCK(sc); if ((mp->mp_which == MLX_PAUSE_CANCEL) && (sc->mlx_pause.mp_when != 0)) { /* cancel a pending pause operation */ sc->mlx_pause.mp_which = 0; } else { /* fix for legal channels */ mp->mp_which &= ((1 << sc->mlx_enq2->me_actual_channels) -1); /* check for a pause currently running */ if ((sc->mlx_pause.mp_which != 0) && (sc->mlx_pause.mp_when == 0)) { MLX_IO_UNLOCK(sc); return(EBUSY); } /* looks ok, go with it */ sc->mlx_pause.mp_which = mp->mp_which; sc->mlx_pause.mp_when = time_second + mp->mp_when; sc->mlx_pause.mp_howlong = sc->mlx_pause.mp_when + mp->mp_howlong; } MLX_IO_UNLOCK(sc); return(0); /* * Accept a command passthrough-style. 
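* * The caller hands us a fully-formed struct mlx_usercommand. As an * illustrative sketch only (the descriptor and data buffer are the * caller's, error handling omitted), userland usage looks roughly like: * * struct mlx_usercommand mu; * bzero(&mu, sizeof(mu)); * ... build a raw mailbox image in mu.mu_command ... * mu.mu_datasize = sizeof(buf); * mu.mu_buf = buf; * mu.mu_bufptr = ...offset of the 32-bit data address within the mailbox...; * ioctl(fd, MLX_COMMAND, &mu); * ... the controller status is then returned in mu.mu_status ...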
*/ case MLX_COMMAND: return(mlx_user_command(sc, (struct mlx_usercommand *)addr)); /* * Start a rebuild on a given SCSI disk */ case MLX_REBUILDASYNC: MLX_IO_LOCK(sc); if (sc->mlx_background != 0) { MLX_IO_UNLOCK(sc); rb->rr_status = 0x0106; return(EBUSY); } rb->rr_status = mlx_rebuild(sc, rb->rr_channel, rb->rr_target); switch (rb->rr_status) { case 0: error = 0; break; case 0x10000: error = ENOMEM; /* couldn't set up the command */ break; case 0x0002: error = EBUSY; break; case 0x0104: error = EIO; break; case 0x0105: error = ERANGE; break; case 0x0106: error = EBUSY; break; default: error = EINVAL; break; } if (error == 0) sc->mlx_background = MLX_BACKGROUND_REBUILD; MLX_IO_UNLOCK(sc); return(error); /* * Get the status of the current rebuild or consistency check. */ case MLX_REBUILDSTAT: MLX_IO_LOCK(sc); *rs = sc->mlx_rebuildstat; MLX_IO_UNLOCK(sc); return(0); /* * Return the per-controller system drive number matching the * disk device number in (arg), if it happens to belong to us. */ case MLX_GET_SYSDRIVE: error = ENOENT; MLX_CONFIG_LOCK(sc); bus_topo_lock(); mlxd = devclass_get_softc(devclass_find("mlxd"), *arg); bus_topo_unlock(); if ((mlxd != NULL) && (mlxd->mlxd_drive >= sc->mlx_sysdrive) && (mlxd->mlxd_drive < (sc->mlx_sysdrive + MLX_MAXDRIVES))) { error = 0; *arg = mlxd->mlxd_drive - sc->mlx_sysdrive; } MLX_CONFIG_UNLOCK(sc); return(error); default: return(ENOTTY); } } /******************************************************************************** * Handle operations requested by a System Drive connected to this controller. */ int mlx_submit_ioctl(struct mlx_softc *sc, struct mlx_sysdrive *drive, u_long cmd, caddr_t addr, int32_t flag, struct thread *td) { int *arg = (int *)addr; int error, result; switch(cmd) { /* * Return the current status of this drive. */ case MLXD_STATUS: MLX_IO_LOCK(sc); *arg = drive->ms_state; MLX_IO_UNLOCK(sc); return(0); /* * Start a background consistency check on this drive. */ case MLXD_CHECKASYNC: /* start a background consistency check */ MLX_IO_LOCK(sc); if (sc->mlx_background != 0) { MLX_IO_UNLOCK(sc); *arg = 0x0106; return(EBUSY); } result = mlx_check(sc, drive - &sc->mlx_sysdrive[0]); switch (result) { case 0: error = 0; break; case 0x10000: error = ENOMEM; /* couldn't set up the command */ break; case 0x0002: error = EIO; break; case 0x0105: error = ERANGE; break; case 0x0106: error = EBUSY; break; default: error = EINVAL; break; } if (error == 0) sc->mlx_background = MLX_BACKGROUND_CHECK; MLX_IO_UNLOCK(sc); *arg = result; return(error); } return(ENOIOCTL); } /******************************************************************************** ******************************************************************************** Status Monitoring ******************************************************************************** ********************************************************************************/ /******************************************************************************** * Fire off commands to periodically check the status of connected drives. */ static void mlx_periodic(void *data) { struct mlx_softc *sc = (struct mlx_softc *)data; debug_called(1); MLX_IO_ASSERT_LOCKED(sc); /* * Run a bus pause? */ if ((sc->mlx_pause.mp_which != 0) && (sc->mlx_pause.mp_when > 0) && (time_second >= sc->mlx_pause.mp_when)){ mlx_pause_action(sc); /* pause is running */ sc->mlx_pause.mp_when = 0; sysbeep(500, SBT_1S); /* * Bus pause still running? */ } else if ((sc->mlx_pause.mp_which != 0) && (sc->mlx_pause.mp_when == 0)) { /* time to stop bus pause? 
*/ if (time_second >= sc->mlx_pause.mp_howlong) { mlx_pause_action(sc); sc->mlx_pause.mp_which = 0; /* pause is complete */ sysbeep(500, SBT_1S); } else { sysbeep((time_second % 5) * 100 + 500, SBT_1S / 8); } /* * Run normal periodic activities? */ } else if (time_second > (sc->mlx_lastpoll + 10)) { sc->mlx_lastpoll = time_second; /* * Check controller status. * * XXX Note that this may not actually launch a command in situations of high load. */ mlx_enquire(sc, (sc->mlx_iftype == MLX_IFTYPE_2) ? MLX_CMD_ENQUIRY_OLD : MLX_CMD_ENQUIRY, imax(sizeof(struct mlx_enquiry), sizeof(struct mlx_enquiry_old)), mlx_periodic_enquiry); /* * Check system drive status. * * XXX This might be better left to event-driven detection, eg. I/O to an offline * drive will detect it's offline, rebuilds etc. should detect the drive is back * online. */ mlx_enquire(sc, MLX_CMD_ENQSYSDRIVE, sizeof(struct mlx_enq_sys_drive) * MLX_MAXDRIVES, mlx_periodic_enquiry); } /* get drive rebuild/check status */ /* XXX should check sc->mlx_background if this is only valid while in progress */ mlx_enquire(sc, MLX_CMD_REBUILDSTAT, sizeof(struct mlx_rebuild_stat), mlx_periodic_rebuild); /* deal with possibly-missed interrupts and timed-out commands */ mlx_done(sc, 1); /* reschedule another poll next second or so */ callout_reset(&sc->mlx_timeout, hz, mlx_periodic, sc); } /******************************************************************************** * Handle the result of an ENQUIRY command instigated by periodic status polling. */ static void mlx_periodic_enquiry(struct mlx_command *mc) { struct mlx_softc *sc = mc->mc_sc; debug_called(1); MLX_IO_ASSERT_LOCKED(sc); /* Command completed OK? */ if (mc->mc_status != 0) { device_printf(sc->mlx_dev, "periodic enquiry failed - %s\n", mlx_diagnose_command(mc)); goto out; } /* respond to command */ switch(mc->mc_mailbox[0]) { /* * This is currently a bit fruitless, as we don't know how to extract the eventlog * pointer yet. */ case MLX_CMD_ENQUIRY_OLD: { struct mlx_enquiry *me = (struct mlx_enquiry *)mc->mc_data; struct mlx_enquiry_old *meo = (struct mlx_enquiry_old *)mc->mc_data; int i; /* convert data in-place to new format */ for (i = (sizeof(me->me_dead) / sizeof(me->me_dead[0])) - 1; i >= 0; i--) { me->me_dead[i].dd_chan = meo->me_dead[i].dd_chan; me->me_dead[i].dd_targ = meo->me_dead[i].dd_targ; } me->me_misc_flags = 0; me->me_rebuild_count = meo->me_rebuild_count; me->me_dead_count = meo->me_dead_count; me->me_critical_sd_count = meo->me_critical_sd_count; me->me_event_log_seq_num = 0; me->me_offline_sd_count = meo->me_offline_sd_count; me->me_max_commands = meo->me_max_commands; me->me_rebuild_flag = meo->me_rebuild_flag; me->me_fwmajor = meo->me_fwmajor; me->me_fwminor = meo->me_fwminor; me->me_status_flags = meo->me_status_flags; me->me_flash_age = meo->me_flash_age; for (i = (sizeof(me->me_drvsize) / sizeof(me->me_drvsize[0])) - 1; i >= 0; i--) { if (i > ((sizeof(meo->me_drvsize) / sizeof(meo->me_drvsize[0])) - 1)) { me->me_drvsize[i] = 0; /* drive beyond supported range */ } else { me->me_drvsize[i] = meo->me_drvsize[i]; } } me->me_num_sys_drvs = meo->me_num_sys_drvs; } /* FALLTHROUGH */ /* * Generic controller status update. We could do more with this than just * checking the event log. 
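* * We track the log with two sequence numbers: mlx_lastevent is the next * entry still to be fetched and mlx_currevent is the newest entry the * controller has reported; while they differ (and MLX_EVENTLOG_BUSY is * clear) entries are polled one at a time.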
*/ case MLX_CMD_ENQUIRY: { struct mlx_enquiry *me = (struct mlx_enquiry *)mc->mc_data; if (sc->mlx_currevent == -1) { /* initialise our view of the event log */ sc->mlx_currevent = sc->mlx_lastevent = me->me_event_log_seq_num; } else if ((me->me_event_log_seq_num != sc->mlx_lastevent) && !(sc->mlx_flags & MLX_EVENTLOG_BUSY)) { /* record where current events are up to */ sc->mlx_currevent = me->me_event_log_seq_num; debug(1, "event log pointer was %d, now %d\n", sc->mlx_lastevent, sc->mlx_currevent); /* mark the event log as busy */ sc->mlx_flags |= MLX_EVENTLOG_BUSY; /* drain new eventlog entries */ mlx_periodic_eventlog_poll(sc); } break; } case MLX_CMD_ENQSYSDRIVE: { struct mlx_enq_sys_drive *mes = (struct mlx_enq_sys_drive *)mc->mc_data; struct mlx_sysdrive *dr; int i; for (i = 0, dr = &sc->mlx_sysdrive[0]; (i < MLX_MAXDRIVES) && (mes[i].sd_size != 0xffffffff); i++, dr++) { /* has state been changed by controller? */ if (dr->ms_state != mes[i].sd_state) { switch(mes[i].sd_state) { case MLX_SYSD_OFFLINE: device_printf(dr->ms_disk, "drive offline\n"); break; case MLX_SYSD_ONLINE: device_printf(dr->ms_disk, "drive online\n"); break; case MLX_SYSD_CRITICAL: device_printf(dr->ms_disk, "drive critical\n"); break; } /* save new state */ dr->ms_state = mes[i].sd_state; } } break; } default: device_printf(sc->mlx_dev, "%s: unknown command 0x%x\n", __func__, mc->mc_mailbox[0]); break; } out: free(mc->mc_data, M_DEVBUF); mlx_releasecmd(mc); } static void mlx_eventlog_cb(void *arg, bus_dma_segment_t *segs, int nsegments, int error) { struct mlx_command *mc; mc = (struct mlx_command *)arg; mlx_setup_dmamap(mc, segs, nsegments, error); /* build the command to get one entry */ mlx_make_type3(mc, MLX_CMD_LOGOP, MLX_LOGOP_GET, 1, mc->mc_sc->mlx_lastevent, 0, 0, mc->mc_dataphys, 0); mc->mc_complete = mlx_periodic_eventlog_respond; mc->mc_private = mc; /* start the command */ if (mlx_start(mc) != 0) { mlx_releasecmd(mc); free(mc->mc_data, M_DEVBUF); mc->mc_data = NULL; } } /******************************************************************************** * Instigate a poll for one event log message on (sc). * We only poll for one message at a time, to keep our command usage down. */ static void mlx_periodic_eventlog_poll(struct mlx_softc *sc) { struct mlx_command *mc; void *result = NULL; int error = 0; debug_called(1); MLX_IO_ASSERT_LOCKED(sc); /* get ourselves a command buffer */ error = 1; if ((mc = mlx_alloccmd(sc)) == NULL) goto out; /* allocate the response structure */ if ((result = malloc(/*sizeof(struct mlx_eventlog_entry)*/1024, M_DEVBUF, M_NOWAIT)) == NULL) goto out; /* get a command slot */ if (mlx_getslot(mc)) goto out; /* map the command so the controller can see it */ mc->mc_data = result; mc->mc_length = /*sizeof(struct mlx_eventlog_entry)*/1024; error = bus_dmamap_load(sc->mlx_buffer_dmat, mc->mc_dmamap, mc->mc_data, mc->mc_length, mlx_eventlog_cb, mc, BUS_DMA_NOWAIT); out: if (error != 0) { if (mc != NULL) mlx_releasecmd(mc); if ((result != NULL) && (mc->mc_data != NULL)) free(result, M_DEVBUF); } } /******************************************************************************** * Handle the result of polling for a log message, generate diagnostic output. * If this wasn't the last message waiting for us, we'll go collect another.
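* * For the Mylex vendor-specific "drive killed" sense data (sense key 9, * ASC 0x80) the additional sense code qualifier indexes the reason strings * in the table below.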
*/ static char *mlx_sense_messages[] = { "because write recovery failed", "because of SCSI bus reset failure", "because of double check condition", "because it was removed", "because of gross error on SCSI chip", "because of bad tag returned from drive", "because of timeout on SCSI command", "because of reset SCSI command issued from system", "because busy or parity error count exceeded limit", "because of 'kill drive' command from system", "because of selection timeout", "due to SCSI phase sequence error", "due to unknown status" }; static void mlx_periodic_eventlog_respond(struct mlx_command *mc) { struct mlx_softc *sc = mc->mc_sc; struct mlx_eventlog_entry *el = (struct mlx_eventlog_entry *)mc->mc_data; char *reason; debug_called(1); MLX_IO_ASSERT_LOCKED(sc); sc->mlx_lastevent++; /* next message... */ if (mc->mc_status == 0) { /* handle event log message */ switch(el->el_type) { /* * This is the only sort of message we understand at the moment. * The tests here are probably incomplete. */ case MLX_LOGMSG_SENSE: /* sense data */ /* Mylex vendor-specific message indicating a drive was killed? */ if ((el->el_sensekey == 9) && (el->el_asc == 0x80)) { if (el->el_asq < nitems(mlx_sense_messages)) { reason = mlx_sense_messages[el->el_asq]; } else { reason = "for unknown reason"; } device_printf(sc->mlx_dev, "physical drive %d:%d killed %s\n", el->el_channel, el->el_target, reason); } /* SCSI drive was reset? */ if ((el->el_sensekey == 6) && (el->el_asc == 0x29)) { device_printf(sc->mlx_dev, "physical drive %d:%d reset\n", el->el_channel, el->el_target); } /* SCSI drive error? */ if (!((el->el_sensekey == 0) || ((el->el_sensekey == 2) && (el->el_asc == 0x04) && ((el->el_asq == 0x01) || (el->el_asq == 0x02))))) { device_printf(sc->mlx_dev, "physical drive %d:%d error log: sense = %d asc = %x asq = %x\n", el->el_channel, el->el_target, el->el_sensekey, el->el_asc, el->el_asq); device_printf(sc->mlx_dev, " info %4D csi %4D\n", el->el_information, ":", el->el_csi, ":"); } break; default: device_printf(sc->mlx_dev, "unknown log message type 0x%x\n", el->el_type); break; } } else { device_printf(sc->mlx_dev, "error reading message log - %s\n", mlx_diagnose_command(mc)); /* give up on all the outstanding messages, as we may have come unsynched */ sc->mlx_lastevent = sc->mlx_currevent; } /* dispose of command and data */ free(mc->mc_data, M_DEVBUF); mlx_releasecmd(mc); /* is there another message to obtain? */ if (sc->mlx_lastevent != sc->mlx_currevent) { mlx_periodic_eventlog_poll(sc); } else { /* clear log-busy status */ sc->mlx_flags &= ~MLX_EVENTLOG_BUSY; } } /******************************************************************************** * Handle check/rebuild operations in progress. */ static void mlx_periodic_rebuild(struct mlx_command *mc) { struct mlx_softc *sc = mc->mc_sc; struct mlx_rebuild_status *mr = (struct mlx_rebuild_status *)mc->mc_data; MLX_IO_ASSERT_LOCKED(sc); switch(mc->mc_status) { case 0: /* operation running, update stats */ sc->mlx_rebuildstat = *mr; /* spontaneous rebuild/check? */ if (sc->mlx_background == 0) { sc->mlx_background = MLX_BACKGROUND_SPONTANEOUS; device_printf(sc->mlx_dev, "background check/rebuild operation started\n"); } break; case 0x0105: /* nothing running, finalise stats and report */ switch(sc->mlx_background) { case MLX_BACKGROUND_CHECK: device_printf(sc->mlx_dev, "consistency check completed\n"); /* XXX print drive? */ break; case MLX_BACKGROUND_REBUILD: device_printf(sc->mlx_dev, "drive rebuild completed\n"); /* XXX print channel/target? 
*/ break; case MLX_BACKGROUND_SPONTANEOUS: default: /* if we have previously been non-idle, report the transition */ if (sc->mlx_rebuildstat.rs_code != MLX_REBUILDSTAT_IDLE) { device_printf(sc->mlx_dev, "background check/rebuild operation completed\n"); } } sc->mlx_background = 0; sc->mlx_rebuildstat.rs_code = MLX_REBUILDSTAT_IDLE; break; } free(mc->mc_data, M_DEVBUF); mlx_releasecmd(mc); } /******************************************************************************** ******************************************************************************** Channel Pause ******************************************************************************** ********************************************************************************/ /******************************************************************************** * It's time to perform a channel pause action for (sc), either start or stop * the pause. */ static void mlx_pause_action(struct mlx_softc *sc) { struct mlx_command *mc; int failsafe, i, command; MLX_IO_ASSERT_LOCKED(sc); /* What are we doing here? */ if (sc->mlx_pause.mp_when == 0) { command = MLX_CMD_STARTCHANNEL; failsafe = 0; } else { command = MLX_CMD_STOPCHANNEL; /* * Channels will always start again after the failsafe period, * which is specified in multiples of 30 seconds. * This constrains us to a maximum pause of 450 seconds. */ failsafe = ((sc->mlx_pause.mp_howlong - time_second) + 5) / 30; if (failsafe > 0xf) { failsafe = 0xf; sc->mlx_pause.mp_howlong = time_second + (0xf * 30) - 5; } } /* build commands for every channel requested */ for (i = 0; i < sc->mlx_enq2->me_actual_channels; i++) { if ((1 << i) & sc->mlx_pause.mp_which) { /* get ourselves a command buffer */ if ((mc = mlx_alloccmd(sc)) == NULL) goto fail; /* get a command slot */ mc->mc_flags |= MLX_CMD_PRIORITY; if (mlx_getslot(mc)) goto fail; /* build the command */ mlx_make_type2(mc, command, (failsafe << 4) | i, 0, 0, 0, 0, 0, 0, 0); mc->mc_complete = mlx_pause_done; mc->mc_private = sc; /* XXX not needed */ if (mlx_start(mc)) goto fail; /* command submitted OK */ return; fail: device_printf(sc->mlx_dev, "%s failed for channel %d\n", command == MLX_CMD_STOPCHANNEL ? "pause" : "resume", i); if (mc != NULL) mlx_releasecmd(mc); } } } static void mlx_pause_done(struct mlx_command *mc) { struct mlx_softc *sc = mc->mc_sc; int command = mc->mc_mailbox[0]; int channel = mc->mc_mailbox[2] & 0xf; MLX_IO_ASSERT_LOCKED(sc); if (mc->mc_status != 0) { device_printf(sc->mlx_dev, "%s command failed - %s\n", command == MLX_CMD_STOPCHANNEL ? 
"pause" : "resume", mlx_diagnose_command(mc)); } else if (command == MLX_CMD_STOPCHANNEL) { device_printf(sc->mlx_dev, "channel %d pausing for %ld seconds\n", channel, (long)(sc->mlx_pause.mp_howlong - time_second)); } else { device_printf(sc->mlx_dev, "channel %d resuming\n", channel); } mlx_releasecmd(mc); } /******************************************************************************** ******************************************************************************** Command Submission ******************************************************************************** ********************************************************************************/ static void mlx_enquire_cb(void *arg, bus_dma_segment_t *segs, int nsegments, int error) { struct mlx_softc *sc; struct mlx_command *mc; mc = (struct mlx_command *)arg; if (error) return; mlx_setup_dmamap(mc, segs, nsegments, error); /* build an enquiry command */ sc = mc->mc_sc; mlx_make_type2(mc, mc->mc_command, 0, 0, 0, 0, 0, 0, mc->mc_dataphys, 0); /* do we want a completion callback? */ if (mc->mc_complete != NULL) { if ((error = mlx_start(mc)) != 0) return; } else { /* run the command in either polled or wait mode */ if ((sc->mlx_state & MLX_STATE_INTEN) ? mlx_wait_command(mc) : mlx_poll_command(mc)) return; /* command completed OK? */ if (mc->mc_status != 0) { device_printf(sc->mlx_dev, "ENQUIRY failed - %s\n", mlx_diagnose_command(mc)); return; } } } /******************************************************************************** * Perform an Enquiry command using a type-3 command buffer and a return a single * linear result buffer. If the completion function is specified, it will * be called with the completed command (and the result response will not be * valid until that point). Otherwise, the command will either be busy-waited * for (interrupts not enabled), or slept for. */ static void * mlx_enquire(struct mlx_softc *sc, int command, size_t bufsize, void (* complete)(struct mlx_command *mc)) { struct mlx_command *mc; void *result; int error; debug_called(1); MLX_IO_ASSERT_LOCKED(sc); /* get ourselves a command buffer */ error = 1; result = NULL; if ((mc = mlx_alloccmd(sc)) == NULL) goto out; /* allocate the response structure */ if ((result = malloc(bufsize, M_DEVBUF, M_NOWAIT)) == NULL) goto out; /* get a command slot */ mc->mc_flags |= MLX_CMD_PRIORITY | MLX_CMD_DATAOUT; if (mlx_getslot(mc)) goto out; /* map the command so the controller can see it */ mc->mc_data = result; mc->mc_length = bufsize; mc->mc_command = command; if (complete != NULL) { mc->mc_complete = complete; mc->mc_private = mc; } error = bus_dmamap_load(sc->mlx_buffer_dmat, mc->mc_dmamap, mc->mc_data, mc->mc_length, mlx_enquire_cb, mc, BUS_DMA_NOWAIT); out: /* we got a command, but nobody else will free it */ if ((mc != NULL) && (mc->mc_complete == NULL)) mlx_releasecmd(mc); /* we got an error, and we allocated a result */ if ((error != 0) && (result != NULL)) { free(result, M_DEVBUF); result = NULL; } return(result); } /******************************************************************************** * Perform a Flush command on the nominated controller. * * May be called with interrupts enabled or disabled; will not return until * the flush operation completes or fails. 
*/ static int mlx_flush(struct mlx_softc *sc) { struct mlx_command *mc; int error; debug_called(1); MLX_IO_ASSERT_LOCKED(sc); /* get ourselves a command buffer */ error = 1; if ((mc = mlx_alloccmd(sc)) == NULL) goto out; /* get a command slot */ if (mlx_getslot(mc)) goto out; /* build a flush command */ mlx_make_type2(mc, MLX_CMD_FLUSH, 0, 0, 0, 0, 0, 0, 0, 0); /* can't assume that interrupts are going to work here, so play it safe */ if (mlx_poll_command(mc)) goto out; /* command completed OK? */ if (mc->mc_status != 0) { device_printf(sc->mlx_dev, "FLUSH failed - %s\n", mlx_diagnose_command(mc)); goto out; } error = 0; /* success */ out: if (mc != NULL) mlx_releasecmd(mc); return(error); } /******************************************************************************** * Start a background consistency check on (drive). * * May be called with interrupts enabled or disabled; will return as soon as the * operation has started or been refused. */ static int mlx_check(struct mlx_softc *sc, int drive) { struct mlx_command *mc; int error; debug_called(1); MLX_IO_ASSERT_LOCKED(sc); /* get ourselves a command buffer */ error = 0x10000; if ((mc = mlx_alloccmd(sc)) == NULL) goto out; /* get a command slot */ if (mlx_getslot(mc)) goto out; /* build a checkasync command, set the "fix it" flag */ mlx_make_type2(mc, MLX_CMD_CHECKASYNC, 0, 0, 0, 0, 0, drive | 0x80, 0, 0); /* start the command and wait for it to be returned */ if (mlx_wait_command(mc)) goto out; /* command completed OK? */ if (mc->mc_status != 0) { device_printf(sc->mlx_dev, "CHECK ASYNC failed - %s\n", mlx_diagnose_command(mc)); } else { device_printf(sc->mlx_sysdrive[drive].ms_disk, "consistency check started\n"); } error = mc->mc_status; out: if (mc != NULL) mlx_releasecmd(mc); return(error); } /******************************************************************************** * Start a background rebuild of the physical drive at (channel),(target). * * May be called with interrupts enabled or disabled; will return as soon as the * operation has started or been refused. */ static int mlx_rebuild(struct mlx_softc *sc, int channel, int target) { struct mlx_command *mc; int error; debug_called(1); MLX_IO_ASSERT_LOCKED(sc); /* get ourselves a command buffer */ error = 0x10000; if ((mc = mlx_alloccmd(sc)) == NULL) goto out; /* get a command slot */ if (mlx_getslot(mc)) goto out; /* build a rebuildasync command */ mlx_make_type2(mc, MLX_CMD_REBUILDASYNC, channel, target, 0, 0, 0, 0, 0, 0); /* start the command and wait for it to be returned */ if (mlx_wait_command(mc)) goto out; /* command completed OK? */ if (mc->mc_status != 0) { device_printf(sc->mlx_dev, "REBUILD ASYNC failed - %s\n", mlx_diagnose_command(mc)); } else { device_printf(sc->mlx_dev, "drive rebuild started for %d:%d\n", channel, target); } error = mc->mc_status; out: if (mc != NULL) mlx_releasecmd(mc); return(error); } /******************************************************************************** * Run the command (mc) and return when it completes. * * Interrupts need to be enabled; returns nonzero on error. */ static int mlx_wait_command(struct mlx_command *mc) { struct mlx_softc *sc = mc->mc_sc; int error, count; debug_called(1); MLX_IO_ASSERT_LOCKED(sc); mc->mc_complete = NULL; mc->mc_private = mc; /* wake us when you're done */ if ((error = mlx_start(mc)) != 0) return(error); count = 0; /* XXX better timeout?
*/ while ((mc->mc_status == MLX_STATUS_BUSY) && (count < 30)) { mtx_sleep(mc->mc_private, &sc->mlx_io_lock, PRIBIO | PCATCH, "mlxwcmd", hz); count++; } if (mc->mc_status != 0) { device_printf(sc->mlx_dev, "command failed - %s\n", mlx_diagnose_command(mc)); return(EIO); } return(0); } /******************************************************************************** * Start the command (mc) and busy-wait for it to complete. * * Should only be used when interrupts can't be relied upon. Returns 0 on * success, nonzero on error. * Successfully completed commands are dequeued. */ static int mlx_poll_command(struct mlx_command *mc) { struct mlx_softc *sc = mc->mc_sc; int error, count; debug_called(1); MLX_IO_ASSERT_LOCKED(sc); mc->mc_complete = NULL; mc->mc_private = NULL; /* we will poll for it */ if ((error = mlx_start(mc)) != 0) return(error); count = 0; do { /* poll for completion */ mlx_done(mc->mc_sc, 1); } while ((mc->mc_status == MLX_STATUS_BUSY) && (count++ < 15000000)); if (mc->mc_status != MLX_STATUS_BUSY) { TAILQ_REMOVE(&sc->mlx_work, mc, mc_link); return(0); } device_printf(sc->mlx_dev, "command failed - %s\n", mlx_diagnose_command(mc)); return(EIO); } void mlx_startio_cb(void *arg, bus_dma_segment_t *segs, int nsegments, int error) { struct mlx_command *mc; struct mlxd_softc *mlxd; struct mlx_softc *sc; struct bio *bp; int blkcount; int driveno; int cmd; mc = (struct mlx_command *)arg; mlx_setup_dmamap(mc, segs, nsegments, error); sc = mc->mc_sc; bp = mc->mc_private; if (bp->bio_cmd == BIO_READ) { mc->mc_flags |= MLX_CMD_DATAIN; cmd = MLX_CMD_READSG; } else { mc->mc_flags |= MLX_CMD_DATAOUT; cmd = MLX_CMD_WRITESG; } /* build a suitable I/O command (assumes 512-byte rounded transfers) */ mlxd = bp->bio_disk->d_drv1; driveno = mlxd->mlxd_drive - sc->mlx_sysdrive; blkcount = howmany(bp->bio_bcount, MLX_BLKSIZE); if ((bp->bio_pblkno + blkcount) > sc->mlx_sysdrive[driveno].ms_size) device_printf(sc->mlx_dev, "I/O beyond end of unit (%lld,%d > %lu)\n", (long long)bp->bio_pblkno, blkcount, (u_long)sc->mlx_sysdrive[driveno].ms_size); /* * Build the I/O command. Note that the SG list type bits are set to zero, * denoting the format of SG list that we are using. */ if (sc->mlx_iftype == MLX_IFTYPE_2) { mlx_make_type1(mc, (cmd == MLX_CMD_WRITESG) ? MLX_CMD_WRITESG_OLD : MLX_CMD_READSG_OLD, blkcount & 0xff, /* xfer length low byte */ bp->bio_pblkno, /* physical block number */ driveno, /* target drive number */ mc->mc_sgphys, /* location of SG list */ mc->mc_nsgent & 0x3f); /* size of SG list */ } else { mlx_make_type5(mc, cmd, blkcount & 0xff, /* xfer length low byte */ (driveno << 3) | ((blkcount >> 8) & 0x07), /* target+length high 3 bits */ bp->bio_pblkno, /* physical block number */ mc->mc_sgphys, /* location of SG list */ mc->mc_nsgent & 0x3f); /* size of SG list */ } /* try to give command to controller */ if (mlx_start(mc) != 0) { /* fail the command */ mc->mc_status = MLX_STATUS_WEDGED; mlx_completeio(mc); } sc->mlx_state &= ~MLX_STATE_QFROZEN; } /******************************************************************************** * Pull as much work off the softc's work queue as possible and give it to the * controller. Leave a couple of slots free for emergencies.
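* * If a DMA map load has to be deferred, MLX_STATE_QFROZEN is set and the * queue stalls; mlx_startio_cb() clears the flag again once the deferred * load completes.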
*/ static void mlx_startio(struct mlx_softc *sc) { struct mlx_command *mc; struct bio *bp; int error; MLX_IO_ASSERT_LOCKED(sc); /* spin until something prevents us from doing any work */ for (;;) { if (sc->mlx_state & MLX_STATE_QFROZEN) break; /* see if there's work to be done */ if ((bp = bioq_first(&sc->mlx_bioq)) == NULL) break; /* get a command */ if ((mc = mlx_alloccmd(sc)) == NULL) break; /* get a slot for the command */ if (mlx_getslot(mc) != 0) { mlx_releasecmd(mc); break; } /* get the buf containing our work */ bioq_remove(&sc->mlx_bioq, bp); sc->mlx_waitbufs--; /* connect the buf to the command */ mc->mc_complete = mlx_completeio; mc->mc_private = bp; mc->mc_data = bp->bio_data; mc->mc_length = bp->bio_bcount; /* map the command so the controller can work with it */ error = bus_dmamap_load(sc->mlx_buffer_dmat, mc->mc_dmamap, mc->mc_data, mc->mc_length, mlx_startio_cb, mc, 0); if (error == EINPROGRESS) { sc->mlx_state |= MLX_STATE_QFROZEN; break; } } } /******************************************************************************** * Handle completion of an I/O command. */ static void mlx_completeio(struct mlx_command *mc) { struct mlx_softc *sc = mc->mc_sc; struct bio *bp = mc->mc_private; struct mlxd_softc *mlxd = bp->bio_disk->d_drv1; MLX_IO_ASSERT_LOCKED(sc); if (mc->mc_status != MLX_STATUS_OK) { /* could be more verbose here? */ bp->bio_error = EIO; bp->bio_flags |= BIO_ERROR; switch(mc->mc_status) { case MLX_STATUS_RDWROFFLINE: /* system drive has gone offline */ device_printf(mlxd->mlxd_dev, "drive offline\n"); /* should signal this with a return code */ mlxd->mlxd_drive->ms_state = MLX_SYSD_OFFLINE; break; default: /* other I/O error */ device_printf(sc->mlx_dev, "I/O error - %s\n", mlx_diagnose_command(mc)); #if 0 device_printf(sc->mlx_dev, " b_bcount %ld blkcount %ld b_pblkno %d\n", bp->bio_bcount, bp->bio_bcount / MLX_BLKSIZE, bp->bio_pblkno); device_printf(sc->mlx_dev, " %13D\n", mc->mc_mailbox, " "); #endif break; } } mlx_releasecmd(mc); mlxd_intr(bp); } void mlx_user_cb(void *arg, bus_dma_segment_t *segs, int nsegments, int error) { struct mlx_usercommand *mu; struct mlx_command *mc; struct mlx_dcdb *dcdb; mc = (struct mlx_command *)arg; if (error) return; mlx_setup_dmamap(mc, segs, nsegments, error); mu = (struct mlx_usercommand *)mc->mc_private; dcdb = NULL; /* * If this is a passthrough SCSI command, the DCDB is packed at the * beginning of the data area. Fix up the DCDB to point to the correct * physical address and override any bufptr supplied by the caller since * we know what it's meant to be. */ if (mc->mc_mailbox[0] == MLX_CMD_DIRECT_CDB) { dcdb = (struct mlx_dcdb *)mc->mc_data; dcdb->dcdb_physaddr = mc->mc_dataphys + sizeof(*dcdb); mu->mu_bufptr = 8; } /* * If there's a data buffer, fix up the command's buffer pointer. */ if (mu->mu_datasize > 0) { mc->mc_mailbox[mu->mu_bufptr ] = mc->mc_dataphys & 0xff; mc->mc_mailbox[mu->mu_bufptr + 1] = (mc->mc_dataphys >> 8) & 0xff; mc->mc_mailbox[mu->mu_bufptr + 2] = (mc->mc_dataphys >> 16) & 0xff; mc->mc_mailbox[mu->mu_bufptr + 3] = (mc->mc_dataphys >> 24) & 0xff; } debug(0, "command fixup"); /* submit the command and wait */ if (mlx_wait_command(mc) != 0) return; } /******************************************************************************** * Take a command from user-space and try to run it. * * XXX Note that this can't perform very much in the way of error checking, and * as such, applications _must_ be considered trustworthy. * XXX Commands using S/G for data are not supported. 
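* * User data is bounced through a kernel buffer with copyin()/copyout(); the * I/O mutex is dropped around the copies and the sleepable buffer * allocation, then retaken before the command itself is set up.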
*/ static int mlx_user_command(struct mlx_softc *sc, struct mlx_usercommand *mu) { struct mlx_command *mc; void *kbuf; int error; debug_called(0); kbuf = NULL; mc = NULL; error = ENOMEM; /* get ourselves a command and copy in from user space */ MLX_IO_LOCK(sc); if ((mc = mlx_alloccmd(sc)) == NULL) { MLX_IO_UNLOCK(sc); return(error); } bcopy(mu->mu_command, mc->mc_mailbox, sizeof(mc->mc_mailbox)); debug(0, "got command buffer"); /* * if we need a buffer for data transfer, allocate one and copy in its * initial contents */ if (mu->mu_datasize > 0) { if (mu->mu_datasize > MLX_MAXPHYS) { error = EINVAL; goto out; } MLX_IO_UNLOCK(sc); kbuf = malloc(mu->mu_datasize, M_DEVBUF, M_WAITOK); if ((error = copyin(mu->mu_buf, kbuf, mu->mu_datasize))) { MLX_IO_LOCK(sc); goto out; } MLX_IO_LOCK(sc); debug(0, "got kernel buffer"); } /* get a command slot */ if (mlx_getslot(mc)) goto out; debug(0, "got a slot"); if (mu->mu_datasize > 0) { /* range check the pointer to physical buffer address */ if ((mu->mu_bufptr < 0) || (mu->mu_bufptr > (sizeof(mu->mu_command) - sizeof(u_int32_t)))) { error = EINVAL; goto out; } } /* map the command so the controller can see it */ mc->mc_data = kbuf; mc->mc_length = mu->mu_datasize; mc->mc_private = mu; error = bus_dmamap_load(sc->mlx_buffer_dmat, mc->mc_dmamap, mc->mc_data, mc->mc_length, mlx_user_cb, mc, BUS_DMA_NOWAIT); if (error) goto out; /* copy out status and data */ mu->mu_status = mc->mc_status; if (mu->mu_datasize > 0) { MLX_IO_UNLOCK(sc); error = copyout(kbuf, mu->mu_buf, mu->mu_datasize); MLX_IO_LOCK(sc); } out: mlx_releasecmd(mc); MLX_IO_UNLOCK(sc); if (kbuf != NULL) free(kbuf, M_DEVBUF); return(error); } /******************************************************************************** ******************************************************************************** Command I/O to Controller ******************************************************************************** ********************************************************************************/ /******************************************************************************** * Find a free command slot for (mc). * * Don't hand out a slot to a normal-priority command unless there are at least * 4 slots free for priority commands. */ static int mlx_getslot(struct mlx_command *mc) { struct mlx_softc *sc = mc->mc_sc; int slot, limit; debug_called(1); MLX_IO_ASSERT_LOCKED(sc); /* * Enforce slot-usage limit, if we have the required information. */ if (sc->mlx_enq2 != NULL) { limit = sc->mlx_enq2->me_max_commands; } else { limit = 2; } if (sc->mlx_busycmds >= ((mc->mc_flags & MLX_CMD_PRIORITY) ? limit : limit - 4)) return(EBUSY); /* * Allocate an outstanding command slot * * XXX linear search is slow */ for (slot = 0; slot < limit; slot++) { debug(2, "try slot %d", slot); if (sc->mlx_busycmd[slot] == NULL) break; } if (slot < limit) { sc->mlx_busycmd[slot] = mc; sc->mlx_busycmds++; } /* out of slots? */ if (slot >= limit) return(EBUSY); debug(2, "got slot %d", slot); mc->mc_slot = slot; return(0); } /******************************************************************************** * Map/unmap (mc)'s data in the controller's addressable space. 
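* * Each command slot owns a fixed window of MLX_NSEG entries in the shared * s/g table, so the list for slot N starts at * mlx_sgbusaddr + N * MLX_NSEG * sizeof(struct mlx_sgentry).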
*/ static void mlx_setup_dmamap(struct mlx_command *mc, bus_dma_segment_t *segs, int nsegments, int error) { struct mlx_softc *sc = mc->mc_sc; struct mlx_sgentry *sg; int i; debug_called(1); /* XXX should be unnecessary */ if (sc->mlx_enq2 && (nsegments > sc->mlx_enq2->me_max_sg)) panic("MLX: too many s/g segments (%d, max %d)", nsegments, sc->mlx_enq2->me_max_sg); /* get base address of s/g table */ sg = sc->mlx_sgtable + (mc->mc_slot * MLX_NSEG); /* save s/g table information in command */ mc->mc_nsgent = nsegments; mc->mc_sgphys = sc->mlx_sgbusaddr + (mc->mc_slot * MLX_NSEG * sizeof(struct mlx_sgentry)); mc->mc_dataphys = segs[0].ds_addr; /* populate s/g table */ for (i = 0; i < nsegments; i++, sg++) { sg->sg_addr = segs[i].ds_addr; sg->sg_count = segs[i].ds_len; } /* Make sure the buffers are visible on the bus. */ if (mc->mc_flags & MLX_CMD_DATAIN) bus_dmamap_sync(sc->mlx_buffer_dmat, mc->mc_dmamap, BUS_DMASYNC_PREREAD); if (mc->mc_flags & MLX_CMD_DATAOUT) bus_dmamap_sync(sc->mlx_buffer_dmat, mc->mc_dmamap, BUS_DMASYNC_PREWRITE); } static void mlx_unmapcmd(struct mlx_command *mc) { struct mlx_softc *sc = mc->mc_sc; debug_called(1); /* if the command involved data at all */ if (mc->mc_data != NULL) { if (mc->mc_flags & MLX_CMD_DATAIN) bus_dmamap_sync(sc->mlx_buffer_dmat, mc->mc_dmamap, BUS_DMASYNC_POSTREAD); if (mc->mc_flags & MLX_CMD_DATAOUT) bus_dmamap_sync(sc->mlx_buffer_dmat, mc->mc_dmamap, BUS_DMASYNC_POSTWRITE); bus_dmamap_unload(sc->mlx_buffer_dmat, mc->mc_dmamap); } } /******************************************************************************** * Try to deliver (mc) to the controller. * * Can be called at any interrupt level, with or without interrupts enabled. */ static int mlx_start(struct mlx_command *mc) { struct mlx_softc *sc = mc->mc_sc; int i; debug_called(1); /* save the slot number as ident so we can handle this command when complete */ mc->mc_mailbox[0x1] = mc->mc_slot; /* mark the command as currently being processed */ mc->mc_status = MLX_STATUS_BUSY; /* set a default 60-second timeout XXX tunable? XXX not currently used */ mc->mc_timeout = time_second + 60; /* spin waiting for the mailbox */ for (i = 100000; i > 0; i--) { if (sc->mlx_tryqueue(sc, mc)) { /* move command to work queue */ TAILQ_INSERT_TAIL(&sc->mlx_work, mc, mc_link); return (0); } else if (i > 1) mlx_done(sc, 0); } /* * We couldn't get the controller to take the command. Revoke the slot * that the command was given and return it with a bad status. */ sc->mlx_busycmd[mc->mc_slot] = NULL; device_printf(sc->mlx_dev, "controller wedged (not taking commands)\n"); mc->mc_status = MLX_STATUS_WEDGED; mlx_complete(sc); return(EIO); } /******************************************************************************** * Poll the controller (sc) for completed commands. * Update command status and free slots for reuse. If any slots were freed, * new commands may be posted. * * Returns nonzero if one or more commands were completed. 
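* * The startio flag lets completions collected from interrupt context * immediately restart the I/O queue; mlx_start() calls this with startio * clear so that it does not recurse back into the queue it is already * servicing.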
*/ static int mlx_done(struct mlx_softc *sc, int startio) { struct mlx_command *mc; int result; u_int8_t slot; u_int16_t status; debug_called(2); MLX_IO_ASSERT_LOCKED(sc); result = 0; /* loop collecting completed commands */ for (;;) { /* poll for a completed command's identifier and status */ if (sc->mlx_findcomplete(sc, &slot, &status)) { result = 1; mc = sc->mlx_busycmd[slot]; /* find command */ if (mc != NULL) { /* paranoia */ if (mc->mc_status == MLX_STATUS_BUSY) { mc->mc_status = status; /* save status */ /* free slot for reuse */ sc->mlx_busycmd[slot] = NULL; sc->mlx_busycmds--; } else { device_printf(sc->mlx_dev, "duplicate done event for slot %d\n", slot); } } else { device_printf(sc->mlx_dev, "done event for nonbusy slot %d\n", slot); } } else { break; } } /* if we've completed any commands, try posting some more */ if (result && startio) mlx_startio(sc); /* handle completion and timeouts */ mlx_complete(sc); return(result); } /******************************************************************************** * Perform post-completion processing for commands on (sc). */ static void mlx_complete(struct mlx_softc *sc) { struct mlx_command *mc, *nc; debug_called(2); MLX_IO_ASSERT_LOCKED(sc); /* scan the list of busy/done commands */ mc = TAILQ_FIRST(&sc->mlx_work); while (mc != NULL) { nc = TAILQ_NEXT(mc, mc_link); /* Command has been completed in some fashion */ if (mc->mc_status != MLX_STATUS_BUSY) { /* unmap the command's data buffer */ mlx_unmapcmd(mc); /* * Does the command have a completion handler? */ if (mc->mc_complete != NULL) { /* remove from list and give to handler */ TAILQ_REMOVE(&sc->mlx_work, mc, mc_link); mc->mc_complete(mc); /* * Is there a sleeper waiting on this command? */ } else if (mc->mc_private != NULL) { /* sleeping caller wants to know about it */ /* remove from list and wake up sleeper */ TAILQ_REMOVE(&sc->mlx_work, mc, mc_link); wakeup_one(mc->mc_private); /* * Leave the command for a caller that's polling for it. */ } else { } } mc = nc; } } /******************************************************************************** ******************************************************************************** Command Buffer Management ******************************************************************************** ********************************************************************************/ /******************************************************************************** * Get a new command buffer. * * This may return NULL in low-memory cases. * * Note that using malloc() is expensive (the command buffer is << 1 page) but * necessary if we are to be a loadable module before the zone allocator is fixed. * * If possible, we recycle a command buffer that's been used before. * * XXX Note that command buffers are not cleaned out - it is the caller's * responsibility to ensure that all required fields are filled in before * using a buffer. */ static struct mlx_command * mlx_alloccmd(struct mlx_softc *sc) { struct mlx_command *mc; int error; debug_called(1); MLX_IO_ASSERT_LOCKED(sc); if ((mc = TAILQ_FIRST(&sc->mlx_freecmds)) != NULL) TAILQ_REMOVE(&sc->mlx_freecmds, mc, mc_link); /* allocate a new command buffer? 
*/ if (mc == NULL) { mc = (struct mlx_command *)malloc(sizeof(*mc), M_DEVBUF, M_NOWAIT | M_ZERO); if (mc != NULL) { mc->mc_sc = sc; error = bus_dmamap_create(sc->mlx_buffer_dmat, 0, &mc->mc_dmamap); if (error) { free(mc, M_DEVBUF); return(NULL); } } } return(mc); } /******************************************************************************** * Release a command buffer for recycling. * * XXX It might be a good idea to limit the number of commands we save for reuse * if it's shown that this list bloats out massively. */ static void mlx_releasecmd(struct mlx_command *mc) { debug_called(1); MLX_IO_ASSERT_LOCKED(mc->mc_sc); TAILQ_INSERT_HEAD(&mc->mc_sc->mlx_freecmds, mc, mc_link); } /******************************************************************************** * Permanently discard a command buffer. */ static void mlx_freecmd(struct mlx_command *mc) { struct mlx_softc *sc = mc->mc_sc; debug_called(1); bus_dmamap_destroy(sc->mlx_buffer_dmat, mc->mc_dmamap); free(mc, M_DEVBUF); } /******************************************************************************** ******************************************************************************** Type 3 interface accessor methods ******************************************************************************** ********************************************************************************/ /******************************************************************************** * Try to give (mc) to the controller. Returns 1 if successful, 0 on failure * (the controller is not ready to take a command). */ static int mlx_v3_tryqueue(struct mlx_softc *sc, struct mlx_command *mc) { int i; debug_called(2); MLX_IO_ASSERT_LOCKED(sc); /* ready for our command? */ if (!(MLX_V3_GET_IDBR(sc) & MLX_V3_IDB_FULL)) { /* copy mailbox data to window */ for (i = 0; i < 13; i++) MLX_V3_PUT_MAILBOX(sc, i, mc->mc_mailbox[i]); /* post command */ MLX_V3_PUT_IDBR(sc, MLX_V3_IDB_FULL); return(1); } return(0); } /******************************************************************************** * See if a command has been completed, if so acknowledge its completion * and recover the slot number and status code. */ static int mlx_v3_findcomplete(struct mlx_softc *sc, u_int8_t *slot, u_int16_t *status) { debug_called(2); MLX_IO_ASSERT_LOCKED(sc); /* status available? */ if (MLX_V3_GET_ODBR(sc) & MLX_V3_ODB_SAVAIL) { *slot = MLX_V3_GET_STATUS_IDENT(sc); /* get command identifier */ *status = MLX_V3_GET_STATUS(sc); /* get status */ /* acknowledge completion */ MLX_V3_PUT_ODBR(sc, MLX_V3_ODB_SAVAIL); MLX_V3_PUT_IDBR(sc, MLX_V3_IDB_SACK); return(1); } return(0); } /******************************************************************************** * Enable/disable interrupts as requested. (No acknowledge required) */ static void mlx_v3_intaction(struct mlx_softc *sc, int action) { debug_called(1); MLX_IO_ASSERT_LOCKED(sc); switch(action) { case MLX_INTACTION_DISABLE: MLX_V3_PUT_IER(sc, 0); sc->mlx_state &= ~MLX_STATE_INTEN; break; case MLX_INTACTION_ENABLE: MLX_V3_PUT_IER(sc, 1); sc->mlx_state |= MLX_STATE_INTEN; break; } } /******************************************************************************** * Poll for firmware error codes during controller initialisation. * Returns 0 if initialisation is complete, 1 if still in progress but no * error has been fetched, 2 if an error has been retrieved. 
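* * mlx_attach() spins on this handshake until it returns 0, handing any * retrieved error/parameter tuple to mlx_fw_message() for decoding.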
*/ static int mlx_v3_fw_handshake(struct mlx_softc *sc, int *error, int *param1, int *param2, int first) { u_int8_t fwerror; debug_called(2); /* first time around, clear any hardware completion status */ if (first) { MLX_V3_PUT_IDBR(sc, MLX_V3_IDB_SACK); DELAY(1000); } /* init in progress? */ if (!(MLX_V3_GET_IDBR(sc) & MLX_V3_IDB_INIT_BUSY)) return(0); /* test error value */ fwerror = MLX_V3_GET_FWERROR(sc); if (!(fwerror & MLX_V3_FWERROR_PEND)) return(1); /* mask status pending bit, fetch status */ *error = fwerror & ~MLX_V3_FWERROR_PEND; *param1 = MLX_V3_GET_FWERROR_PARAM1(sc); *param2 = MLX_V3_GET_FWERROR_PARAM2(sc); /* acknowledge */ MLX_V3_PUT_FWERROR(sc, 0); return(2); } /******************************************************************************** ******************************************************************************** Type 4 interface accessor methods ******************************************************************************** ********************************************************************************/ /******************************************************************************** * Try to give (mc) to the controller. Returns 1 if successful, 0 on failure * (the controller is not ready to take a command). */ static int mlx_v4_tryqueue(struct mlx_softc *sc, struct mlx_command *mc) { int i; debug_called(2); MLX_IO_ASSERT_LOCKED(sc); /* ready for our command? */ if (!(MLX_V4_GET_IDBR(sc) & MLX_V4_IDB_FULL)) { /* copy mailbox data to window */ for (i = 0; i < 13; i++) MLX_V4_PUT_MAILBOX(sc, i, mc->mc_mailbox[i]); /* memory-mapped controller, so issue a write barrier to ensure the mailbox is filled */ bus_barrier(sc->mlx_mem, MLX_V4_MAILBOX, MLX_V4_MAILBOX_LENGTH, BUS_SPACE_BARRIER_WRITE); /* post command */ MLX_V4_PUT_IDBR(sc, MLX_V4_IDB_HWMBOX_CMD); return(1); } return(0); } /******************************************************************************** * See if a command has been completed, if so acknowledge its completion * and recover the slot number and status code. */ static int mlx_v4_findcomplete(struct mlx_softc *sc, u_int8_t *slot, u_int16_t *status) { debug_called(2); MLX_IO_ASSERT_LOCKED(sc); /* status available? */ if (MLX_V4_GET_ODBR(sc) & MLX_V4_ODB_HWSAVAIL) { *slot = MLX_V4_GET_STATUS_IDENT(sc); /* get command identifier */ *status = MLX_V4_GET_STATUS(sc); /* get status */ /* acknowledge completion */ MLX_V4_PUT_ODBR(sc, MLX_V4_ODB_HWMBOX_ACK); MLX_V4_PUT_IDBR(sc, MLX_V4_IDB_SACK); return(1); } return(0); } /******************************************************************************** * Enable/disable interrupts as requested. */ static void mlx_v4_intaction(struct mlx_softc *sc, int action) { debug_called(1); MLX_IO_ASSERT_LOCKED(sc); switch(action) { case MLX_INTACTION_DISABLE: MLX_V4_PUT_IER(sc, MLX_V4_IER_MASK | MLX_V4_IER_DISINT); sc->mlx_state &= ~MLX_STATE_INTEN; break; case MLX_INTACTION_ENABLE: MLX_V4_PUT_IER(sc, MLX_V4_IER_MASK & ~MLX_V4_IER_DISINT); sc->mlx_state |= MLX_STATE_INTEN; break; } } /******************************************************************************** * Poll for firmware error codes during controller initialisation. * Returns 0 if initialisation is complete, 1 if still in progress but no * error has been fetched, 2 if an error has been retrieved. 
*/ static int mlx_v4_fw_handshake(struct mlx_softc *sc, int *error, int *param1, int *param2, int first) { u_int8_t fwerror; debug_called(2); /* first time around, clear any hardware completion status */ if (first) { MLX_V4_PUT_IDBR(sc, MLX_V4_IDB_SACK); DELAY(1000); } /* init in progress? */ if (!(MLX_V4_GET_IDBR(sc) & MLX_V4_IDB_INIT_BUSY)) return(0); /* test error value */ fwerror = MLX_V4_GET_FWERROR(sc); if (!(fwerror & MLX_V4_FWERROR_PEND)) return(1); /* mask status pending bit, fetch status */ *error = fwerror & ~MLX_V4_FWERROR_PEND; *param1 = MLX_V4_GET_FWERROR_PARAM1(sc); *param2 = MLX_V4_GET_FWERROR_PARAM2(sc); /* acknowledge */ MLX_V4_PUT_FWERROR(sc, 0); return(2); } /******************************************************************************** ******************************************************************************** Type 5 interface accessor methods ******************************************************************************** ********************************************************************************/ /******************************************************************************** * Try to give (mc) to the controller. Returns 1 if successful, 0 on failure * (the controller is not ready to take a command). */ static int mlx_v5_tryqueue(struct mlx_softc *sc, struct mlx_command *mc) { int i; debug_called(2); MLX_IO_ASSERT_LOCKED(sc); /* ready for our command? */ if (MLX_V5_GET_IDBR(sc) & MLX_V5_IDB_EMPTY) { /* copy mailbox data to window */ for (i = 0; i < 13; i++) MLX_V5_PUT_MAILBOX(sc, i, mc->mc_mailbox[i]); /* post command */ MLX_V5_PUT_IDBR(sc, MLX_V5_IDB_HWMBOX_CMD); return(1); } return(0); } /******************************************************************************** * See if a command has been completed, if so acknowledge its completion * and recover the slot number and status code. */ static int mlx_v5_findcomplete(struct mlx_softc *sc, u_int8_t *slot, u_int16_t *status) { debug_called(2); MLX_IO_ASSERT_LOCKED(sc); /* status available? */ if (MLX_V5_GET_ODBR(sc) & MLX_V5_ODB_HWSAVAIL) { *slot = MLX_V5_GET_STATUS_IDENT(sc); /* get command identifier */ *status = MLX_V5_GET_STATUS(sc); /* get status */ /* acknowledge completion */ MLX_V5_PUT_ODBR(sc, MLX_V5_ODB_HWMBOX_ACK); MLX_V5_PUT_IDBR(sc, MLX_V5_IDB_SACK); return(1); } return(0); } /******************************************************************************** * Enable/disable interrupts as requested. */ static void mlx_v5_intaction(struct mlx_softc *sc, int action) { debug_called(1); MLX_IO_ASSERT_LOCKED(sc); switch(action) { case MLX_INTACTION_DISABLE: MLX_V5_PUT_IER(sc, 0xff & MLX_V5_IER_DISINT); sc->mlx_state &= ~MLX_STATE_INTEN; break; case MLX_INTACTION_ENABLE: MLX_V5_PUT_IER(sc, 0xff & ~MLX_V5_IER_DISINT); sc->mlx_state |= MLX_STATE_INTEN; break; } } /******************************************************************************** * Poll for firmware error codes during controller initialisation. * Returns 0 if initialisation is complete, 1 if still in progress but no * error has been fetched, 2 if an error has been retrieved. */ static int mlx_v5_fw_handshake(struct mlx_softc *sc, int *error, int *param1, int *param2, int first) { u_int8_t fwerror; debug_called(2); /* first time around, clear any hardware completion status */ if (first) { MLX_V5_PUT_IDBR(sc, MLX_V5_IDB_SACK); DELAY(1000); } /* init in progress? 
*/ if (MLX_V5_GET_IDBR(sc) & MLX_V5_IDB_INIT_DONE) return(0); /* test for error value */ fwerror = MLX_V5_GET_FWERROR(sc); if (!(fwerror & MLX_V5_FWERROR_PEND)) return(1); /* mask status pending bit, fetch status */ *error = fwerror & ~MLX_V5_FWERROR_PEND; *param1 = MLX_V5_GET_FWERROR_PARAM1(sc); *param2 = MLX_V5_GET_FWERROR_PARAM2(sc); /* acknowledge */ MLX_V5_PUT_FWERROR(sc, 0xff); return(2); } /******************************************************************************** ******************************************************************************** Debugging ******************************************************************************** ********************************************************************************/ /******************************************************************************** * Return a status message describing (mc) */ static char *mlx_status_messages[] = { "normal completion", /* 00 */ "irrecoverable data error", /* 01 */ "drive does not exist, or is offline", /* 02 */ "attempt to write beyond end of drive", /* 03 */ "bad data encountered", /* 04 */ "invalid log entry request", /* 05 */ "attempt to rebuild online drive", /* 06 */ "new disk failed during rebuild", /* 07 */ "invalid channel/target", /* 08 */ "rebuild/check already in progress", /* 09 */ "one or more disks are dead", /* 10 */ "invalid or non-redundant drive", /* 11 */ "channel is busy", /* 12 */ "channel is not stopped", /* 13 */ "rebuild successfully terminated", /* 14 */ "unsupported command", /* 15 */ "check condition received", /* 16 */ "device is busy", /* 17 */ "selection or command timeout", /* 18 */ "command terminated abnormally", /* 19 */ "" }; static struct { int command; u_int16_t status; int msg; } mlx_messages[] = { {MLX_CMD_READSG, 0x0001, 1}, {MLX_CMD_READSG, 0x0002, 1}, {MLX_CMD_READSG, 0x0105, 3}, {MLX_CMD_READSG, 0x010c, 4}, {MLX_CMD_WRITESG, 0x0001, 1}, {MLX_CMD_WRITESG, 0x0002, 1}, {MLX_CMD_WRITESG, 0x0105, 3}, {MLX_CMD_READSG_OLD, 0x0001, 1}, {MLX_CMD_READSG_OLD, 0x0002, 1}, {MLX_CMD_READSG_OLD, 0x0105, 3}, {MLX_CMD_WRITESG_OLD, 0x0001, 1}, {MLX_CMD_WRITESG_OLD, 0x0002, 1}, {MLX_CMD_WRITESG_OLD, 0x0105, 3}, {MLX_CMD_LOGOP, 0x0105, 5}, {MLX_CMD_REBUILDASYNC, 0x0002, 6}, {MLX_CMD_REBUILDASYNC, 0x0004, 7}, {MLX_CMD_REBUILDASYNC, 0x0105, 8}, {MLX_CMD_REBUILDASYNC, 0x0106, 9}, {MLX_CMD_REBUILDASYNC, 0x0107, 14}, {MLX_CMD_CHECKASYNC, 0x0002, 10}, {MLX_CMD_CHECKASYNC, 0x0105, 11}, {MLX_CMD_CHECKASYNC, 0x0106, 9}, {MLX_CMD_STOPCHANNEL, 0x0106, 12}, {MLX_CMD_STOPCHANNEL, 0x0105, 8}, {MLX_CMD_STARTCHANNEL, 0x0005, 13}, {MLX_CMD_STARTCHANNEL, 0x0105, 8}, {MLX_CMD_DIRECT_CDB, 0x0002, 16}, {MLX_CMD_DIRECT_CDB, 0x0008, 17}, {MLX_CMD_DIRECT_CDB, 0x000e, 18}, {MLX_CMD_DIRECT_CDB, 0x000f, 19}, {MLX_CMD_DIRECT_CDB, 0x0105, 8}, {0, 0x0104, 14}, {-1, 0, 0} }; static char * mlx_diagnose_command(struct mlx_command *mc) { static char unkmsg[80]; int i; /* look up message in table */ for (i = 0; mlx_messages[i].command != -1; i++) if (((mc->mc_mailbox[0] == mlx_messages[i].command) || (mlx_messages[i].command == 0)) && (mc->mc_status == mlx_messages[i].status)) return(mlx_status_messages[mlx_messages[i].msg]); sprintf(unkmsg, "unknown response 0x%x for command 0x%x", (int)mc->mc_status, (int)mc->mc_mailbox[0]); return(unkmsg); } /******************************************************************************* * Print a string describing the controller (sc) */ static struct { int hwid; char *name; } mlx_controller_names[] = { {0x01, "960P/PD"}, {0x02, "960PL"}, {0x10, "960PG"}, {0x11, "960PJ"}, 
{0x12, "960PR"}, {0x13, "960PT"}, {0x14, "960PTL0"}, {0x15, "960PRL"}, {0x16, "960PTL1"}, {0x20, "1164PVX"}, {-1, NULL} }; static void mlx_describe_controller(struct mlx_softc *sc) { static char buf[80]; char *model; int i; for (i = 0, model = NULL; mlx_controller_names[i].name != NULL; i++) { if ((sc->mlx_enq2->me_hardware_id & 0xff) == mlx_controller_names[i].hwid) { model = mlx_controller_names[i].name; break; } } if (model == NULL) { sprintf(buf, " model 0x%x", sc->mlx_enq2->me_hardware_id & 0xff); model = buf; } device_printf(sc->mlx_dev, "DAC%s, %d channel%s, firmware %d.%02d-%c-%02d, %dMB RAM\n", model, sc->mlx_enq2->me_actual_channels, sc->mlx_enq2->me_actual_channels > 1 ? "s" : "", sc->mlx_enq2->me_firmware_id & 0xff, (sc->mlx_enq2->me_firmware_id >> 8) & 0xff, (sc->mlx_enq2->me_firmware_id >> 24) & 0xff, (sc->mlx_enq2->me_firmware_id >> 16) & 0xff, sc->mlx_enq2->me_mem_size / (1024 * 1024)); if (bootverbose) { device_printf(sc->mlx_dev, " Hardware ID 0x%08x\n", sc->mlx_enq2->me_hardware_id); device_printf(sc->mlx_dev, " Firmware ID 0x%08x\n", sc->mlx_enq2->me_firmware_id); device_printf(sc->mlx_dev, " Configured/Actual channels %d/%d\n", sc->mlx_enq2->me_configured_channels, sc->mlx_enq2->me_actual_channels); device_printf(sc->mlx_dev, " Max Targets %d\n", sc->mlx_enq2->me_max_targets); device_printf(sc->mlx_dev, " Max Tags %d\n", sc->mlx_enq2->me_max_tags); device_printf(sc->mlx_dev, " Max System Drives %d\n", sc->mlx_enq2->me_max_sys_drives); device_printf(sc->mlx_dev, " Max Arms %d\n", sc->mlx_enq2->me_max_arms); device_printf(sc->mlx_dev, " Max Spans %d\n", sc->mlx_enq2->me_max_spans); device_printf(sc->mlx_dev, " DRAM/cache/flash/NVRAM size %d/%d/%d/%d\n", sc->mlx_enq2->me_mem_size, sc->mlx_enq2->me_cache_size, sc->mlx_enq2->me_flash_size, sc->mlx_enq2->me_nvram_size); device_printf(sc->mlx_dev, " DRAM type %d\n", sc->mlx_enq2->me_mem_type); device_printf(sc->mlx_dev, " Clock Speed %dns\n", sc->mlx_enq2->me_clock_speed); device_printf(sc->mlx_dev, " Hardware Speed %dns\n", sc->mlx_enq2->me_hardware_speed); device_printf(sc->mlx_dev, " Max Commands %d\n", sc->mlx_enq2->me_max_commands); device_printf(sc->mlx_dev, " Max SG Entries %d\n", sc->mlx_enq2->me_max_sg); device_printf(sc->mlx_dev, " Max DP %d\n", sc->mlx_enq2->me_max_dp); device_printf(sc->mlx_dev, " Max IOD %d\n", sc->mlx_enq2->me_max_iod); device_printf(sc->mlx_dev, " Max Comb %d\n", sc->mlx_enq2->me_max_comb); device_printf(sc->mlx_dev, " Latency %ds\n", sc->mlx_enq2->me_latency); device_printf(sc->mlx_dev, " SCSI Timeout %ds\n", sc->mlx_enq2->me_scsi_timeout); device_printf(sc->mlx_dev, " Min Free Lines %d\n", sc->mlx_enq2->me_min_freelines); device_printf(sc->mlx_dev, " Rate Constant %d\n", sc->mlx_enq2->me_rate_const); device_printf(sc->mlx_dev, " MAXBLK %d\n", sc->mlx_enq2->me_maxblk); device_printf(sc->mlx_dev, " Blocking Factor %d sectors\n", sc->mlx_enq2->me_blocking_factor); device_printf(sc->mlx_dev, " Cache Line Size %d blocks\n", sc->mlx_enq2->me_cacheline); device_printf(sc->mlx_dev, " SCSI Capability %s%dMHz, %d bit\n", sc->mlx_enq2->me_scsi_cap & (1<<4) ? 
"differential " : "", (1 << ((sc->mlx_enq2->me_scsi_cap >> 2) & 3)) * 10, 8 << (sc->mlx_enq2->me_scsi_cap & 0x3)); device_printf(sc->mlx_dev, " Firmware Build Number %d\n", sc->mlx_enq2->me_firmware_build); device_printf(sc->mlx_dev, " Fault Management Type %d\n", sc->mlx_enq2->me_fault_mgmt_type); device_printf(sc->mlx_dev, " Features %b\n", sc->mlx_enq2->me_firmware_features, "\20\4Background Init\3Read Ahead\2MORE\1Cluster\n"); } } /******************************************************************************* * Emit a string describing the firmware handshake status code, and return a flag * indicating whether the code represents a fatal error. * * Error code interpretations are from the Linux driver, and don't directly match * the messages printed by Mylex's BIOS. This may change if documentation on the * codes is forthcoming. */ static int mlx_fw_message(struct mlx_softc *sc, int error, int param1, int param2) { switch(error) { case 0x00: device_printf(sc->mlx_dev, "physical drive %d:%d not responding\n", param2, param1); break; case 0x08: /* we could be neater about this and give some indication when we receive more of them */ if (!(sc->mlx_flags & MLX_SPINUP_REPORTED)) { device_printf(sc->mlx_dev, "spinning up drives...\n"); sc->mlx_flags |= MLX_SPINUP_REPORTED; } break; case 0x30: device_printf(sc->mlx_dev, "configuration checksum error\n"); break; case 0x60: device_printf(sc->mlx_dev, "mirror race recovery failed\n"); break; case 0x70: device_printf(sc->mlx_dev, "mirror race recovery in progress\n"); break; case 0x90: device_printf(sc->mlx_dev, "physical drive %d:%d COD mismatch\n", param2, param1); break; case 0xa0: device_printf(sc->mlx_dev, "logical drive installation aborted\n"); break; case 0xb0: device_printf(sc->mlx_dev, "mirror race on a critical system drive\n"); break; case 0xd0: device_printf(sc->mlx_dev, "new controller configuration found\n"); break; case 0xf0: device_printf(sc->mlx_dev, "FATAL MEMORY PARITY ERROR\n"); return(1); default: device_printf(sc->mlx_dev, "unknown firmware initialisation error %02x:%02x:%02x\n", error, param1, param2); break; } return(0); } /******************************************************************************** ******************************************************************************** Utility Functions ******************************************************************************** ********************************************************************************/ /******************************************************************************** * Find the disk whose unit number is (unit) on this controller */ static struct mlx_sysdrive * mlx_findunit(struct mlx_softc *sc, int unit) { int i; /* search system drives */ MLX_CONFIG_ASSERT_LOCKED(sc); for (i = 0; i < MLX_MAXDRIVES; i++) { /* is this one attached? */ if (sc->mlx_sysdrive[i].ms_disk != 0) { /* is this the one? */ if (unit == device_get_unit(sc->mlx_sysdrive[i].ms_disk)) return(&sc->mlx_sysdrive[i]); } } return(NULL); } diff --git a/sys/dev/nfsmb/nfsmb.c b/sys/dev/nfsmb/nfsmb.c index 462f90264885..b88b2ca0001f 100644 --- a/sys/dev/nfsmb/nfsmb.c +++ b/sys/dev/nfsmb/nfsmb.c @@ -1,650 +1,645 @@ /*- * Copyright (c) 2005 Ruslan Ermilov * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. 
Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include "smbus_if.h" #define NFSMB_DEBUG(x) if (nfsmb_debug) (x) #ifdef DEBUG static int nfsmb_debug = 1; #else static int nfsmb_debug = 0; #endif /* NVIDIA nForce2/3/4 MCP */ #define NFSMB_VENDORID_NVIDIA 0x10de #define NFSMB_DEVICEID_NF2_SMB 0x0064 #define NFSMB_DEVICEID_NF2_ULTRA_SMB 0x0084 #define NFSMB_DEVICEID_NF3_PRO150_SMB 0x00d4 #define NFSMB_DEVICEID_NF3_250GB_SMB 0x00e4 #define NFSMB_DEVICEID_NF4_SMB 0x0052 #define NFSMB_DEVICEID_NF4_04_SMB 0x0034 #define NFSMB_DEVICEID_NF4_51_SMB 0x0264 #define NFSMB_DEVICEID_NF4_55_SMB 0x0368 #define NFSMB_DEVICEID_NF4_61_SMB 0x03eb #define NFSMB_DEVICEID_NF4_65_SMB 0x0446 #define NFSMB_DEVICEID_NF4_67_SMB 0x0542 #define NFSMB_DEVICEID_NF4_73_SMB 0x07d8 #define NFSMB_DEVICEID_NF4_78S_SMB 0x0752 #define NFSMB_DEVICEID_NF4_79_SMB 0x0aa2 /* PCI Configuration space registers */ #define NF2PCI_SMBASE_1 PCIR_BAR(4) #define NF2PCI_SMBASE_2 PCIR_BAR(5) /* * ACPI 3.0, Chapter 12, SMBus Host Controller Interface. 
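/*
 * [Editor's note] The ACPI 3.0 SMBus Host Controller interface referenced
 * above is a simple register-level protocol: program ADDR/CMD/DATA, kick a
 * transaction by writing SMB_PRTCL, wait for PRTCL to read back zero, then
 * check SMB_STS.  The standalone sketch below mirrors the SMB_* offsets
 * defined just after this comment; io_write()/io_read()/hw_complete() are
 * simulation stand-ins, not driver functions.
 */
#include <stdint.h>
#include <stdio.h>

#define SMB_PRTCL	0x00
#define SMB_STS		0x01
#define SMB_ADDR	0x02
#define SMB_CMD		0x03
#define SMB_DATA	0x04
#define SMB_STS_STATUS	0x1f

#define SMB_PRTCL_WRITE		0x00
#define SMB_PRTCL_BYTE_DATA	0x06

static uint8_t io[0x27];	/* simulated I/O window */

static void io_write(int r, uint8_t v) { io[r] = v; }
static uint8_t io_read(int r) { return (io[r]); }

/* Simulate the controller completing the transaction. */
static void hw_complete(void) { io[SMB_PRTCL] = 0; io[SMB_STS] = 0; }

/* Write-byte-data: program ADDR/CMD/DATA, kick PRTCL, poll, check STS. */
static int
smb_writeb(uint8_t slave, uint8_t cmd, uint8_t byte)
{
	io_write(SMB_CMD, cmd);
	io_write(SMB_DATA, byte);
	io_write(SMB_ADDR, slave);
	io_write(SMB_PRTCL, SMB_PRTCL_WRITE | SMB_PRTCL_BYTE_DATA);
	hw_complete();		/* in hardware this happens asynchronously */
	while (io_read(SMB_PRTCL) != 0)
		;		/* the driver polls with DELAY() and a timeout */
	return (io_read(SMB_STS) & SMB_STS_STATUS);	/* 0 == OK */
}

int
main(void)
{
	printf("status=%d\n", smb_writeb(0x50, 0x00, 0xaa));
	return (0);
}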
*/ #define SMB_PRTCL 0x00 /* protocol */ #define SMB_STS 0x01 /* status */ #define SMB_ADDR 0x02 /* address */ #define SMB_CMD 0x03 /* command */ #define SMB_DATA 0x04 /* 32 data registers */ #define SMB_BCNT 0x24 /* number of data bytes */ #define SMB_ALRM_A 0x25 /* alarm address */ #define SMB_ALRM_D 0x26 /* 2 bytes alarm data */ #define SMB_STS_DONE 0x80 #define SMB_STS_ALRM 0x40 #define SMB_STS_RES 0x20 #define SMB_STS_STATUS 0x1f #define SMB_STS_OK 0x00 /* OK */ #define SMB_STS_UF 0x07 /* Unknown Failure */ #define SMB_STS_DANA 0x10 /* Device Address Not Acknowledged */ #define SMB_STS_DED 0x11 /* Device Error Detected */ #define SMB_STS_DCAD 0x12 /* Device Command Access Denied */ #define SMB_STS_UE 0x13 /* Unknown Error */ #define SMB_STS_DAD 0x17 /* Device Access Denied */ #define SMB_STS_T 0x18 /* Timeout */ #define SMB_STS_HUP 0x19 /* Host Unsupported Protocol */ #define SMB_STS_B 0x1A /* Busy */ #define SMB_STS_PEC 0x1F /* PEC (CRC-8) Error */ #define SMB_PRTCL_WRITE 0x00 #define SMB_PRTCL_READ 0x01 #define SMB_PRTCL_QUICK 0x02 #define SMB_PRTCL_BYTE 0x04 #define SMB_PRTCL_BYTE_DATA 0x06 #define SMB_PRTCL_WORD_DATA 0x08 #define SMB_PRTCL_BLOCK_DATA 0x0a #define SMB_PRTCL_PROC_CALL 0x0c #define SMB_PRTCL_BLOCK_PROC_CALL 0x0d #define SMB_PRTCL_PEC 0x80 struct nfsmb_softc { int rid; struct resource *res; device_t smbus; device_t subdev; struct mtx lock; }; #define NFSMB_LOCK(nfsmb) mtx_lock(&(nfsmb)->lock) #define NFSMB_UNLOCK(nfsmb) mtx_unlock(&(nfsmb)->lock) #define NFSMB_LOCK_ASSERT(nfsmb) mtx_assert(&(nfsmb)->lock, MA_OWNED) #define NFSMB_SMBINB(nfsmb, register) \ (bus_read_1(nfsmb->res, register)) #define NFSMB_SMBOUTB(nfsmb, register, value) \ (bus_write_1(nfsmb->res, register, value)) static int nfsmb_detach(device_t dev); static int nfsmbsub_detach(device_t dev); static int nfsmbsub_probe(device_t dev) { device_set_desc(dev, "nForce2/3/4 MCP SMBus Controller"); return (BUS_PROBE_DEFAULT); } static int nfsmb_probe(device_t dev) { u_int16_t vid; u_int16_t did; vid = pci_get_vendor(dev); did = pci_get_device(dev); if (vid == NFSMB_VENDORID_NVIDIA) { switch(did) { case NFSMB_DEVICEID_NF2_SMB: case NFSMB_DEVICEID_NF2_ULTRA_SMB: case NFSMB_DEVICEID_NF3_PRO150_SMB: case NFSMB_DEVICEID_NF3_250GB_SMB: case NFSMB_DEVICEID_NF4_SMB: case NFSMB_DEVICEID_NF4_04_SMB: case NFSMB_DEVICEID_NF4_51_SMB: case NFSMB_DEVICEID_NF4_55_SMB: case NFSMB_DEVICEID_NF4_61_SMB: case NFSMB_DEVICEID_NF4_65_SMB: case NFSMB_DEVICEID_NF4_67_SMB: case NFSMB_DEVICEID_NF4_73_SMB: case NFSMB_DEVICEID_NF4_78S_SMB: case NFSMB_DEVICEID_NF4_79_SMB: device_set_desc(dev, "nForce2/3/4 MCP SMBus Controller"); return (BUS_PROBE_DEFAULT); } } return (ENXIO); } static int nfsmbsub_attach(device_t dev) { device_t parent; struct nfsmb_softc *nfsmbsub_sc = device_get_softc(dev); parent = device_get_parent(dev); nfsmbsub_sc->rid = NF2PCI_SMBASE_2; nfsmbsub_sc->res = bus_alloc_resource_any(parent, SYS_RES_IOPORT, &nfsmbsub_sc->rid, RF_ACTIVE); if (nfsmbsub_sc->res == NULL) { /* Older incarnations of the device used non-standard BARs. 
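/*
 * [Editor's note] Both attach paths in this driver use the same two-step
 * lookup: try the standard BAR rid first, then fall back to the legacy
 * config-space offset used by older chips.  A standalone sketch of that
 * fallback; try_alloc() is a hypothetical stand-in for
 * bus_alloc_resource_any().
 */
#include <stddef.h>
#include <stdio.h>

#define BAR_STD		0x24	/* e.g. PCIR_BAR(5) */
#define BAR_LEGACY	0x54	/* non-standard offset on older devices */

/* Pretend only the legacy offset works, as on older incarnations. */
static void *
try_alloc(int rid)
{
	static char window[64];

	return (rid == BAR_LEGACY ? window : NULL);
}

int
main(void)
{
	int rid = BAR_STD;
	void *res = try_alloc(rid);

	if (res == NULL) {
		/* Older incarnations used non-standard BARs; retry. */
		rid = BAR_LEGACY;
		res = try_alloc(rid);
	}
	if (res == NULL) {
		printf("could not map i/o space\n");
		return (1);
	}
	printf("mapped via rid 0x%x\n", rid);
	return (0);
}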
*/ nfsmbsub_sc->rid = 0x54; nfsmbsub_sc->res = bus_alloc_resource_any(parent, SYS_RES_IOPORT, &nfsmbsub_sc->rid, RF_ACTIVE); if (nfsmbsub_sc->res == NULL) { device_printf(dev, "could not map i/o space\n"); return (ENXIO); } } mtx_init(&nfsmbsub_sc->lock, device_get_nameunit(dev), "nfsmb", MTX_DEF); nfsmbsub_sc->smbus = device_add_child(dev, "smbus", DEVICE_UNIT_ANY); if (nfsmbsub_sc->smbus == NULL) { nfsmbsub_detach(dev); return (EINVAL); } bus_attach_children(dev); return (0); } static int nfsmb_attach(device_t dev) { struct nfsmb_softc *nfsmb_sc = device_get_softc(dev); /* Allocate I/O space */ nfsmb_sc->rid = NF2PCI_SMBASE_1; nfsmb_sc->res = bus_alloc_resource_any(dev, SYS_RES_IOPORT, &nfsmb_sc->rid, RF_ACTIVE); if (nfsmb_sc->res == NULL) { /* Older incarnations of the device used non-standard BARs. */ nfsmb_sc->rid = 0x50; nfsmb_sc->res = bus_alloc_resource_any(dev, SYS_RES_IOPORT, &nfsmb_sc->rid, RF_ACTIVE); if (nfsmb_sc->res == NULL) { device_printf(dev, "could not map i/o space\n"); return (ENXIO); } } mtx_init(&nfsmb_sc->lock, device_get_nameunit(dev), "nfsmb", MTX_DEF); /* Allocate a new smbus device */ nfsmb_sc->smbus = device_add_child(dev, "smbus", DEVICE_UNIT_ANY); if (!nfsmb_sc->smbus) { nfsmb_detach(dev); return (EINVAL); } nfsmb_sc->subdev = NULL; switch (pci_get_device(dev)) { case NFSMB_DEVICEID_NF2_SMB: case NFSMB_DEVICEID_NF2_ULTRA_SMB: case NFSMB_DEVICEID_NF3_PRO150_SMB: case NFSMB_DEVICEID_NF3_250GB_SMB: case NFSMB_DEVICEID_NF4_SMB: case NFSMB_DEVICEID_NF4_04_SMB: case NFSMB_DEVICEID_NF4_51_SMB: case NFSMB_DEVICEID_NF4_55_SMB: case NFSMB_DEVICEID_NF4_61_SMB: case NFSMB_DEVICEID_NF4_65_SMB: case NFSMB_DEVICEID_NF4_67_SMB: case NFSMB_DEVICEID_NF4_73_SMB: case NFSMB_DEVICEID_NF4_78S_SMB: case NFSMB_DEVICEID_NF4_79_SMB: /* Trying to add secondary device as slave */ nfsmb_sc->subdev = device_add_child(dev, "nfsmb", DEVICE_UNIT_ANY); if (!nfsmb_sc->subdev) { nfsmb_detach(dev); return (EINVAL); } break; default: break; } bus_attach_children(dev); return (0); } static int nfsmbsub_detach(device_t dev) { device_t parent; struct nfsmb_softc *nfsmbsub_sc = device_get_softc(dev); + int error; parent = device_get_parent(dev); - if (nfsmbsub_sc->smbus) { - device_delete_child(dev, nfsmbsub_sc->smbus); - nfsmbsub_sc->smbus = NULL; - } + error = bus_generic_detach(dev); + if (error != 0) + return (error); mtx_destroy(&nfsmbsub_sc->lock); if (nfsmbsub_sc->res) { bus_release_resource(parent, SYS_RES_IOPORT, nfsmbsub_sc->rid, nfsmbsub_sc->res); nfsmbsub_sc->res = NULL; } return (0); } static int nfsmb_detach(device_t dev) { struct nfsmb_softc *nfsmb_sc = device_get_softc(dev); + int error; - if (nfsmb_sc->subdev) { - device_delete_child(dev, nfsmb_sc->subdev); - nfsmb_sc->subdev = NULL; - } - - if (nfsmb_sc->smbus) { - device_delete_child(dev, nfsmb_sc->smbus); - nfsmb_sc->smbus = NULL; - } + error = bus_generic_detach(dev); + if (error != 0) + return (error); mtx_destroy(&nfsmb_sc->lock); if (nfsmb_sc->res) { bus_release_resource(dev, SYS_RES_IOPORT, nfsmb_sc->rid, nfsmb_sc->res); nfsmb_sc->res = NULL; } return (0); } static int nfsmb_callback(device_t dev, int index, void *data) { int error = 0; switch (index) { case SMB_REQUEST_BUS: case SMB_RELEASE_BUS: break; default: error = EINVAL; } return (error); } static int nfsmb_wait(struct nfsmb_softc *sc) { u_char sts; int error, count; NFSMB_LOCK_ASSERT(sc); if (NFSMB_SMBINB(sc, SMB_PRTCL) != 0) { count = 10000; do { DELAY(500); } while (NFSMB_SMBINB(sc, SMB_PRTCL) != 0 && count--); if (count == 0) return (SMB_ETIMEOUT); } sts = 
NFSMB_SMBINB(sc, SMB_STS) & SMB_STS_STATUS; NFSMB_DEBUG(printf("nfsmb: STS=0x%x\n", sts)); switch (sts) { case SMB_STS_OK: error = SMB_ENOERR; break; case SMB_STS_DANA: error = SMB_ENOACK; break; case SMB_STS_B: error = SMB_EBUSY; break; case SMB_STS_T: error = SMB_ETIMEOUT; break; case SMB_STS_DCAD: case SMB_STS_DAD: case SMB_STS_HUP: error = SMB_ENOTSUPP; break; default: error = SMB_EBUSERR; break; } return (error); } static int nfsmb_quick(device_t dev, u_char slave, int how) { struct nfsmb_softc *sc = (struct nfsmb_softc *)device_get_softc(dev); u_char protocol; int error; protocol = SMB_PRTCL_QUICK; switch (how) { case SMB_QWRITE: protocol |= SMB_PRTCL_WRITE; NFSMB_DEBUG(printf("nfsmb: QWRITE to 0x%x", slave)); break; case SMB_QREAD: protocol |= SMB_PRTCL_READ; NFSMB_DEBUG(printf("nfsmb: QREAD to 0x%x", slave)); break; default: panic("%s: unknown QUICK command (%x)!", __func__, how); } NFSMB_LOCK(sc); NFSMB_SMBOUTB(sc, SMB_ADDR, slave); NFSMB_SMBOUTB(sc, SMB_PRTCL, protocol); error = nfsmb_wait(sc); NFSMB_DEBUG(printf(", error=0x%x\n", error)); NFSMB_UNLOCK(sc); return (error); } static int nfsmb_sendb(device_t dev, u_char slave, char byte) { struct nfsmb_softc *sc = (struct nfsmb_softc *)device_get_softc(dev); int error; NFSMB_LOCK(sc); NFSMB_SMBOUTB(sc, SMB_CMD, byte); NFSMB_SMBOUTB(sc, SMB_ADDR, slave); NFSMB_SMBOUTB(sc, SMB_PRTCL, SMB_PRTCL_WRITE | SMB_PRTCL_BYTE); error = nfsmb_wait(sc); NFSMB_DEBUG(printf("nfsmb: SENDB to 0x%x, byte=0x%x, error=0x%x\n", slave, byte, error)); NFSMB_UNLOCK(sc); return (error); } static int nfsmb_recvb(device_t dev, u_char slave, char *byte) { struct nfsmb_softc *sc = (struct nfsmb_softc *)device_get_softc(dev); int error; NFSMB_LOCK(sc); NFSMB_SMBOUTB(sc, SMB_ADDR, slave); NFSMB_SMBOUTB(sc, SMB_PRTCL, SMB_PRTCL_READ | SMB_PRTCL_BYTE); if ((error = nfsmb_wait(sc)) == SMB_ENOERR) *byte = NFSMB_SMBINB(sc, SMB_DATA); NFSMB_DEBUG(printf("nfsmb: RECVB from 0x%x, byte=0x%x, error=0x%x\n", slave, *byte, error)); NFSMB_UNLOCK(sc); return (error); } static int nfsmb_writeb(device_t dev, u_char slave, char cmd, char byte) { struct nfsmb_softc *sc = (struct nfsmb_softc *)device_get_softc(dev); int error; NFSMB_LOCK(sc); NFSMB_SMBOUTB(sc, SMB_CMD, cmd); NFSMB_SMBOUTB(sc, SMB_DATA, byte); NFSMB_SMBOUTB(sc, SMB_ADDR, slave); NFSMB_SMBOUTB(sc, SMB_PRTCL, SMB_PRTCL_WRITE | SMB_PRTCL_BYTE_DATA); error = nfsmb_wait(sc); NFSMB_DEBUG(printf("nfsmb: WRITEB to 0x%x, cmd=0x%x, byte=0x%x, error=0x%x\n", slave, cmd, byte, error)); NFSMB_UNLOCK(sc); return (error); } static int nfsmb_readb(device_t dev, u_char slave, char cmd, char *byte) { struct nfsmb_softc *sc = (struct nfsmb_softc *)device_get_softc(dev); int error; NFSMB_LOCK(sc); NFSMB_SMBOUTB(sc, SMB_CMD, cmd); NFSMB_SMBOUTB(sc, SMB_ADDR, slave); NFSMB_SMBOUTB(sc, SMB_PRTCL, SMB_PRTCL_READ | SMB_PRTCL_BYTE_DATA); if ((error = nfsmb_wait(sc)) == SMB_ENOERR) *byte = NFSMB_SMBINB(sc, SMB_DATA); NFSMB_DEBUG(printf("nfsmb: READB from 0x%x, cmd=0x%x, byte=0x%x, error=0x%x\n", slave, cmd, (unsigned char)*byte, error)); NFSMB_UNLOCK(sc); return (error); } static int nfsmb_writew(device_t dev, u_char slave, char cmd, short word) { struct nfsmb_softc *sc = (struct nfsmb_softc *)device_get_softc(dev); int error; NFSMB_LOCK(sc); NFSMB_SMBOUTB(sc, SMB_CMD, cmd); NFSMB_SMBOUTB(sc, SMB_DATA, word); NFSMB_SMBOUTB(sc, SMB_DATA + 1, word >> 8); NFSMB_SMBOUTB(sc, SMB_ADDR, slave); NFSMB_SMBOUTB(sc, SMB_PRTCL, SMB_PRTCL_WRITE | SMB_PRTCL_WORD_DATA); error = nfsmb_wait(sc); NFSMB_DEBUG(printf("nfsmb: WRITEW to 0x%x, cmd=0x%x, word=0x%x, 
error=0x%x\n", slave, cmd, word, error)); NFSMB_UNLOCK(sc); return (error); } static int nfsmb_readw(device_t dev, u_char slave, char cmd, short *word) { struct nfsmb_softc *sc = (struct nfsmb_softc *)device_get_softc(dev); int error; NFSMB_LOCK(sc); NFSMB_SMBOUTB(sc, SMB_CMD, cmd); NFSMB_SMBOUTB(sc, SMB_ADDR, slave); NFSMB_SMBOUTB(sc, SMB_PRTCL, SMB_PRTCL_READ | SMB_PRTCL_WORD_DATA); if ((error = nfsmb_wait(sc)) == SMB_ENOERR) *word = NFSMB_SMBINB(sc, SMB_DATA) | (NFSMB_SMBINB(sc, SMB_DATA + 1) << 8); NFSMB_DEBUG(printf("nfsmb: READW from 0x%x, cmd=0x%x, word=0x%x, error=0x%x\n", slave, cmd, (unsigned short)*word, error)); NFSMB_UNLOCK(sc); return (error); } static int nfsmb_bwrite(device_t dev, u_char slave, char cmd, u_char count, char *buf) { struct nfsmb_softc *sc = (struct nfsmb_softc *)device_get_softc(dev); u_char i; int error; if (count < 1 || count > 32) return (SMB_EINVAL); NFSMB_LOCK(sc); NFSMB_SMBOUTB(sc, SMB_CMD, cmd); NFSMB_SMBOUTB(sc, SMB_BCNT, count); for (i = 0; i < count; i++) NFSMB_SMBOUTB(sc, SMB_DATA + i, buf[i]); NFSMB_SMBOUTB(sc, SMB_ADDR, slave); NFSMB_SMBOUTB(sc, SMB_PRTCL, SMB_PRTCL_WRITE | SMB_PRTCL_BLOCK_DATA); error = nfsmb_wait(sc); NFSMB_DEBUG(printf("nfsmb: WRITEBLK to 0x%x, count=0x%x, cmd=0x%x, error=0x%x", slave, count, cmd, error)); NFSMB_UNLOCK(sc); return (error); } static int nfsmb_bread(device_t dev, u_char slave, char cmd, u_char *count, char *buf) { struct nfsmb_softc *sc = (struct nfsmb_softc *)device_get_softc(dev); u_char data, len, i; int error; if (*count < 1 || *count > 32) return (SMB_EINVAL); NFSMB_LOCK(sc); NFSMB_SMBOUTB(sc, SMB_CMD, cmd); NFSMB_SMBOUTB(sc, SMB_ADDR, slave); NFSMB_SMBOUTB(sc, SMB_PRTCL, SMB_PRTCL_READ | SMB_PRTCL_BLOCK_DATA); if ((error = nfsmb_wait(sc)) == SMB_ENOERR) { len = NFSMB_SMBINB(sc, SMB_BCNT); for (i = 0; i < len; i++) { data = NFSMB_SMBINB(sc, SMB_DATA + i); if (i < *count) buf[i] = data; } *count = len; } NFSMB_DEBUG(printf("nfsmb: READBLK to 0x%x, count=0x%x, cmd=0x%x, error=0x%x", slave, *count, cmd, error)); NFSMB_UNLOCK(sc); return (error); } static device_method_t nfsmb_methods[] = { /* Device interface */ DEVMETHOD(device_probe, nfsmb_probe), DEVMETHOD(device_attach, nfsmb_attach), DEVMETHOD(device_detach, nfsmb_detach), /* SMBus interface */ DEVMETHOD(smbus_callback, nfsmb_callback), DEVMETHOD(smbus_quick, nfsmb_quick), DEVMETHOD(smbus_sendb, nfsmb_sendb), DEVMETHOD(smbus_recvb, nfsmb_recvb), DEVMETHOD(smbus_writeb, nfsmb_writeb), DEVMETHOD(smbus_readb, nfsmb_readb), DEVMETHOD(smbus_writew, nfsmb_writew), DEVMETHOD(smbus_readw, nfsmb_readw), DEVMETHOD(smbus_bwrite, nfsmb_bwrite), DEVMETHOD(smbus_bread, nfsmb_bread), { 0, 0 } }; static device_method_t nfsmbsub_methods[] = { /* Device interface */ DEVMETHOD(device_probe, nfsmbsub_probe), DEVMETHOD(device_attach, nfsmbsub_attach), DEVMETHOD(device_detach, nfsmbsub_detach), /* SMBus interface */ DEVMETHOD(smbus_callback, nfsmb_callback), DEVMETHOD(smbus_quick, nfsmb_quick), DEVMETHOD(smbus_sendb, nfsmb_sendb), DEVMETHOD(smbus_recvb, nfsmb_recvb), DEVMETHOD(smbus_writeb, nfsmb_writeb), DEVMETHOD(smbus_readb, nfsmb_readb), DEVMETHOD(smbus_writew, nfsmb_writew), DEVMETHOD(smbus_readw, nfsmb_readw), DEVMETHOD(smbus_bwrite, nfsmb_bwrite), DEVMETHOD(smbus_bread, nfsmb_bread), { 0, 0 } }; static driver_t nfsmb_driver = { "nfsmb", nfsmb_methods, sizeof(struct nfsmb_softc), }; static driver_t nfsmbsub_driver = { "nfsmb", nfsmbsub_methods, sizeof(struct nfsmb_softc), }; DRIVER_MODULE(nfsmb, pci, nfsmb_driver, 0, 0); DRIVER_MODULE(nfsmb, nfsmb, nfsmbsub_driver, 0, 
0); DRIVER_MODULE(smbus, nfsmb, smbus_driver, 0, 0); MODULE_DEPEND(nfsmb, pci, 1, 1, 1); MODULE_DEPEND(nfsmb, smbus, SMBUS_MINVER, SMBUS_PREFVER, SMBUS_MAXVER); MODULE_VERSION(nfsmb, 1); diff --git a/sys/dev/puc/puc.c b/sys/dev/puc/puc.c index 01193acf85b4..e10d0374c7de 100644 --- a/sys/dev/puc/puc.c +++ b/sys/dev/puc/puc.c @@ -1,759 +1,757 @@ /*- * SPDX-License-Identifier: BSD-2-Clause * * Copyright (c) 2006 Marcel Moolenaar * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #define PUC_ISRCCNT 5 struct puc_port { struct puc_bar *p_bar; struct resource *p_rres; struct resource *p_ires; device_t p_dev; int p_nr; int p_type; int p_rclk; bool p_hasintr:1; serdev_intr_t *p_ihsrc[PUC_ISRCCNT]; void *p_iharg; int p_ipend; }; const char puc_driver_name[] = "puc"; static MALLOC_DEFINE(M_PUC, "PUC", "PUC driver"); SYSCTL_NODE(_hw, OID_AUTO, puc, CTLFLAG_RD | CTLFLAG_MPSAFE, 0, "puc(4) driver configuration"); struct puc_bar * puc_get_bar(struct puc_softc *sc, int rid) { struct puc_bar *bar; struct rman *rm; rman_res_t end, start; int error, i; /* Find the BAR entry with the given RID. */ i = 0; while (i < PUC_PCI_BARS && sc->sc_bar[i].b_rid != rid) i++; if (i < PUC_PCI_BARS) return (&sc->sc_bar[i]); /* Not found. If we're looking for an unused entry, return NULL. */ if (rid == -1) return (NULL); /* Get an unused entry for us to fill. */ bar = puc_get_bar(sc, -1); if (bar == NULL) return (NULL); bar->b_rid = rid; bar->b_type = SYS_RES_IOPORT; bar->b_res = bus_alloc_resource_any(sc->sc_dev, bar->b_type, &bar->b_rid, RF_ACTIVE); if (bar->b_res == NULL) { bar->b_rid = rid; bar->b_type = SYS_RES_MEMORY; bar->b_res = bus_alloc_resource_any(sc->sc_dev, bar->b_type, &bar->b_rid, RF_ACTIVE); if (bar->b_res == NULL) { bar->b_rid = -1; return (NULL); } } /* Update our managed space. */ rm = (bar->b_type == SYS_RES_IOPORT) ? 
&sc->sc_ioport : &sc->sc_iomem; start = rman_get_start(bar->b_res); end = rman_get_end(bar->b_res); error = rman_manage_region(rm, start, end); if (error) { bus_release_resource(sc->sc_dev, bar->b_type, bar->b_rid, bar->b_res); bar->b_res = NULL; bar->b_rid = -1; bar = NULL; } return (bar); } static int puc_intr(void *arg) { struct puc_port *port; struct puc_softc *sc = arg; u_long ds, dev, devs; int i, idx, ipend, isrc, nints; uint8_t ilr; nints = 0; while (1) { /* * Obtain the set of devices with pending interrupts. */ devs = sc->sc_serdevs; if (sc->sc_ilr == PUC_ILR_DIGI) { idx = 0; while (devs & (0xfful << idx)) { ilr = ~bus_read_1(sc->sc_port[idx].p_rres, 7); devs &= ~0ul ^ ((u_long)ilr << idx); idx += 8; } } else if (sc->sc_ilr == PUC_ILR_QUATECH) { /* * Don't trust the value if it's the same as the option * register. It may mean that the ILR is not active and * we're reading the option register instead. This may * lead to false positives on 8-port boards. */ ilr = bus_read_1(sc->sc_port[0].p_rres, 7); if (ilr != (sc->sc_cfg_data & 0xff)) devs &= (u_long)ilr; } if (devs == 0UL) break; /* * Obtain the set of interrupt sources from those devices * that have pending interrupts. */ ipend = 0; idx = 0, dev = 1UL; ds = devs; while (ds != 0UL) { while ((ds & dev) == 0UL) idx++, dev <<= 1; ds &= ~dev; port = &sc->sc_port[idx]; port->p_ipend = SERDEV_IPEND(port->p_dev); ipend |= port->p_ipend; } if (ipend == 0) break; i = 0, isrc = SER_INT_OVERRUN; while (ipend) { while (i < PUC_ISRCCNT && !(ipend & isrc)) i++, isrc <<= 1; KASSERT(i < PUC_ISRCCNT, ("%s", __func__)); ipend &= ~isrc; idx = 0, dev = 1UL; ds = devs; while (ds != 0UL) { while ((ds & dev) == 0UL) idx++, dev <<= 1; ds &= ~dev; port = &sc->sc_port[idx]; if (!(port->p_ipend & isrc)) continue; if (port->p_ihsrc[i] != NULL) (*port->p_ihsrc[i])(port->p_iharg); nints++; } } } return ((nints > 0) ? 
FILTER_HANDLED : FILTER_STRAY); } int puc_bfe_attach(device_t dev) { char buffer[64]; struct puc_bar *bar; struct puc_port *port; struct puc_softc *sc; struct rman *rm; intptr_t res; bus_addr_t ofs, start; bus_size_t size; bus_space_handle_t bsh; bus_space_tag_t bst; int error, idx; sc = device_get_softc(dev); for (idx = 0; idx < PUC_PCI_BARS; idx++) sc->sc_bar[idx].b_rid = -1; do { sc->sc_ioport.rm_type = RMAN_ARRAY; error = rman_init(&sc->sc_ioport); if (!error) { sc->sc_iomem.rm_type = RMAN_ARRAY; error = rman_init(&sc->sc_iomem); if (!error) { sc->sc_irq.rm_type = RMAN_ARRAY; error = rman_init(&sc->sc_irq); if (!error) break; rman_fini(&sc->sc_iomem); } rman_fini(&sc->sc_ioport); } return (error); } while (0); snprintf(buffer, sizeof(buffer), "%s I/O port mapping", device_get_nameunit(dev)); sc->sc_ioport.rm_descr = strdup(buffer, M_PUC); snprintf(buffer, sizeof(buffer), "%s I/O memory mapping", device_get_nameunit(dev)); sc->sc_iomem.rm_descr = strdup(buffer, M_PUC); snprintf(buffer, sizeof(buffer), "%s port numbers", device_get_nameunit(dev)); sc->sc_irq.rm_descr = strdup(buffer, M_PUC); error = puc_config(sc, PUC_CFG_GET_NPORTS, 0, &res); KASSERT(error == 0, ("%s %d", __func__, __LINE__)); sc->sc_nports = (int)res; sc->sc_port = malloc(sc->sc_nports * sizeof(struct puc_port), M_PUC, M_WAITOK|M_ZERO); error = rman_manage_region(&sc->sc_irq, 1, sc->sc_nports); if (error) goto fail; error = puc_config(sc, PUC_CFG_SETUP, 0, &res); if (error) goto fail; for (idx = 0; idx < sc->sc_nports; idx++) { port = &sc->sc_port[idx]; port->p_nr = idx + 1; error = puc_config(sc, PUC_CFG_GET_TYPE, idx, &res); if (error) goto fail; port->p_type = res; error = puc_config(sc, PUC_CFG_GET_RID, idx, &res); if (error) goto fail; bar = puc_get_bar(sc, res); if (bar == NULL) { error = ENXIO; goto fail; } port->p_bar = bar; start = rman_get_start(bar->b_res); error = puc_config(sc, PUC_CFG_GET_OFS, idx, &res); if (error) goto fail; ofs = res; error = puc_config(sc, PUC_CFG_GET_LEN, idx, &res); if (error) goto fail; size = res; rm = (bar->b_type == SYS_RES_IOPORT) ? 
&sc->sc_ioport: &sc->sc_iomem; port->p_rres = rman_reserve_resource(rm, start + ofs, start + ofs + size - 1, size, 0, NULL); if (port->p_rres != NULL) { bsh = rman_get_bushandle(bar->b_res); bst = rman_get_bustag(bar->b_res); bus_space_subregion(bst, bsh, ofs, size, &bsh); rman_set_bushandle(port->p_rres, bsh); rman_set_bustag(port->p_rres, bst); } port->p_ires = rman_reserve_resource(&sc->sc_irq, port->p_nr, port->p_nr, 1, 0, NULL); if (port->p_ires == NULL) { error = ENXIO; goto fail; } error = puc_config(sc, PUC_CFG_GET_CLOCK, idx, &res); if (error) goto fail; port->p_rclk = res; port->p_dev = device_add_child(dev, NULL, DEVICE_UNIT_ANY); if (port->p_dev != NULL) device_set_ivars(port->p_dev, (void *)port); } error = puc_config(sc, PUC_CFG_GET_ILR, 0, &res); if (error) goto fail; sc->sc_ilr = res; if (bootverbose && sc->sc_ilr != 0) device_printf(dev, "using interrupt latch register\n"); sc->sc_ires = bus_alloc_resource_any(dev, SYS_RES_IRQ, &sc->sc_irid, RF_ACTIVE|RF_SHAREABLE); if (sc->sc_ires != NULL) { error = bus_setup_intr(dev, sc->sc_ires, INTR_TYPE_TTY, puc_intr, NULL, sc, &sc->sc_icookie); if (error) error = bus_setup_intr(dev, sc->sc_ires, INTR_TYPE_TTY | INTR_MPSAFE, NULL, (driver_intr_t *)puc_intr, sc, &sc->sc_icookie); else sc->sc_fastintr = 1; if (error) { device_printf(dev, "could not activate interrupt\n"); bus_release_resource(dev, SYS_RES_IRQ, sc->sc_irid, sc->sc_ires); sc->sc_ires = NULL; } } if (sc->sc_ires == NULL) { /* XXX no interrupt resource. Force polled mode. */ sc->sc_polled = 1; } /* Probe and attach our children. */ for (idx = 0; idx < sc->sc_nports; idx++) { port = &sc->sc_port[idx]; if (port->p_dev == NULL) continue; error = device_probe_and_attach(port->p_dev); if (error) { device_delete_child(dev, port->p_dev); port->p_dev = NULL; } } /* * If there are no serdev devices, then our interrupt handler * will do nothing. Tear it down. */ if (sc->sc_serdevs == 0UL) bus_teardown_intr(dev, sc->sc_ires, sc->sc_icookie); return (0); fail: for (idx = 0; idx < sc->sc_nports; idx++) { port = &sc->sc_port[idx]; if (port->p_dev != NULL) device_delete_child(dev, port->p_dev); if (port->p_rres != NULL) rman_release_resource(port->p_rres); if (port->p_ires != NULL) rman_release_resource(port->p_ires); } for (idx = 0; idx < PUC_PCI_BARS; idx++) { bar = &sc->sc_bar[idx]; if (bar->b_res != NULL) bus_release_resource(sc->sc_dev, bar->b_type, bar->b_rid, bar->b_res); } rman_fini(&sc->sc_irq); free(__DECONST(void *, sc->sc_irq.rm_descr), M_PUC); rman_fini(&sc->sc_iomem); free(__DECONST(void *, sc->sc_iomem.rm_descr), M_PUC); rman_fini(&sc->sc_ioport); free(__DECONST(void *, sc->sc_ioport.rm_descr), M_PUC); free(sc->sc_port, M_PUC); return (error); } int puc_bfe_detach(device_t dev) { struct puc_bar *bar; struct puc_port *port; struct puc_softc *sc; int error, idx; sc = device_get_softc(dev); /* Detach our children. 
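/*
 * [Editor's note] The hunk below replaces per-child device_delete_child()
 * calls with a single bus_generic_detach() up front.  A minimal standalone
 * sketch of why the ordering matters: children must be torn down while the
 * parent's resources are still valid.  All names here are illustrative, not
 * newbus API.
 */
#include <stdio.h>
#include <stdlib.h>

struct child { int attached; };

struct parent {
	struct child kids[2];
	char *resource;		/* stands in for rman regions, BARs, ... */
};

/* Stand-in for bus_generic_detach(): detach every attached child. */
static int
detach_children(struct parent *p)
{
	for (int i = 0; i < 2; i++)
		p->kids[i].attached = 0;	/* may still touch p->resource */
	return (0);
}

static int
parent_detach(struct parent *p)
{
	int error;

	/* Children first: they may still be using the parent's resources. */
	error = detach_children(p);
	if (error != 0)
		return (error);
	free(p->resource);			/* only now is this safe */
	p->resource = NULL;
	return (0);
}

int
main(void)
{
	struct parent p = { .kids = { {1}, {1} }, .resource = malloc(16) };

	printf("detach: %d\n", parent_detach(&p));
	return (0);
}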
*/ - error = 0; + error = bus_generic_detach(dev); + if (error != 0) + return (error); + for (idx = 0; idx < sc->sc_nports; idx++) { port = &sc->sc_port[idx]; if (port->p_dev == NULL) continue; - if (device_delete_child(dev, port->p_dev) == 0) { - if (port->p_rres != NULL) - rman_release_resource(port->p_rres); - if (port->p_ires != NULL) - rman_release_resource(port->p_ires); - } else - error = ENXIO; + if (port->p_rres != NULL) + rman_release_resource(port->p_rres); + if (port->p_ires != NULL) + rman_release_resource(port->p_ires); } - if (error) - return (error); if (sc->sc_serdevs != 0UL) bus_teardown_intr(dev, sc->sc_ires, sc->sc_icookie); bus_release_resource(dev, SYS_RES_IRQ, sc->sc_irid, sc->sc_ires); for (idx = 0; idx < PUC_PCI_BARS; idx++) { bar = &sc->sc_bar[idx]; if (bar->b_res != NULL) bus_release_resource(sc->sc_dev, bar->b_type, bar->b_rid, bar->b_res); } rman_fini(&sc->sc_irq); free(__DECONST(void *, sc->sc_irq.rm_descr), M_PUC); rman_fini(&sc->sc_iomem); free(__DECONST(void *, sc->sc_iomem.rm_descr), M_PUC); rman_fini(&sc->sc_ioport); free(__DECONST(void *, sc->sc_ioport.rm_descr), M_PUC); free(sc->sc_port, M_PUC); return (0); } int puc_bfe_probe(device_t dev, const struct puc_cfg *cfg) { struct puc_softc *sc; intptr_t res; int error; sc = device_get_softc(dev); sc->sc_dev = dev; sc->sc_cfg = cfg; /* We don't attach to single-port serial cards. */ if (cfg->ports == PUC_PORT_1S || cfg->ports == PUC_PORT_1P) return (EDOOFUS); error = puc_config(sc, PUC_CFG_GET_NPORTS, 0, &res); if (error) return (error); error = puc_config(sc, PUC_CFG_GET_DESC, 0, &res); if (error) return (error); if (res != 0) device_set_desc(dev, (const char *)res); return (BUS_PROBE_DEFAULT); } struct resource * puc_bus_alloc_resource(device_t dev, device_t child, int type, int *rid, rman_res_t start, rman_res_t end, rman_res_t count, u_int flags) { struct puc_port *port; struct resource *res; device_t assigned, originator; int error; /* Get our immediate child. */ originator = child; while (child != NULL && device_get_parent(child) != dev) child = device_get_parent(child); if (child == NULL) return (NULL); port = device_get_ivars(child); KASSERT(port != NULL, ("%s %d", __func__, __LINE__)); if (rid == NULL || *rid != 0) return (NULL); /* We only support default allocations. */ if (!RMAN_IS_DEFAULT_RANGE(start, end)) return (NULL); if (type == port->p_bar->b_type) res = port->p_rres; else if (type == SYS_RES_IRQ) res = port->p_ires; else return (NULL); if (res == NULL) return (NULL); assigned = rman_get_device(res); if (assigned == NULL) /* Not allocated */ rman_set_device(res, originator); else if (assigned != originator) return (NULL); if (flags & RF_ACTIVE) { error = rman_activate_resource(res); if (error) { if (assigned == NULL) rman_set_device(res, NULL); return (NULL); } } return (res); } int puc_bus_release_resource(device_t dev, device_t child, struct resource *res) { struct puc_port *port; device_t originator; /* Get our immediate child. 
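/*
 * [Editor's note] Several puc bus methods begin with the same walk seen
 * here: ascend from the originating device until reaching the child that
 * sits directly on this bus, whose ivars hold the struct puc_port.  A
 * standalone sketch of that walk over a toy tree; struct node and
 * immediate_child() are illustrative stand-ins for device_t and the
 * device_get_parent() loop.
 */
#include <stddef.h>
#include <stdio.h>

struct node { const char *name; struct node *parent; };

static struct node *
immediate_child(struct node *bus, struct node *child)
{
	/* Ascend until we reach the node whose parent is the bus itself. */
	while (child != NULL && child->parent != bus)
		child = child->parent;
	return (child);
}

int
main(void)
{
	struct node bus = { "puc0", NULL };
	struct node port = { "port", &bus };
	struct node uart = { "uart0", &port };
	struct node *c = immediate_child(&bus, &uart);

	printf("immediate child: %s\n", c != NULL ? c->name : "(none)");
	return (0);
}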
*/ originator = child; while (child != NULL && device_get_parent(child) != dev) child = device_get_parent(child); if (child == NULL) return (EINVAL); port = device_get_ivars(child); KASSERT(port != NULL, ("%s %d", __func__, __LINE__)); if (res == NULL) return (EINVAL); if (res == port->p_ires) { if (port->p_hasintr) return (EBUSY); } else if (res != port->p_rres) return (EINVAL); if (rman_get_device(res) != originator) return (ENXIO); if (rman_get_flags(res) & RF_ACTIVE) rman_deactivate_resource(res); rman_set_device(res, NULL); return (0); } int puc_bus_get_resource(device_t dev, device_t child, int type, int rid, rman_res_t *startp, rman_res_t *countp) { struct puc_port *port; struct resource *res; rman_res_t start; /* Get our immediate child. */ while (child != NULL && device_get_parent(child) != dev) child = device_get_parent(child); if (child == NULL) return (EINVAL); port = device_get_ivars(child); KASSERT(port != NULL, ("%s %d", __func__, __LINE__)); if (type == port->p_bar->b_type) res = port->p_rres; else if (type == SYS_RES_IRQ) res = port->p_ires; else return (ENXIO); if (rid != 0 || res == NULL) return (ENXIO); start = rman_get_start(res); if (startp != NULL) *startp = start; if (countp != NULL) *countp = rman_get_end(res) - start + 1; return (0); } int puc_bus_setup_intr(device_t dev, device_t child, struct resource *res, int flags, driver_filter_t *filt, void (*ihand)(void *), void *arg, void **cookiep) { struct puc_port *port; struct puc_softc *sc; device_t originator; int i, isrc, serdev; sc = device_get_softc(dev); /* Get our immediate child. */ originator = child; while (child != NULL && device_get_parent(child) != dev) child = device_get_parent(child); if (child == NULL) return (EINVAL); port = device_get_ivars(child); KASSERT(port != NULL, ("%s %d", __func__, __LINE__)); if (cookiep == NULL || res != port->p_ires) return (EINVAL); /* We demand that serdev devices use filter_only interrupts. */ if (port->p_type == PUC_TYPE_SERIAL && ihand != NULL) return (ENXIO); if (rman_get_device(port->p_ires) != originator) return (ENXIO); /* * Have non-serdev ports handled by the bus implementation. It * supports multiple handlers for a single interrupt as it is, * so we wouldn't add value if we did it ourselves. */ serdev = 0; if (port->p_type == PUC_TYPE_SERIAL) { i = 0, isrc = SER_INT_OVERRUN; while (i < PUC_ISRCCNT) { port->p_ihsrc[i] = SERDEV_IHAND(originator, isrc); if (port->p_ihsrc[i] != NULL) serdev = 1; i++, isrc <<= 1; } } if (!serdev) return (BUS_SETUP_INTR(device_get_parent(dev), originator, sc->sc_ires, flags, filt, ihand, arg, cookiep)); sc->sc_serdevs |= 1UL << (port->p_nr - 1); port->p_hasintr = 1; port->p_iharg = arg; *cookiep = port; return (0); } int puc_bus_teardown_intr(device_t dev, device_t child, struct resource *res, void *cookie) { struct puc_port *port; struct puc_softc *sc; device_t originator; int i; sc = device_get_softc(dev); /* Get our immediate child. 
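/*
 * [Editor's note] puc_bus_setup_intr() above registers one handler per
 * serdev interrupt source by walking a slot index i and a source bit isrc
 * in lockstep (i++, isrc <<= 1); puc_intr() later dispatches with the same
 * scan.  A standalone sketch of that dispatch; the ISRCCNT/INT_OVERRUN
 * values and handler table are illustrative only.
 */
#include <stdio.h>

#define ISRCCNT		5
#define INT_OVERRUN	0x01	/* lowest source bit, shifted per slot */

typedef void handler_t(void *);

static void on_overrun(void *arg) { (void)arg; printf("overrun\n"); }
static void on_rxready(void *arg) { (void)arg; printf("rx ready\n"); }

static handler_t *ihsrc[ISRCCNT];

static void
dispatch(int ipend)
{
	int i = 0, isrc = INT_OVERRUN;

	/* Sources are scanned in ascending order, so i/isrc never reset. */
	while (ipend != 0) {
		while (i < ISRCCNT && (ipend & isrc) == 0) {
			i++;
			isrc <<= 1;
		}
		if (i >= ISRCCNT)
			break;
		ipend &= ~isrc;
		if (ihsrc[i] != NULL)
			ihsrc[i](NULL);
	}
}

int
main(void)
{
	ihsrc[0] = on_overrun;
	ihsrc[2] = on_rxready;	/* slot for source bit INT_OVERRUN << 2 */
	dispatch(INT_OVERRUN | (INT_OVERRUN << 2));
	return (0);
}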
*/ originator = child; while (child != NULL && device_get_parent(child) != dev) child = device_get_parent(child); if (child == NULL) return (EINVAL); port = device_get_ivars(child); KASSERT(port != NULL, ("%s %d", __func__, __LINE__)); if (res != port->p_ires) return (EINVAL); if (rman_get_device(port->p_ires) != originator) return (ENXIO); if (!port->p_hasintr) return (BUS_TEARDOWN_INTR(device_get_parent(dev), originator, sc->sc_ires, cookie)); if (cookie != port) return (EINVAL); port->p_hasintr = 0; port->p_iharg = NULL; for (i = 0; i < PUC_ISRCCNT; i++) port->p_ihsrc[i] = NULL; return (0); } int puc_bus_read_ivar(device_t dev, device_t child, int index, uintptr_t *result) { struct puc_port *port; /* Get our immediate child. */ while (child != NULL && device_get_parent(child) != dev) child = device_get_parent(child); if (child == NULL) return (EINVAL); port = device_get_ivars(child); KASSERT(port != NULL, ("%s %d", __func__, __LINE__)); if (result == NULL) return (EINVAL); switch(index) { case PUC_IVAR_CLOCK: *result = port->p_rclk; break; case PUC_IVAR_TYPE: *result = port->p_type; break; default: return (ENOENT); } return (0); } int puc_bus_print_child(device_t dev, device_t child) { struct puc_port *port; int retval; port = device_get_ivars(child); retval = 0; retval += bus_print_child_header(dev, child); retval += printf(" at port %d", port->p_nr); retval += bus_print_child_footer(dev, child); return (retval); } int puc_bus_child_location(device_t dev, device_t child, struct sbuf *sb) { struct puc_port *port; port = device_get_ivars(child); sbuf_printf(sb, "port=%d", port->p_nr); return (0); } int puc_bus_child_pnpinfo(device_t dev, device_t child, struct sbuf *sb) { struct puc_port *port; port = device_get_ivars(child); sbuf_printf(sb, "type=%d", port->p_type); return (0); } diff --git a/sys/dev/sound/pci/hda/hdac.c b/sys/dev/sound/pci/hda/hdac.c index c9b11de01d18..4f9f70d98cb6 100644 --- a/sys/dev/sound/pci/hda/hdac.c +++ b/sys/dev/sound/pci/hda/hdac.c @@ -1,2184 +1,2175 @@ /*- * SPDX-License-Identifier: BSD-2-Clause * * Copyright (c) 2006 Stephane E. Potvin * Copyright (c) 2006 Ariff Abdullah * Copyright (c) 2008-2012 Alexander Motin * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ /* * Intel High Definition Audio (Controller) driver for FreeBSD. 
*/ #ifdef HAVE_KERNEL_OPTION_HEADERS #include "opt_snd.h" #endif #include #include #include #include #include #include #include #include #include #include #define HDA_DRV_TEST_REV "20120126_0002" #define hdac_lock(sc) snd_mtxlock((sc)->lock) #define hdac_unlock(sc) snd_mtxunlock((sc)->lock) #define hdac_lockassert(sc) snd_mtxassert((sc)->lock) #define HDAC_QUIRK_64BIT (1 << 0) #define HDAC_QUIRK_DMAPOS (1 << 1) #define HDAC_QUIRK_MSI (1 << 2) static const struct { const char *key; uint32_t value; } hdac_quirks_tab[] = { { "64bit", HDAC_QUIRK_64BIT }, { "dmapos", HDAC_QUIRK_DMAPOS }, { "msi", HDAC_QUIRK_MSI }, }; MALLOC_DEFINE(M_HDAC, "hdac", "HDA Controller"); static const struct { uint32_t model; const char *desc; char quirks_on; char quirks_off; } hdac_devices[] = { { HDA_INTEL_OAK, "Intel Oaktrail", 0, 0 }, { HDA_INTEL_CMLKLP, "Intel Comet Lake-LP", 0, 0 }, { HDA_INTEL_CMLKH, "Intel Comet Lake-H", 0, 0 }, { HDA_INTEL_BAY, "Intel BayTrail", 0, 0 }, { HDA_INTEL_HSW1, "Intel Haswell", 0, 0 }, { HDA_INTEL_HSW2, "Intel Haswell", 0, 0 }, { HDA_INTEL_HSW3, "Intel Haswell", 0, 0 }, { HDA_INTEL_BDW1, "Intel Broadwell", 0, 0 }, { HDA_INTEL_BDW2, "Intel Broadwell", 0, 0 }, { HDA_INTEL_BXTNT, "Intel Broxton-T", 0, 0 }, { HDA_INTEL_CPT, "Intel Cougar Point", 0, 0 }, { HDA_INTEL_PATSBURG,"Intel Patsburg", 0, 0 }, { HDA_INTEL_PPT1, "Intel Panther Point", 0, 0 }, { HDA_INTEL_BR, "Intel Braswell", 0, 0 }, { HDA_INTEL_LPT1, "Intel Lynx Point", 0, 0 }, { HDA_INTEL_LPT2, "Intel Lynx Point", 0, 0 }, { HDA_INTEL_WCPT, "Intel Wildcat Point", 0, 0 }, { HDA_INTEL_WELLS1, "Intel Wellsburg", 0, 0 }, { HDA_INTEL_WELLS2, "Intel Wellsburg", 0, 0 }, { HDA_INTEL_LPTLP1, "Intel Lynx Point-LP", 0, 0 }, { HDA_INTEL_LPTLP2, "Intel Lynx Point-LP", 0, 0 }, { HDA_INTEL_SRPTLP, "Intel Sunrise Point-LP", 0, 0 }, { HDA_INTEL_KBLKLP, "Intel Kaby Lake-LP", 0, 0 }, { HDA_INTEL_SRPT, "Intel Sunrise Point", 0, 0 }, { HDA_INTEL_KBLK, "Intel Kaby Lake", 0, 0 }, { HDA_INTEL_KBLKH, "Intel Kaby Lake-H", 0, 0 }, { HDA_INTEL_CFLK, "Intel Coffee Lake", 0, 0 }, { HDA_INTEL_CMLKS, "Intel Comet Lake-S", 0, 0 }, { HDA_INTEL_CNLK, "Intel Cannon Lake", 0, 0 }, { HDA_INTEL_ICLK, "Intel Ice Lake", 0, 0 }, { HDA_INTEL_TGLK, "Intel Tiger Lake", 0, 0 }, { HDA_INTEL_TGLKH, "Intel Tiger Lake-H", 0, 0 }, { HDA_INTEL_GMLK, "Intel Gemini Lake", 0, 0 }, { HDA_INTEL_ALLK, "Intel Alder Lake", 0, 0 }, { HDA_INTEL_ALLKM, "Intel Alder Lake-M", 0, 0 }, { HDA_INTEL_ALLKN, "Intel Alder Lake-N", 0, 0 }, { HDA_INTEL_ALLKP1, "Intel Alder Lake-P", 0, 0 }, { HDA_INTEL_ALLKP2, "Intel Alder Lake-P", 0, 0 }, { HDA_INTEL_ALLKPS, "Intel Alder Lake-PS", 0, 0 }, { HDA_INTEL_RPTLK1, "Intel Raptor Lake-P", 0, 0 }, { HDA_INTEL_RPTLK2, "Intel Raptor Lake-P", 0, 0 }, { HDA_INTEL_MTL, "Intel Meteor Lake-P", 0, 0 }, { HDA_INTEL_ARLS, "Intel Arrow Lake-S", 0, 0 }, { HDA_INTEL_ARL, "Intel Arrow Lake", 0, 0 }, { HDA_INTEL_LNLP, "Intel Lunar Lake-P", 0, 0 }, { HDA_INTEL_82801F, "Intel 82801F", 0, 0 }, { HDA_INTEL_63XXESB, "Intel 631x/632xESB", 0, 0 }, { HDA_INTEL_82801G, "Intel 82801G", 0, 0 }, { HDA_INTEL_82801H, "Intel 82801H", 0, 0 }, { HDA_INTEL_82801I, "Intel 82801I", 0, 0 }, { HDA_INTEL_JLK, "Intel Jasper Lake", 0, 0 }, { HDA_INTEL_82801JI, "Intel 82801JI", 0, 0 }, { HDA_INTEL_82801JD, "Intel 82801JD", 0, 0 }, { HDA_INTEL_PCH, "Intel Ibex Peak", 0, 0 }, { HDA_INTEL_PCH2, "Intel Ibex Peak", 0, 0 }, { HDA_INTEL_ELLK, "Intel Elkhart Lake", 0, 0 }, { HDA_INTEL_JLK2, "Intel Jasper Lake", 0, 0 }, { HDA_INTEL_BXTNP, "Intel Broxton-P", 0, 0 }, { HDA_INTEL_SCH, "Intel SCH", 0, 0 }, { 
HDA_NVIDIA_MCP51, "NVIDIA MCP51", 0, HDAC_QUIRK_MSI }, { HDA_NVIDIA_MCP55, "NVIDIA MCP55", 0, HDAC_QUIRK_MSI }, { HDA_NVIDIA_MCP61_1, "NVIDIA MCP61", 0, 0 }, { HDA_NVIDIA_MCP61_2, "NVIDIA MCP61", 0, 0 }, { HDA_NVIDIA_MCP65_1, "NVIDIA MCP65", 0, 0 }, { HDA_NVIDIA_MCP65_2, "NVIDIA MCP65", 0, 0 }, { HDA_NVIDIA_MCP67_1, "NVIDIA MCP67", 0, 0 }, { HDA_NVIDIA_MCP67_2, "NVIDIA MCP67", 0, 0 }, { HDA_NVIDIA_MCP73_1, "NVIDIA MCP73", 0, 0 }, { HDA_NVIDIA_MCP73_2, "NVIDIA MCP73", 0, 0 }, { HDA_NVIDIA_MCP78_1, "NVIDIA MCP78", 0, HDAC_QUIRK_64BIT }, { HDA_NVIDIA_MCP78_2, "NVIDIA MCP78", 0, HDAC_QUIRK_64BIT }, { HDA_NVIDIA_MCP78_3, "NVIDIA MCP78", 0, HDAC_QUIRK_64BIT }, { HDA_NVIDIA_MCP78_4, "NVIDIA MCP78", 0, HDAC_QUIRK_64BIT }, { HDA_NVIDIA_MCP79_1, "NVIDIA MCP79", 0, 0 }, { HDA_NVIDIA_MCP79_2, "NVIDIA MCP79", 0, 0 }, { HDA_NVIDIA_MCP79_3, "NVIDIA MCP79", 0, 0 }, { HDA_NVIDIA_MCP79_4, "NVIDIA MCP79", 0, 0 }, { HDA_NVIDIA_MCP89_1, "NVIDIA MCP89", 0, 0 }, { HDA_NVIDIA_MCP89_2, "NVIDIA MCP89", 0, 0 }, { HDA_NVIDIA_MCP89_3, "NVIDIA MCP89", 0, 0 }, { HDA_NVIDIA_MCP89_4, "NVIDIA MCP89", 0, 0 }, { HDA_NVIDIA_0BE2, "NVIDIA (0x0be2)", 0, HDAC_QUIRK_MSI }, { HDA_NVIDIA_0BE3, "NVIDIA (0x0be3)", 0, HDAC_QUIRK_MSI }, { HDA_NVIDIA_0BE4, "NVIDIA (0x0be4)", 0, HDAC_QUIRK_MSI }, { HDA_NVIDIA_GT100, "NVIDIA GT100", 0, HDAC_QUIRK_MSI }, { HDA_NVIDIA_GT104, "NVIDIA GT104", 0, HDAC_QUIRK_MSI }, { HDA_NVIDIA_GT106, "NVIDIA GT106", 0, HDAC_QUIRK_MSI }, { HDA_NVIDIA_GT108, "NVIDIA GT108", 0, HDAC_QUIRK_MSI }, { HDA_NVIDIA_GT116, "NVIDIA GT116", 0, HDAC_QUIRK_MSI }, { HDA_NVIDIA_GF119, "NVIDIA GF119", 0, 0 }, { HDA_NVIDIA_GF110_1, "NVIDIA GF110", 0, HDAC_QUIRK_MSI }, { HDA_NVIDIA_GF110_2, "NVIDIA GF110", 0, HDAC_QUIRK_MSI }, { HDA_ATI_SB450, "ATI SB450", 0, 0 }, { HDA_ATI_SB600, "ATI SB600", 0, 0 }, { HDA_ATI_RS600, "ATI RS600", 0, 0 }, { HDA_ATI_RS690, "ATI RS690", 0, 0 }, { HDA_ATI_RS780, "ATI RS780", 0, 0 }, { HDA_ATI_RS880, "ATI RS880", 0, 0 }, { HDA_ATI_R600, "ATI R600", 0, 0 }, { HDA_ATI_RV610, "ATI RV610", 0, 0 }, { HDA_ATI_RV620, "ATI RV620", 0, 0 }, { HDA_ATI_RV630, "ATI RV630", 0, 0 }, { HDA_ATI_RV635, "ATI RV635", 0, 0 }, { HDA_ATI_RV710, "ATI RV710", 0, 0 }, { HDA_ATI_RV730, "ATI RV730", 0, 0 }, { HDA_ATI_RV740, "ATI RV740", 0, 0 }, { HDA_ATI_RV770, "ATI RV770", 0, 0 }, { HDA_ATI_RV810, "ATI RV810", 0, 0 }, { HDA_ATI_RV830, "ATI RV830", 0, 0 }, { HDA_ATI_RV840, "ATI RV840", 0, 0 }, { HDA_ATI_RV870, "ATI RV870", 0, 0 }, { HDA_ATI_RV910, "ATI RV910", 0, 0 }, { HDA_ATI_RV930, "ATI RV930", 0, 0 }, { HDA_ATI_RV940, "ATI RV940", 0, 0 }, { HDA_ATI_RV970, "ATI RV970", 0, 0 }, { HDA_ATI_R1000, "ATI R1000", 0, 0 }, { HDA_ATI_OLAND, "ATI Oland", 0, 0 }, { HDA_ATI_KABINI, "ATI Kabini", 0, 0 }, { HDA_ATI_TRINITY, "ATI Trinity", 0, 0 }, { HDA_AMD_X370, "AMD X370", 0, 0 }, { HDA_AMD_X570, "AMD X570", 0, 0 }, { HDA_AMD_STONEY, "AMD Stoney", 0, 0 }, { HDA_AMD_RAVEN, "AMD Raven", 0, 0 }, { HDA_AMD_HUDSON2, "AMD Hudson-2", 0, 0 }, { HDA_RDC_M3010, "RDC M3010", 0, 0 }, { HDA_VIA_VT82XX, "VIA VT8251/8237A",0, 0 }, { HDA_VMWARE, "VMware", 0, 0 }, { HDA_SIS_966, "SiS 966/968", 0, 0 }, { HDA_ULI_M5461, "ULI M5461", 0, 0 }, { HDA_CREATIVE_SB1570, "Creative SB Audigy FX", 0, HDAC_QUIRK_64BIT }, /* Unknown */ { HDA_INTEL_ALL, "Intel", 0, 0 }, { HDA_NVIDIA_ALL, "NVIDIA", 0, 0 }, { HDA_ATI_ALL, "ATI", 0, 0 }, { HDA_AMD_ALL, "AMD", 0, 0 }, { HDA_CREATIVE_ALL, "Creative", 0, 0 }, { HDA_VIA_ALL, "VIA", 0, 0 }, { HDA_VMWARE_ALL, "VMware", 0, 0 }, { HDA_SIS_ALL, "SiS", 0, 0 }, { HDA_ULI_ALL, "ULI", 0, 0 }, }; static const struct { uint16_t vendor; 
	uint8_t reg;
	uint8_t mask;
	uint8_t enable;
} hdac_pcie_snoop[] = {
	{  INTEL_VENDORID, 0x00, 0x00, 0x00 },
	{    ATI_VENDORID, 0x42, 0xf8, 0x02 },
	{    AMD_VENDORID, 0x42, 0xf8, 0x02 },
	{ NVIDIA_VENDORID, 0x4e, 0xf0, 0x0f },
};

/****************************************************************************
 * Function prototypes
 ****************************************************************************/
static void	hdac_intr_handler(void *);
static int	hdac_reset(struct hdac_softc *, bool);
static int	hdac_get_capabilities(struct hdac_softc *);
static void	hdac_dma_cb(void *, bus_dma_segment_t *, int, int);
static int	hdac_dma_alloc(struct hdac_softc *, struct hdac_dma *, bus_size_t);
static void	hdac_dma_free(struct hdac_softc *, struct hdac_dma *);
static int	hdac_mem_alloc(struct hdac_softc *);
static void	hdac_mem_free(struct hdac_softc *);
static int	hdac_irq_alloc(struct hdac_softc *);
static void	hdac_irq_free(struct hdac_softc *);
static void	hdac_corb_init(struct hdac_softc *);
static void	hdac_rirb_init(struct hdac_softc *);
static void	hdac_corb_start(struct hdac_softc *);
static void	hdac_rirb_start(struct hdac_softc *);
static void	hdac_attach2(void *);
static uint32_t	hdac_send_command(struct hdac_softc *, nid_t, uint32_t);
static int	hdac_probe(device_t);
static int	hdac_attach(device_t);
static int	hdac_detach(device_t);
static int	hdac_suspend(device_t);
static int	hdac_resume(device_t);
static int	hdac_rirb_flush(struct hdac_softc *sc);
static int	hdac_unsolq_flush(struct hdac_softc *sc);

/* This function is surely going to make its way into the upper level someday. */
static void
hdac_config_fetch(struct hdac_softc *sc, uint32_t *on, uint32_t *off)
{
	const char *res = NULL;
	int i = 0, j, k, len, inv;

	if (resource_string_value(device_get_name(sc->dev),
	    device_get_unit(sc->dev), "config", &res) != 0)
		return;
	if (!(res != NULL && strlen(res) > 0))
		return;
	HDA_BOOTVERBOSE(
		device_printf(sc->dev, "Config options:");
	);
	for (;;) {
		while (res[i] != '\0' &&
		    (res[i] == ',' || isspace(res[i]) != 0))
			i++;
		if (res[i] == '\0') {
			HDA_BOOTVERBOSE(
				printf("\n");
			);
			return;
		}
		j = i;
		while (res[j] != '\0' &&
		    !(res[j] == ',' || isspace(res[j]) != 0))
			j++;
		len = j - i;
		if (len > 2 && strncmp(res + i, "no", 2) == 0)
			inv = 2;
		else
			inv = 0;
		for (k = 0; len > inv && k < nitems(hdac_quirks_tab); k++) {
			if (strncmp(res + i + inv, hdac_quirks_tab[k].key,
			    len - inv) != 0)
				continue;
			if (len - inv != strlen(hdac_quirks_tab[k].key))
				continue;
			HDA_BOOTVERBOSE(
				printf(" %s%s", (inv != 0) ? "no" : "",
				    hdac_quirks_tab[k].key);
			);
			if (inv == 0) {
				*on |= hdac_quirks_tab[k].value;
				*off &= ~hdac_quirks_tab[k].value;
			} else if (inv != 0) {
				*off |= hdac_quirks_tab[k].value;
				*on &= ~hdac_quirks_tab[k].value;
			}
			break;
		}
		i = j;
	}
}

static void
hdac_one_intr(struct hdac_softc *sc, uint32_t intsts)
{
	device_t dev;
	uint8_t rirbsts;
	int i;

	/* Was this a controller interrupt? */
	if (intsts & HDAC_INTSTS_CIS) {
		/*
		 * Placeholder: if we ever enable any bits in HDAC_WAKEEN,
		 * then we will need to check and clear HDAC_STATESTS.
		 * That event is used to report codec status changes such as
		 * a reset or a wake-up event.
		 */
		/*
		 * Placeholder: if we ever enable HDAC_CORBCTL_CMEIE, then we
		 * will need to check and clear HDAC_CORBSTS_CMEI in
		 * HDAC_CORBSTS.
		 * That event is used to report CORB memory errors.
		 */
		/*
		 * Placeholder: if we ever enable HDAC_RIRBCTL_RIRBOIC, then
		 * we will need to check and clear HDAC_RIRBSTS_RIRBOIS in
		 * HDAC_RIRBSTS.
		 * That event is used to report response FIFO overruns.
		 */
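/*
 * [Editor's note] hdac_config_fetch() above tokenizes the "config" hint and
 * flips quirk bits, with a "no" prefix inverting the sense.  A standalone
 * sketch of the same parse; the table contents mirror hdac_quirks_tab, the
 * rest is illustrative.
 */
#include <ctype.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

static const struct { const char *key; uint32_t value; } tab[] = {
	{ "64bit", 1 << 0 }, { "dmapos", 1 << 1 }, { "msi", 1 << 2 },
};

static void
parse_config(const char *res, uint32_t *on, uint32_t *off)
{
	int i = 0, j, k, len, inv;

	for (;;) {
		while (res[i] != '\0' &&
		    (res[i] == ',' || isspace((unsigned char)res[i])))
			i++;
		if (res[i] == '\0')
			return;
		j = i;
		while (res[j] != '\0' && res[j] != ',' &&
		    !isspace((unsigned char)res[j]))
			j++;
		len = j - i;
		inv = (len > 2 && strncmp(res + i, "no", 2) == 0) ? 2 : 0;
		for (k = 0; k < (int)(sizeof(tab) / sizeof(tab[0])); k++) {
			if (len - inv != (int)strlen(tab[k].key) ||
			    strncmp(res + i + inv, tab[k].key, len - inv) != 0)
				continue;
			if (inv == 0) {
				*on |= tab[k].value;
				*off &= ~tab[k].value;
			} else {
				*off |= tab[k].value;
				*on &= ~tab[k].value;
			}
			break;
		}
		i = j;
	}
}

int
main(void)
{
	uint32_t on = 0, off = 0;

	parse_config("msi,no64bit", &on, &off);
	printf("on=%#x off=%#x\n", on, off);	/* on=0x4 off=0x1 */
	return (0);
}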
*/ /* Get as many responses that we can */ rirbsts = HDAC_READ_1(&sc->mem, HDAC_RIRBSTS); while (rirbsts & HDAC_RIRBSTS_RINTFL) { HDAC_WRITE_1(&sc->mem, HDAC_RIRBSTS, HDAC_RIRBSTS_RINTFL); hdac_rirb_flush(sc); rirbsts = HDAC_READ_1(&sc->mem, HDAC_RIRBSTS); } if (sc->unsolq_rp != sc->unsolq_wp) taskqueue_enqueue(taskqueue_thread, &sc->unsolq_task); } if (intsts & HDAC_INTSTS_SIS_MASK) { for (i = 0; i < sc->num_ss; i++) { if ((intsts & (1 << i)) == 0) continue; HDAC_WRITE_1(&sc->mem, (i << 5) + HDAC_SDSTS, HDAC_SDSTS_DESE | HDAC_SDSTS_FIFOE | HDAC_SDSTS_BCIS); if ((dev = sc->streams[i].dev) != NULL) { HDAC_STREAM_INTR(dev, sc->streams[i].dir, sc->streams[i].stream); } } } } /**************************************************************************** * void hdac_intr_handler(void *) * * Interrupt handler. Processes interrupts received from the hdac. ****************************************************************************/ static void hdac_intr_handler(void *context) { struct hdac_softc *sc; uint32_t intsts; sc = (struct hdac_softc *)context; /* * Loop until HDAC_INTSTS_GIS gets clear. * It is plausible that hardware interrupts a host only when GIS goes * from zero to one. GIS is formed by OR-ing multiple hardware * statuses, so it's possible that a previously cleared status gets set * again while another status has not been cleared yet. Thus, there * will be no new interrupt as GIS always stayed set. If we don't * re-examine GIS then we can leave it set and never get an interrupt * again. */ hdac_lock(sc); intsts = HDAC_READ_4(&sc->mem, HDAC_INTSTS); while (intsts != 0xffffffff && (intsts & HDAC_INTSTS_GIS) != 0) { hdac_one_intr(sc, intsts); intsts = HDAC_READ_4(&sc->mem, HDAC_INTSTS); } hdac_unlock(sc); } static void hdac_poll_callback(void *arg) { struct hdac_softc *sc = arg; if (sc == NULL) return; hdac_lock(sc); if (sc->polling == 0) { hdac_unlock(sc); return; } callout_reset(&sc->poll_callout, sc->poll_ival, hdac_poll_callback, sc); hdac_unlock(sc); hdac_intr_handler(sc); } /**************************************************************************** * int hdac_reset(hdac_softc *, bool) * * Reset the hdac to a quiescent and known state. ****************************************************************************/ static int hdac_reset(struct hdac_softc *sc, bool wakeup) { uint32_t gctl; int count, i; /* * Stop all Streams DMA engine */ for (i = 0; i < sc->num_iss; i++) HDAC_WRITE_4(&sc->mem, HDAC_ISDCTL(sc, i), 0x0); for (i = 0; i < sc->num_oss; i++) HDAC_WRITE_4(&sc->mem, HDAC_OSDCTL(sc, i), 0x0); for (i = 0; i < sc->num_bss; i++) HDAC_WRITE_4(&sc->mem, HDAC_BSDCTL(sc, i), 0x0); /* * Stop Control DMA engines. */ HDAC_WRITE_1(&sc->mem, HDAC_CORBCTL, 0x0); HDAC_WRITE_1(&sc->mem, HDAC_RIRBCTL, 0x0); /* * Reset DMA position buffer. */ HDAC_WRITE_4(&sc->mem, HDAC_DPIBLBASE, 0x0); HDAC_WRITE_4(&sc->mem, HDAC_DPIBUBASE, 0x0); /* * Reset the controller. The reset must remain asserted for * a minimum of 100us. */ gctl = HDAC_READ_4(&sc->mem, HDAC_GCTL); HDAC_WRITE_4(&sc->mem, HDAC_GCTL, gctl & ~HDAC_GCTL_CRST); count = 10000; do { gctl = HDAC_READ_4(&sc->mem, HDAC_GCTL); if (!(gctl & HDAC_GCTL_CRST)) break; DELAY(10); } while (--count); if (gctl & HDAC_GCTL_CRST) { device_printf(sc->dev, "Unable to put hdac in reset\n"); return (ENXIO); } /* If wakeup is not requested - leave the controller in reset state. 
	 */
	if (!wakeup)
		return (0);

	DELAY(100);
	gctl = HDAC_READ_4(&sc->mem, HDAC_GCTL);
	HDAC_WRITE_4(&sc->mem, HDAC_GCTL, gctl | HDAC_GCTL_CRST);
	count = 10000;
	do {
		gctl = HDAC_READ_4(&sc->mem, HDAC_GCTL);
		if (gctl & HDAC_GCTL_CRST)
			break;
		DELAY(10);
	} while (--count);
	if (!(gctl & HDAC_GCTL_CRST)) {
		device_printf(sc->dev, "Device stuck in reset\n");
		return (ENXIO);
	}

	/*
	 * Wait for codecs to finish their own reset sequence. The delay here
	 * must be at least 521us (HDA 1.0a section 4.3 Codec Discovery).
	 */
	DELAY(1000);

	return (0);
}

/****************************************************************************
 * int hdac_get_capabilities(struct hdac_softc *);
 *
 * Retrieve the general capabilities of the hdac:
 *	Number of Input Streams
 *	Number of Output Streams
 *	Number of bidirectional Streams
 *	64bit ready
 *	CORB and RIRB sizes
 ****************************************************************************/
static int
hdac_get_capabilities(struct hdac_softc *sc)
{
	uint16_t gcap;
	uint8_t corbsize, rirbsize;

	gcap = HDAC_READ_2(&sc->mem, HDAC_GCAP);
	sc->num_iss = HDAC_GCAP_ISS(gcap);
	sc->num_oss = HDAC_GCAP_OSS(gcap);
	sc->num_bss = HDAC_GCAP_BSS(gcap);
	sc->num_ss = sc->num_iss + sc->num_oss + sc->num_bss;
	sc->num_sdo = HDAC_GCAP_NSDO(gcap);
	sc->support_64bit = (gcap & HDAC_GCAP_64OK) != 0;
	if (sc->quirks_on & HDAC_QUIRK_64BIT)
		sc->support_64bit = 1;
	else if (sc->quirks_off & HDAC_QUIRK_64BIT)
		sc->support_64bit = 0;

	corbsize = HDAC_READ_1(&sc->mem, HDAC_CORBSIZE);
	if ((corbsize & HDAC_CORBSIZE_CORBSZCAP_256) ==
	    HDAC_CORBSIZE_CORBSZCAP_256)
		sc->corb_size = 256;
	else if ((corbsize & HDAC_CORBSIZE_CORBSZCAP_16) ==
	    HDAC_CORBSIZE_CORBSZCAP_16)
		sc->corb_size = 16;
	else if ((corbsize & HDAC_CORBSIZE_CORBSZCAP_2) ==
	    HDAC_CORBSIZE_CORBSZCAP_2)
		sc->corb_size = 2;
	else {
		device_printf(sc->dev, "%s: Invalid corb size (%x)\n",
		    __func__, corbsize);
		return (ENXIO);
	}

	rirbsize = HDAC_READ_1(&sc->mem, HDAC_RIRBSIZE);
	if ((rirbsize & HDAC_RIRBSIZE_RIRBSZCAP_256) ==
	    HDAC_RIRBSIZE_RIRBSZCAP_256)
		sc->rirb_size = 256;
	else if ((rirbsize & HDAC_RIRBSIZE_RIRBSZCAP_16) ==
	    HDAC_RIRBSIZE_RIRBSZCAP_16)
		sc->rirb_size = 16;
	else if ((rirbsize & HDAC_RIRBSIZE_RIRBSZCAP_2) ==
	    HDAC_RIRBSIZE_RIRBSZCAP_2)
		sc->rirb_size = 2;
	else {
		device_printf(sc->dev, "%s: Invalid rirb size (%x)\n",
		    __func__, rirbsize);
		return (ENXIO);
	}

	HDA_BOOTVERBOSE(
		device_printf(sc->dev, "Caps: OSS %d, ISS %d, BSS %d, "
		    "NSDO %d%s, CORB %d, RIRB %d\n",
		    sc->num_oss, sc->num_iss, sc->num_bss, 1 << sc->num_sdo,
		    sc->support_64bit ? ", 64bit" : "",
		    sc->corb_size, sc->rirb_size);
	);

	return (0);
}

/****************************************************************************
 * void hdac_dma_cb
 *
 * This function is called by bus_dmamap_load when the mapping has been
 * established. We just record the physical address of the mapping into
 * the struct hdac_dma passed in.
 ****************************************************************************/
static void
hdac_dma_cb(void *callback_arg, bus_dma_segment_t *segs, int nseg, int error)
{
	struct hdac_dma *dma;

	if (error == 0) {
		dma = (struct hdac_dma *)callback_arg;
		dma->dma_paddr = segs[0].ds_addr;
	}
}

/****************************************************************************
 * int hdac_dma_alloc
 *
 * This function allocates and sets up a DMA region (struct hdac_dma).
 * It must be freed by a corresponding hdac_dma_free.
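 *
 * Typical call and error-path pairing, taken from hdac_attach() below:
 *
 *	result = hdac_dma_alloc(sc, &sc->corb_dma,
 *	    sc->corb_size * sizeof(uint32_t));
 *	if (result != 0)
 *		goto hdac_attach_fail;
 *
 * where the failure path unwinds with hdac_dma_free().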
****************************************************************************/ static int hdac_dma_alloc(struct hdac_softc *sc, struct hdac_dma *dma, bus_size_t size) { bus_size_t roundsz; int result; roundsz = roundup2(size, HDA_DMA_ALIGNMENT); bzero(dma, sizeof(*dma)); /* * Create a DMA tag */ result = bus_dma_tag_create( bus_get_dma_tag(sc->dev), /* parent */ HDA_DMA_ALIGNMENT, /* alignment */ 0, /* boundary */ (sc->support_64bit) ? BUS_SPACE_MAXADDR : BUS_SPACE_MAXADDR_32BIT, /* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, /* filtfunc */ NULL, /* fistfuncarg */ roundsz, /* maxsize */ 1, /* nsegments */ roundsz, /* maxsegsz */ 0, /* flags */ NULL, /* lockfunc */ NULL, /* lockfuncarg */ &dma->dma_tag); /* dmat */ if (result != 0) { device_printf(sc->dev, "%s: bus_dma_tag_create failed (%d)\n", __func__, result); goto hdac_dma_alloc_fail; } /* * Allocate DMA memory */ result = bus_dmamem_alloc(dma->dma_tag, (void **)&dma->dma_vaddr, BUS_DMA_NOWAIT | BUS_DMA_ZERO | ((sc->flags & HDAC_F_DMA_NOCACHE) ? BUS_DMA_NOCACHE : BUS_DMA_COHERENT), &dma->dma_map); if (result != 0) { device_printf(sc->dev, "%s: bus_dmamem_alloc failed (%d)\n", __func__, result); goto hdac_dma_alloc_fail; } dma->dma_size = roundsz; /* * Map the memory */ result = bus_dmamap_load(dma->dma_tag, dma->dma_map, (void *)dma->dma_vaddr, roundsz, hdac_dma_cb, (void *)dma, 0); if (result != 0 || dma->dma_paddr == 0) { if (result == 0) result = ENOMEM; device_printf(sc->dev, "%s: bus_dmamem_load failed (%d)\n", __func__, result); goto hdac_dma_alloc_fail; } HDA_BOOTHVERBOSE( device_printf(sc->dev, "%s: size=%ju -> roundsz=%ju\n", __func__, (uintmax_t)size, (uintmax_t)roundsz); ); return (0); hdac_dma_alloc_fail: hdac_dma_free(sc, dma); return (result); } /**************************************************************************** * void hdac_dma_free(struct hdac_softc *, struct hdac_dma *) * * Free a struct hdac_dma that has been previously allocated via the * hdac_dma_alloc function. ****************************************************************************/ static void hdac_dma_free(struct hdac_softc *sc, struct hdac_dma *dma) { if (dma->dma_paddr != 0) { /* Flush caches */ bus_dmamap_sync(dma->dma_tag, dma->dma_map, BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); bus_dmamap_unload(dma->dma_tag, dma->dma_map); dma->dma_paddr = 0; } if (dma->dma_vaddr != NULL) { bus_dmamem_free(dma->dma_tag, dma->dma_vaddr, dma->dma_map); dma->dma_vaddr = NULL; } if (dma->dma_tag != NULL) { bus_dma_tag_destroy(dma->dma_tag); dma->dma_tag = NULL; } dma->dma_size = 0; } /**************************************************************************** * int hdac_mem_alloc(struct hdac_softc *) * * Allocate all the bus resources necessary to speak with the physical * controller. ****************************************************************************/ static int hdac_mem_alloc(struct hdac_softc *sc) { struct hdac_mem *mem; mem = &sc->mem; mem->mem_rid = PCIR_BAR(0); mem->mem_res = bus_alloc_resource_any(sc->dev, SYS_RES_MEMORY, &mem->mem_rid, RF_ACTIVE); if (mem->mem_res == NULL) { device_printf(sc->dev, "%s: Unable to allocate memory resource\n", __func__); return (ENOMEM); } mem->mem_tag = rman_get_bustag(mem->mem_res); mem->mem_handle = rman_get_bushandle(mem->mem_res); return (0); } /**************************************************************************** * void hdac_mem_free(struct hdac_softc *) * * Free up resources previously allocated by hdac_mem_alloc. 
****************************************************************************/ static void hdac_mem_free(struct hdac_softc *sc) { struct hdac_mem *mem; mem = &sc->mem; if (mem->mem_res != NULL) bus_release_resource(sc->dev, SYS_RES_MEMORY, mem->mem_rid, mem->mem_res); mem->mem_res = NULL; } /**************************************************************************** * int hdac_irq_alloc(struct hdac_softc *) * * Allocate and setup the resources necessary for interrupt handling. ****************************************************************************/ static int hdac_irq_alloc(struct hdac_softc *sc) { struct hdac_irq *irq; int result; irq = &sc->irq; irq->irq_rid = 0x0; if ((sc->quirks_off & HDAC_QUIRK_MSI) == 0 && (result = pci_msi_count(sc->dev)) == 1 && pci_alloc_msi(sc->dev, &result) == 0) irq->irq_rid = 0x1; irq->irq_res = bus_alloc_resource_any(sc->dev, SYS_RES_IRQ, &irq->irq_rid, RF_SHAREABLE | RF_ACTIVE); if (irq->irq_res == NULL) { device_printf(sc->dev, "%s: Unable to allocate irq\n", __func__); goto hdac_irq_alloc_fail; } result = bus_setup_intr(sc->dev, irq->irq_res, INTR_MPSAFE | INTR_TYPE_AV, NULL, hdac_intr_handler, sc, &irq->irq_handle); if (result != 0) { device_printf(sc->dev, "%s: Unable to setup interrupt handler (%d)\n", __func__, result); goto hdac_irq_alloc_fail; } return (0); hdac_irq_alloc_fail: hdac_irq_free(sc); return (ENXIO); } /**************************************************************************** * void hdac_irq_free(struct hdac_softc *) * * Free up resources previously allocated by hdac_irq_alloc. ****************************************************************************/ static void hdac_irq_free(struct hdac_softc *sc) { struct hdac_irq *irq; irq = &sc->irq; if (irq->irq_res != NULL && irq->irq_handle != NULL) bus_teardown_intr(sc->dev, irq->irq_res, irq->irq_handle); if (irq->irq_res != NULL) bus_release_resource(sc->dev, SYS_RES_IRQ, irq->irq_rid, irq->irq_res); if (irq->irq_rid == 0x1) pci_release_msi(sc->dev); irq->irq_handle = NULL; irq->irq_res = NULL; irq->irq_rid = 0x0; } /**************************************************************************** * void hdac_corb_init(struct hdac_softc *) * * Initialize the corb registers for operations but do not start it up yet. * The CORB engine must not be running when this function is called. ****************************************************************************/ static void hdac_corb_init(struct hdac_softc *sc) { uint8_t corbsize; uint64_t corbpaddr; /* Setup the CORB size. */ switch (sc->corb_size) { case 256: corbsize = HDAC_CORBSIZE_CORBSIZE(HDAC_CORBSIZE_CORBSIZE_256); break; case 16: corbsize = HDAC_CORBSIZE_CORBSIZE(HDAC_CORBSIZE_CORBSIZE_16); break; case 2: corbsize = HDAC_CORBSIZE_CORBSIZE(HDAC_CORBSIZE_CORBSIZE_2); break; default: panic("%s: Invalid CORB size (%x)\n", __func__, sc->corb_size); } HDAC_WRITE_1(&sc->mem, HDAC_CORBSIZE, corbsize); /* Setup the CORB Address in the hdac */ corbpaddr = (uint64_t)sc->corb_dma.dma_paddr; HDAC_WRITE_4(&sc->mem, HDAC_CORBLBASE, (uint32_t)corbpaddr); HDAC_WRITE_4(&sc->mem, HDAC_CORBUBASE, (uint32_t)(corbpaddr >> 32)); /* Set the WP and RP */ sc->corb_wp = 0; HDAC_WRITE_2(&sc->mem, HDAC_CORBWP, sc->corb_wp); HDAC_WRITE_2(&sc->mem, HDAC_CORBRP, HDAC_CORBRP_CORBRPRST); /* * The HDA specification indicates that the CORBRPRST bit will always * read as zero. Unfortunately, it seems that at least the 82801G * doesn't reset the bit to zero, which stalls the corb engine. * manually reset the bit to zero before continuing. 
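	 *
	 * Put differently (illustrative only): after
	 *
	 *	HDAC_WRITE_2(&sc->mem, HDAC_CORBRP, HDAC_CORBRP_CORBRPRST);
	 *
	 * a well-behaved controller should already read the bit back as
	 * zero, but on the 82801G it stays latched, so the explicit write
	 * of zero below is needed before the CORB engine is started.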
	 */
	HDAC_WRITE_2(&sc->mem, HDAC_CORBRP, 0x0);

	/* Enable CORB error reporting */
#if 0
	HDAC_WRITE_1(&sc->mem, HDAC_CORBCTL, HDAC_CORBCTL_CMEIE);
#endif
}

/****************************************************************************
 * void hdac_rirb_init(struct hdac_softc *)
 *
 * Initialize the RIRB registers for operation, but do not start the engine
 * yet. The RIRB engine must not be running when this function is called.
 ****************************************************************************/
static void
hdac_rirb_init(struct hdac_softc *sc)
{
	uint8_t rirbsize;
	uint64_t rirbpaddr;

	/* Setup the RIRB size. */
	switch (sc->rirb_size) {
	case 256:
		rirbsize = HDAC_RIRBSIZE_RIRBSIZE(HDAC_RIRBSIZE_RIRBSIZE_256);
		break;
	case 16:
		rirbsize = HDAC_RIRBSIZE_RIRBSIZE(HDAC_RIRBSIZE_RIRBSIZE_16);
		break;
	case 2:
		rirbsize = HDAC_RIRBSIZE_RIRBSIZE(HDAC_RIRBSIZE_RIRBSIZE_2);
		break;
	default:
		panic("%s: Invalid RIRB size (%x)\n", __func__,
		    sc->rirb_size);
	}
	HDAC_WRITE_1(&sc->mem, HDAC_RIRBSIZE, rirbsize);

	/* Setup the RIRB Address in the hdac */
	rirbpaddr = (uint64_t)sc->rirb_dma.dma_paddr;
	HDAC_WRITE_4(&sc->mem, HDAC_RIRBLBASE, (uint32_t)rirbpaddr);
	HDAC_WRITE_4(&sc->mem, HDAC_RIRBUBASE, (uint32_t)(rirbpaddr >> 32));

	/* Setup the WP and RP */
	sc->rirb_rp = 0;
	HDAC_WRITE_2(&sc->mem, HDAC_RIRBWP, HDAC_RIRBWP_RIRBWPRST);

	/* Setup the interrupt threshold */
	HDAC_WRITE_2(&sc->mem, HDAC_RINTCNT, sc->rirb_size / 2);

	/* Enable overrun and response-received reporting */
#if 0
	HDAC_WRITE_1(&sc->mem, HDAC_RIRBCTL,
	    HDAC_RIRBCTL_RIRBOIC | HDAC_RIRBCTL_RINTCTL);
#else
	HDAC_WRITE_1(&sc->mem, HDAC_RIRBCTL, HDAC_RIRBCTL_RINTCTL);
#endif

	/*
	 * Make sure that the host CPU cache doesn't contain any dirty
	 * cache lines that fall within the RIRB. It should be sufficient
	 * to do this only once, as the RIRB is purely read-only from now
	 * on.
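	 *
	 * The pairing used throughout this file is BUS_DMASYNC_PREREAD
	 * before handing the ring (back) to the device, and
	 * BUS_DMASYNC_POSTREAD in hdac_rirb_flush() before the CPU
	 * parses freshly written responses.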
*/ bus_dmamap_sync(sc->rirb_dma.dma_tag, sc->rirb_dma.dma_map, BUS_DMASYNC_PREREAD); } /**************************************************************************** * void hdac_corb_start(hdac_softc *) * * Startup the corb DMA engine ****************************************************************************/ static void hdac_corb_start(struct hdac_softc *sc) { uint32_t corbctl; corbctl = HDAC_READ_1(&sc->mem, HDAC_CORBCTL); corbctl |= HDAC_CORBCTL_CORBRUN; HDAC_WRITE_1(&sc->mem, HDAC_CORBCTL, corbctl); } /**************************************************************************** * void hdac_rirb_start(hdac_softc *) * * Startup the rirb DMA engine ****************************************************************************/ static void hdac_rirb_start(struct hdac_softc *sc) { uint32_t rirbctl; rirbctl = HDAC_READ_1(&sc->mem, HDAC_RIRBCTL); rirbctl |= HDAC_RIRBCTL_RIRBDMAEN; HDAC_WRITE_1(&sc->mem, HDAC_RIRBCTL, rirbctl); } static int hdac_rirb_flush(struct hdac_softc *sc) { struct hdac_rirb *rirb_base, *rirb; nid_t cad; uint32_t resp, resp_ex; uint8_t rirbwp; int ret; rirb_base = (struct hdac_rirb *)sc->rirb_dma.dma_vaddr; rirbwp = HDAC_READ_1(&sc->mem, HDAC_RIRBWP); bus_dmamap_sync(sc->rirb_dma.dma_tag, sc->rirb_dma.dma_map, BUS_DMASYNC_POSTREAD); ret = 0; while (sc->rirb_rp != rirbwp) { sc->rirb_rp++; sc->rirb_rp %= sc->rirb_size; rirb = &rirb_base[sc->rirb_rp]; resp = le32toh(rirb->response); resp_ex = le32toh(rirb->response_ex); cad = HDAC_RIRB_RESPONSE_EX_SDATA_IN(resp_ex); if (resp_ex & HDAC_RIRB_RESPONSE_EX_UNSOLICITED) { sc->unsolq[sc->unsolq_wp++] = resp; sc->unsolq_wp %= HDAC_UNSOLQ_MAX; sc->unsolq[sc->unsolq_wp++] = cad; sc->unsolq_wp %= HDAC_UNSOLQ_MAX; } else if (sc->codecs[cad].pending <= 0) { device_printf(sc->dev, "Unexpected unsolicited " "response from address %d: %08x\n", cad, resp); } else { sc->codecs[cad].response = resp; sc->codecs[cad].pending--; } ret++; } bus_dmamap_sync(sc->rirb_dma.dma_tag, sc->rirb_dma.dma_map, BUS_DMASYNC_PREREAD); return (ret); } static int hdac_unsolq_flush(struct hdac_softc *sc) { device_t child; nid_t cad; uint32_t resp; int ret = 0; if (sc->unsolq_st == HDAC_UNSOLQ_READY) { sc->unsolq_st = HDAC_UNSOLQ_BUSY; while (sc->unsolq_rp != sc->unsolq_wp) { resp = sc->unsolq[sc->unsolq_rp++]; sc->unsolq_rp %= HDAC_UNSOLQ_MAX; cad = sc->unsolq[sc->unsolq_rp++]; sc->unsolq_rp %= HDAC_UNSOLQ_MAX; if ((child = sc->codecs[cad].dev) != NULL && device_is_attached(child)) HDAC_UNSOL_INTR(child, resp); ret++; } sc->unsolq_st = HDAC_UNSOLQ_READY; } return (ret); } /**************************************************************************** * uint32_t hdac_send_command * * Wrapper function that sends only one command to a given codec ****************************************************************************/ static uint32_t hdac_send_command(struct hdac_softc *sc, nid_t cad, uint32_t verb) { int timeout; uint32_t *corb; hdac_lockassert(sc); verb &= ~HDA_CMD_CAD_MASK; verb |= ((uint32_t)cad) << HDA_CMD_CAD_SHIFT; sc->codecs[cad].response = HDA_INVALID; sc->codecs[cad].pending++; sc->corb_wp++; sc->corb_wp %= sc->corb_size; corb = (uint32_t *)sc->corb_dma.dma_vaddr; bus_dmamap_sync(sc->corb_dma.dma_tag, sc->corb_dma.dma_map, BUS_DMASYNC_PREWRITE); corb[sc->corb_wp] = htole32(verb); bus_dmamap_sync(sc->corb_dma.dma_tag, sc->corb_dma.dma_map, BUS_DMASYNC_POSTWRITE); HDAC_WRITE_2(&sc->mem, HDAC_CORBWP, sc->corb_wp); timeout = 10000; do { if (hdac_rirb_flush(sc) == 0) DELAY(10); } while (sc->codecs[cad].pending != 0 && --timeout); if (sc->codecs[cad].pending 
!= 0) { device_printf(sc->dev, "Command 0x%08x timeout on address %d\n", verb, cad); sc->codecs[cad].pending = 0; } if (sc->unsolq_rp != sc->unsolq_wp) taskqueue_enqueue(taskqueue_thread, &sc->unsolq_task); return (sc->codecs[cad].response); } /**************************************************************************** * Device Methods ****************************************************************************/ /**************************************************************************** * int hdac_probe(device_t) * * Probe for the presence of an hdac. If none is found, check for a generic * match using the subclass of the device. ****************************************************************************/ static int hdac_probe(device_t dev) { int i, result; uint32_t model; uint16_t class, subclass; char desc[64]; model = (uint32_t)pci_get_device(dev) << 16; model |= (uint32_t)pci_get_vendor(dev) & 0x0000ffff; class = pci_get_class(dev); subclass = pci_get_subclass(dev); bzero(desc, sizeof(desc)); result = ENXIO; for (i = 0; i < nitems(hdac_devices); i++) { if (hdac_devices[i].model == model) { strlcpy(desc, hdac_devices[i].desc, sizeof(desc)); result = BUS_PROBE_DEFAULT; break; } if (HDA_DEV_MATCH(hdac_devices[i].model, model) && class == PCIC_MULTIMEDIA && subclass == PCIS_MULTIMEDIA_HDA) { snprintf(desc, sizeof(desc), "%s (0x%04x)", hdac_devices[i].desc, pci_get_device(dev)); result = BUS_PROBE_GENERIC; break; } } if (result == ENXIO && class == PCIC_MULTIMEDIA && subclass == PCIS_MULTIMEDIA_HDA) { snprintf(desc, sizeof(desc), "Generic (0x%08x)", model); result = BUS_PROBE_GENERIC; } if (result != ENXIO) device_set_descf(dev, "%s HDA Controller", desc); return (result); } static void hdac_unsolq_task(void *context, int pending) { struct hdac_softc *sc; sc = (struct hdac_softc *)context; hdac_lock(sc); hdac_unsolq_flush(sc); hdac_unlock(sc); } /**************************************************************************** * int hdac_attach(device_t) * * Attach the device into the kernel. Interrupts usually won't be enabled * when this function is called. Setup everything that doesn't require * interrupts and defer probing of codecs until interrupts are enabled. 
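 *
 * The deferral uses the standard config_intrhook mechanism; the relevant
 * lines from the function body below are roughly:
 *
 *	sc->intrhook.ich_func = hdac_attach2;
 *	sc->intrhook.ich_arg = (void *)sc;
 *	if (cold == 0 || config_intrhook_establish(&sc->intrhook) != 0) {
 *		sc->intrhook.ich_func = NULL;
 *		hdac_attach2((void *)sc);
 *	}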
****************************************************************************/ static int hdac_attach(device_t dev) { struct hdac_softc *sc; int result; int i, devid = -1; uint32_t model; uint16_t class, subclass; uint16_t vendor; uint8_t v; sc = device_get_softc(dev); HDA_BOOTVERBOSE( device_printf(dev, "PCI card vendor: 0x%04x, device: 0x%04x\n", pci_get_subvendor(dev), pci_get_subdevice(dev)); device_printf(dev, "HDA Driver Revision: %s\n", HDA_DRV_TEST_REV); ); model = (uint32_t)pci_get_device(dev) << 16; model |= (uint32_t)pci_get_vendor(dev) & 0x0000ffff; class = pci_get_class(dev); subclass = pci_get_subclass(dev); for (i = 0; i < nitems(hdac_devices); i++) { if (hdac_devices[i].model == model) { devid = i; break; } if (HDA_DEV_MATCH(hdac_devices[i].model, model) && class == PCIC_MULTIMEDIA && subclass == PCIS_MULTIMEDIA_HDA) { devid = i; break; } } sc->lock = snd_mtxcreate(device_get_nameunit(dev), "HDA driver mutex"); sc->dev = dev; TASK_INIT(&sc->unsolq_task, 0, hdac_unsolq_task, sc); callout_init(&sc->poll_callout, 1); for (i = 0; i < HDAC_CODEC_MAX; i++) sc->codecs[i].dev = NULL; if (devid >= 0) { sc->quirks_on = hdac_devices[devid].quirks_on; sc->quirks_off = hdac_devices[devid].quirks_off; } else { sc->quirks_on = 0; sc->quirks_off = 0; } if (resource_int_value(device_get_name(dev), device_get_unit(dev), "msi", &i) == 0) { if (i == 0) sc->quirks_off |= HDAC_QUIRK_MSI; else { sc->quirks_on |= HDAC_QUIRK_MSI; sc->quirks_off |= ~HDAC_QUIRK_MSI; } } hdac_config_fetch(sc, &sc->quirks_on, &sc->quirks_off); HDA_BOOTVERBOSE( device_printf(sc->dev, "Config options: on=0x%08x off=0x%08x\n", sc->quirks_on, sc->quirks_off); ); sc->poll_ival = hz; if (resource_int_value(device_get_name(dev), device_get_unit(dev), "polling", &i) == 0 && i != 0) sc->polling = 1; else sc->polling = 0; pci_enable_busmaster(dev); vendor = pci_get_vendor(dev); if (vendor == INTEL_VENDORID) { /* TCSEL -> TC0 */ v = pci_read_config(dev, 0x44, 1); pci_write_config(dev, 0x44, v & 0xf8, 1); HDA_BOOTHVERBOSE( device_printf(dev, "TCSEL: 0x%02d -> 0x%02d\n", v, pci_read_config(dev, 0x44, 1)); ); } #if defined(__i386__) || defined(__amd64__) sc->flags |= HDAC_F_DMA_NOCACHE; if (resource_int_value(device_get_name(dev), device_get_unit(dev), "snoop", &i) == 0 && i != 0) { #else sc->flags &= ~HDAC_F_DMA_NOCACHE; #endif /* * Try to enable PCIe snoop to avoid messing around with * uncacheable DMA attribute. Since PCIe snoop register * config is pretty much vendor specific, there are no * general solutions on how to enable it, forcing us (even * Microsoft) to enable uncacheable or write combined DMA * by default. 
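	 *
	 * Each hdac_pcie_snoop[] entry gives the vendor's config
	 * register, a mask of bits to preserve and the bits to set; the
	 * Intel entry uses register 0x00 as a sentinel meaning snooping
	 * already works without touching config space.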
* * http://msdn2.microsoft.com/en-us/library/ms790324.aspx */ for (i = 0; i < nitems(hdac_pcie_snoop); i++) { if (hdac_pcie_snoop[i].vendor != vendor) continue; sc->flags &= ~HDAC_F_DMA_NOCACHE; if (hdac_pcie_snoop[i].reg == 0x00) break; v = pci_read_config(dev, hdac_pcie_snoop[i].reg, 1); if ((v & hdac_pcie_snoop[i].enable) == hdac_pcie_snoop[i].enable) break; v &= hdac_pcie_snoop[i].mask; v |= hdac_pcie_snoop[i].enable; pci_write_config(dev, hdac_pcie_snoop[i].reg, v, 1); v = pci_read_config(dev, hdac_pcie_snoop[i].reg, 1); if ((v & hdac_pcie_snoop[i].enable) != hdac_pcie_snoop[i].enable) { HDA_BOOTVERBOSE( device_printf(dev, "WARNING: Failed to enable PCIe " "snoop!\n"); ); #if defined(__i386__) || defined(__amd64__) sc->flags |= HDAC_F_DMA_NOCACHE; #endif } break; } #if defined(__i386__) || defined(__amd64__) } #endif HDA_BOOTHVERBOSE( device_printf(dev, "DMA Coherency: %s / vendor=0x%04x\n", (sc->flags & HDAC_F_DMA_NOCACHE) ? "Uncacheable" : "PCIe snoop", vendor); ); /* Allocate resources */ result = hdac_mem_alloc(sc); if (result != 0) goto hdac_attach_fail; /* Get Capabilities */ result = hdac_get_capabilities(sc); if (result != 0) goto hdac_attach_fail; /* Allocate CORB, RIRB, POS and BDLs dma memory */ result = hdac_dma_alloc(sc, &sc->corb_dma, sc->corb_size * sizeof(uint32_t)); if (result != 0) goto hdac_attach_fail; result = hdac_dma_alloc(sc, &sc->rirb_dma, sc->rirb_size * sizeof(struct hdac_rirb)); if (result != 0) goto hdac_attach_fail; sc->streams = malloc(sizeof(struct hdac_stream) * sc->num_ss, M_HDAC, M_ZERO | M_WAITOK); for (i = 0; i < sc->num_ss; i++) { result = hdac_dma_alloc(sc, &sc->streams[i].bdl, sizeof(struct hdac_bdle) * HDA_BDL_MAX); if (result != 0) goto hdac_attach_fail; } if (sc->quirks_on & HDAC_QUIRK_DMAPOS) { if (hdac_dma_alloc(sc, &sc->pos_dma, (sc->num_ss) * 8) != 0) { HDA_BOOTVERBOSE( device_printf(dev, "Failed to " "allocate DMA pos buffer " "(non-fatal)\n"); ); } else { uint64_t addr = sc->pos_dma.dma_paddr; HDAC_WRITE_4(&sc->mem, HDAC_DPIBUBASE, addr >> 32); HDAC_WRITE_4(&sc->mem, HDAC_DPIBLBASE, (addr & HDAC_DPLBASE_DPLBASE_MASK) | HDAC_DPLBASE_DPLBASE_DMAPBE); } } result = bus_dma_tag_create( bus_get_dma_tag(sc->dev), /* parent */ HDA_DMA_ALIGNMENT, /* alignment */ 0, /* boundary */ (sc->support_64bit) ? 
BUS_SPACE_MAXADDR : BUS_SPACE_MAXADDR_32BIT, /* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, /* filtfunc */ NULL, /* fistfuncarg */ HDA_BUFSZ_MAX, /* maxsize */ 1, /* nsegments */ HDA_BUFSZ_MAX, /* maxsegsz */ 0, /* flags */ NULL, /* lockfunc */ NULL, /* lockfuncarg */ &sc->chan_dmat); /* dmat */ if (result != 0) { device_printf(dev, "%s: bus_dma_tag_create failed (%d)\n", __func__, result); goto hdac_attach_fail; } /* Quiesce everything */ HDA_BOOTHVERBOSE( device_printf(dev, "Reset controller...\n"); ); hdac_reset(sc, true); /* Initialize the CORB and RIRB */ hdac_corb_init(sc); hdac_rirb_init(sc); result = hdac_irq_alloc(sc); if (result != 0) goto hdac_attach_fail; /* Defer remaining of initialization until interrupts are enabled */ sc->intrhook.ich_func = hdac_attach2; sc->intrhook.ich_arg = (void *)sc; if (cold == 0 || config_intrhook_establish(&sc->intrhook) != 0) { sc->intrhook.ich_func = NULL; hdac_attach2((void *)sc); } return (0); hdac_attach_fail: hdac_irq_free(sc); if (sc->streams != NULL) for (i = 0; i < sc->num_ss; i++) hdac_dma_free(sc, &sc->streams[i].bdl); free(sc->streams, M_HDAC); hdac_dma_free(sc, &sc->rirb_dma); hdac_dma_free(sc, &sc->corb_dma); hdac_mem_free(sc); snd_mtxfree(sc->lock); return (ENXIO); } static int sysctl_hdac_pindump(SYSCTL_HANDLER_ARGS) { struct hdac_softc *sc; device_t *devlist; device_t dev; int devcount, i, err, val; dev = oidp->oid_arg1; sc = device_get_softc(dev); if (sc == NULL) return (EINVAL); val = 0; err = sysctl_handle_int(oidp, &val, 0, req); if (err != 0 || req->newptr == NULL || val == 0) return (err); /* XXX: Temporary. For debugging. */ if (val == 100) { hdac_suspend(dev); return (0); } else if (val == 101) { hdac_resume(dev); return (0); } bus_topo_lock(); if ((err = device_get_children(dev, &devlist, &devcount)) != 0) { bus_topo_unlock(); return (err); } hdac_lock(sc); for (i = 0; i < devcount; i++) HDAC_PINDUMP(devlist[i]); hdac_unlock(sc); bus_topo_unlock(); free(devlist, M_TEMP); return (0); } static int hdac_mdata_rate(uint16_t fmt) { static const int mbits[8] = { 8, 16, 32, 32, 32, 32, 32, 32 }; int rate, bits; if (fmt & (1 << 14)) rate = 44100; else rate = 48000; rate *= ((fmt >> 11) & 0x07) + 1; rate /= ((fmt >> 8) & 0x07) + 1; bits = mbits[(fmt >> 4) & 0x03]; bits *= (fmt & 0x0f) + 1; return (rate * bits); } static int hdac_bdata_rate(uint16_t fmt, int output) { static const int bbits[8] = { 8, 16, 20, 24, 32, 32, 32, 32 }; int rate, bits; rate = 48000; rate *= ((fmt >> 11) & 0x07) + 1; bits = bbits[(fmt >> 4) & 0x03]; bits *= (fmt & 0x0f) + 1; if (!output) bits = ((bits + 7) & ~0x07) + 10; return (rate * bits); } static void hdac_poll_reinit(struct hdac_softc *sc) { int i, pollticks, min = 1000000; struct hdac_stream *s; if (sc->polling == 0) return; if (sc->unsol_registered > 0) min = hz / 2; for (i = 0; i < sc->num_ss; i++) { s = &sc->streams[i]; if (s->running == 0) continue; pollticks = ((uint64_t)hz * s->blksz) / (hdac_mdata_rate(s->format) / 8); pollticks >>= 1; if (pollticks > hz) pollticks = hz; if (pollticks < 1) pollticks = 1; if (min > pollticks) min = pollticks; } sc->poll_ival = min; if (min == 1000000) callout_stop(&sc->poll_callout); else callout_reset(&sc->poll_callout, 1, hdac_poll_callback, sc); } static int sysctl_hdac_polling(SYSCTL_HANDLER_ARGS) { struct hdac_softc *sc; device_t dev; uint32_t ctl; int err, val; dev = oidp->oid_arg1; sc = device_get_softc(dev); if (sc == NULL) return (EINVAL); hdac_lock(sc); val = sc->polling; hdac_unlock(sc); err = sysctl_handle_int(oidp, &val, 0, req); if (err != 
0 || req->newptr == NULL) return (err); if (val < 0 || val > 1) return (EINVAL); hdac_lock(sc); if (val != sc->polling) { if (val == 0) { callout_stop(&sc->poll_callout); hdac_unlock(sc); callout_drain(&sc->poll_callout); hdac_lock(sc); sc->polling = 0; ctl = HDAC_READ_4(&sc->mem, HDAC_INTCTL); ctl |= HDAC_INTCTL_GIE; HDAC_WRITE_4(&sc->mem, HDAC_INTCTL, ctl); } else { ctl = HDAC_READ_4(&sc->mem, HDAC_INTCTL); ctl &= ~HDAC_INTCTL_GIE; HDAC_WRITE_4(&sc->mem, HDAC_INTCTL, ctl); sc->polling = 1; hdac_poll_reinit(sc); } } hdac_unlock(sc); return (err); } static void hdac_attach2(void *arg) { struct hdac_softc *sc; device_t child; uint32_t vendorid, revisionid; int i; uint16_t statests; sc = (struct hdac_softc *)arg; hdac_lock(sc); /* Remove ourselves from the config hooks */ if (sc->intrhook.ich_func != NULL) { config_intrhook_disestablish(&sc->intrhook); sc->intrhook.ich_func = NULL; } HDA_BOOTHVERBOSE( device_printf(sc->dev, "Starting CORB Engine...\n"); ); hdac_corb_start(sc); HDA_BOOTHVERBOSE( device_printf(sc->dev, "Starting RIRB Engine...\n"); ); hdac_rirb_start(sc); /* * Clear HDAC_WAKEEN as at present we have no use for SDI wake * (status change) interrupts. The documentation says that we * should not make any assumptions about the state of this register * and set it explicitly. * NB: this needs to be done before the interrupt is enabled as * the handler does not expect this interrupt source. */ HDAC_WRITE_2(&sc->mem, HDAC_WAKEEN, 0); /* * Read and clear post-reset SDI wake status. * Each set bit corresponds to a codec that came out of reset. */ statests = HDAC_READ_2(&sc->mem, HDAC_STATESTS); HDAC_WRITE_2(&sc->mem, HDAC_STATESTS, statests); HDA_BOOTHVERBOSE( device_printf(sc->dev, "Enabling controller interrupt...\n"); ); HDAC_WRITE_4(&sc->mem, HDAC_GCTL, HDAC_READ_4(&sc->mem, HDAC_GCTL) | HDAC_GCTL_UNSOL); if (sc->polling == 0) { HDAC_WRITE_4(&sc->mem, HDAC_INTCTL, HDAC_INTCTL_CIE | HDAC_INTCTL_GIE); } DELAY(1000); HDA_BOOTHVERBOSE( device_printf(sc->dev, "Scanning HDA codecs ...\n"); ); hdac_unlock(sc); for (i = 0; i < HDAC_CODEC_MAX; i++) { if (HDAC_STATESTS_SDIWAKE(statests, i)) { HDA_BOOTHVERBOSE( device_printf(sc->dev, "Found CODEC at address %d\n", i); ); hdac_lock(sc); vendorid = hdac_send_command(sc, i, HDA_CMD_GET_PARAMETER(0, 0x0, HDA_PARAM_VENDOR_ID)); revisionid = hdac_send_command(sc, i, HDA_CMD_GET_PARAMETER(0, 0x0, HDA_PARAM_REVISION_ID)); hdac_unlock(sc); if (vendorid == HDA_INVALID && revisionid == HDA_INVALID) { device_printf(sc->dev, "CODEC at address %d not responding!\n", i); continue; } sc->codecs[i].vendor_id = HDA_PARAM_VENDOR_ID_VENDOR_ID(vendorid); sc->codecs[i].device_id = HDA_PARAM_VENDOR_ID_DEVICE_ID(vendorid); sc->codecs[i].revision_id = HDA_PARAM_REVISION_ID_REVISION_ID(revisionid); sc->codecs[i].stepping_id = HDA_PARAM_REVISION_ID_STEPPING_ID(revisionid); child = device_add_child(sc->dev, "hdacc", DEVICE_UNIT_ANY); if (child == NULL) { device_printf(sc->dev, "Failed to add CODEC device\n"); continue; } device_set_ivars(child, (void *)(intptr_t)i); sc->codecs[i].dev = child; } } bus_attach_children(sc->dev); SYSCTL_ADD_PROC(device_get_sysctl_ctx(sc->dev), SYSCTL_CHILDREN(device_get_sysctl_tree(sc->dev)), OID_AUTO, "pindump", CTLTYPE_INT | CTLFLAG_RW, sc->dev, sizeof(sc->dev), sysctl_hdac_pindump, "I", "Dump pin states/data"); SYSCTL_ADD_PROC(device_get_sysctl_ctx(sc->dev), SYSCTL_CHILDREN(device_get_sysctl_tree(sc->dev)), OID_AUTO, "polling", CTLTYPE_INT | CTLFLAG_RW, sc->dev, sizeof(sc->dev), sysctl_hdac_polling, "I", "Enable polling mode"); } 
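
/*
 * Usage sketch for the two sysctls registered above (assuming unit 0 of
 * the driver):
 *
 *	sysctl dev.hdac.0.polling=1	# switch to callout-driven polling
 *	sysctl dev.hdac.0.pindump=1	# dump pin states of all codecs
 *
 * Both requests are served by sysctl_hdac_polling() and
 * sysctl_hdac_pindump() defined earlier in this file.
 */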
/**************************************************************************** * int hdac_suspend(device_t) * * Suspend and power down HDA bus and codecs. ****************************************************************************/ static int hdac_suspend(device_t dev) { struct hdac_softc *sc = device_get_softc(dev); HDA_BOOTHVERBOSE( device_printf(dev, "Suspend...\n"); ); bus_generic_suspend(dev); hdac_lock(sc); HDA_BOOTHVERBOSE( device_printf(dev, "Reset controller...\n"); ); callout_stop(&sc->poll_callout); hdac_reset(sc, false); hdac_unlock(sc); callout_drain(&sc->poll_callout); taskqueue_drain(taskqueue_thread, &sc->unsolq_task); HDA_BOOTHVERBOSE( device_printf(dev, "Suspend done\n"); ); return (0); } /**************************************************************************** * int hdac_resume(device_t) * * Powerup and restore HDA bus and codecs state. ****************************************************************************/ static int hdac_resume(device_t dev) { struct hdac_softc *sc = device_get_softc(dev); int error; HDA_BOOTHVERBOSE( device_printf(dev, "Resume...\n"); ); hdac_lock(sc); /* Quiesce everything */ HDA_BOOTHVERBOSE( device_printf(dev, "Reset controller...\n"); ); hdac_reset(sc, true); /* Initialize the CORB and RIRB */ hdac_corb_init(sc); hdac_rirb_init(sc); HDA_BOOTHVERBOSE( device_printf(dev, "Starting CORB Engine...\n"); ); hdac_corb_start(sc); HDA_BOOTHVERBOSE( device_printf(dev, "Starting RIRB Engine...\n"); ); hdac_rirb_start(sc); /* * Clear HDAC_WAKEEN as at present we have no use for SDI wake * (status change) events. The documentation says that we should * not make any assumptions about the state of this register and * set it explicitly. * Also, clear HDAC_STATESTS. * NB: this needs to be done before the interrupt is enabled as * the handler does not expect this interrupt source. */ HDAC_WRITE_2(&sc->mem, HDAC_WAKEEN, 0); HDAC_WRITE_2(&sc->mem, HDAC_STATESTS, HDAC_STATESTS_SDIWAKE_MASK); HDA_BOOTHVERBOSE( device_printf(dev, "Enabling controller interrupt...\n"); ); HDAC_WRITE_4(&sc->mem, HDAC_GCTL, HDAC_READ_4(&sc->mem, HDAC_GCTL) | HDAC_GCTL_UNSOL); HDAC_WRITE_4(&sc->mem, HDAC_INTCTL, HDAC_INTCTL_CIE | HDAC_INTCTL_GIE); DELAY(1000); hdac_poll_reinit(sc); hdac_unlock(sc); error = bus_generic_resume(dev); HDA_BOOTHVERBOSE( device_printf(dev, "Resume done\n"); ); return (error); } /**************************************************************************** * int hdac_detach(device_t) * * Detach and free up resources utilized by the hdac device. 
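 *
 * As of this change the child codec devices are detached and deleted by
 * the single bus_generic_detach() call below, replacing the old loop
 * that deleted each codec with device_delete_child().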
****************************************************************************/ static int hdac_detach(device_t dev) { struct hdac_softc *sc = device_get_softc(dev); - device_t *devlist; - int cad, i, devcount, error; + int i, error; - if ((error = device_get_children(dev, &devlist, &devcount)) != 0) + error = bus_generic_detach(dev); + if (error != 0) return (error); - for (i = 0; i < devcount; i++) { - cad = (intptr_t)device_get_ivars(devlist[i]); - if ((error = device_delete_child(dev, devlist[i])) != 0) { - free(devlist, M_TEMP); - return (error); - } - sc->codecs[cad].dev = NULL; - } - free(devlist, M_TEMP); hdac_lock(sc); hdac_reset(sc, false); hdac_unlock(sc); taskqueue_drain(taskqueue_thread, &sc->unsolq_task); hdac_irq_free(sc); for (i = 0; i < sc->num_ss; i++) hdac_dma_free(sc, &sc->streams[i].bdl); free(sc->streams, M_HDAC); hdac_dma_free(sc, &sc->pos_dma); hdac_dma_free(sc, &sc->rirb_dma); hdac_dma_free(sc, &sc->corb_dma); if (sc->chan_dmat != NULL) { bus_dma_tag_destroy(sc->chan_dmat); sc->chan_dmat = NULL; } hdac_mem_free(sc); snd_mtxfree(sc->lock); return (0); } static bus_dma_tag_t hdac_get_dma_tag(device_t dev, device_t child) { struct hdac_softc *sc = device_get_softc(dev); return (sc->chan_dmat); } static int hdac_print_child(device_t dev, device_t child) { int retval; retval = bus_print_child_header(dev, child); retval += printf(" at cad %d", (int)(intptr_t)device_get_ivars(child)); retval += bus_print_child_footer(dev, child); return (retval); } static int hdac_child_location(device_t dev, device_t child, struct sbuf *sb) { sbuf_printf(sb, "cad=%d", (int)(intptr_t)device_get_ivars(child)); return (0); } static int hdac_child_pnpinfo_method(device_t dev, device_t child, struct sbuf *sb) { struct hdac_softc *sc = device_get_softc(dev); nid_t cad = (uintptr_t)device_get_ivars(child); sbuf_printf(sb, "vendor=0x%04x device=0x%04x revision=0x%02x stepping=0x%02x", sc->codecs[cad].vendor_id, sc->codecs[cad].device_id, sc->codecs[cad].revision_id, sc->codecs[cad].stepping_id); return (0); } static int hdac_read_ivar(device_t dev, device_t child, int which, uintptr_t *result) { struct hdac_softc *sc = device_get_softc(dev); nid_t cad = (uintptr_t)device_get_ivars(child); switch (which) { case HDA_IVAR_CODEC_ID: *result = cad; break; case HDA_IVAR_VENDOR_ID: *result = sc->codecs[cad].vendor_id; break; case HDA_IVAR_DEVICE_ID: *result = sc->codecs[cad].device_id; break; case HDA_IVAR_REVISION_ID: *result = sc->codecs[cad].revision_id; break; case HDA_IVAR_STEPPING_ID: *result = sc->codecs[cad].stepping_id; break; case HDA_IVAR_SUBVENDOR_ID: *result = pci_get_subvendor(dev); break; case HDA_IVAR_SUBDEVICE_ID: *result = pci_get_subdevice(dev); break; case HDA_IVAR_DMA_NOCACHE: *result = (sc->flags & HDAC_F_DMA_NOCACHE) != 0; break; case HDA_IVAR_STRIPES_MASK: *result = (1 << (1 << sc->num_sdo)) - 1; break; default: return (ENOENT); } return (0); } static struct mtx * hdac_get_mtx(device_t dev, device_t child) { struct hdac_softc *sc = device_get_softc(dev); return (sc->lock); } static uint32_t hdac_codec_command(device_t dev, device_t child, uint32_t verb) { return (hdac_send_command(device_get_softc(dev), (intptr_t)device_get_ivars(child), verb)); } static int hdac_find_stream(struct hdac_softc *sc, int dir, int stream) { int i, ss; ss = -1; /* Allocate ISS/OSS first. 
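	 *
	 * Stream indices are laid out as [0, num_iss) for input streams,
	 * [num_iss, num_iss + num_oss) for output streams, and the
	 * remaining slots for bidirectional streams, which is why the
	 * BSS range is only searched as a fallback.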
*/ if (dir == 0) { for (i = 0; i < sc->num_iss; i++) { if (sc->streams[i].stream == stream) { ss = i; break; } } } else { for (i = 0; i < sc->num_oss; i++) { if (sc->streams[i + sc->num_iss].stream == stream) { ss = i + sc->num_iss; break; } } } /* Fallback to BSS. */ if (ss == -1) { for (i = 0; i < sc->num_bss; i++) { if (sc->streams[i + sc->num_iss + sc->num_oss].stream == stream) { ss = i + sc->num_iss + sc->num_oss; break; } } } return (ss); } static int hdac_stream_alloc(device_t dev, device_t child, int dir, int format, int stripe, uint32_t **dmapos) { struct hdac_softc *sc = device_get_softc(dev); nid_t cad = (uintptr_t)device_get_ivars(child); int stream, ss, bw, maxbw, prevbw; /* Look for empty stream. */ ss = hdac_find_stream(sc, dir, 0); /* Return if found nothing. */ if (ss < 0) return (0); /* Check bus bandwidth. */ bw = hdac_bdata_rate(format, dir); if (dir == 1) { bw *= 1 << (sc->num_sdo - stripe); prevbw = sc->sdo_bw_used; maxbw = 48000 * 960 * (1 << sc->num_sdo); } else { prevbw = sc->codecs[cad].sdi_bw_used; maxbw = 48000 * 464; } HDA_BOOTHVERBOSE( device_printf(dev, "%dKbps of %dKbps bandwidth used%s\n", (bw + prevbw) / 1000, maxbw / 1000, bw + prevbw > maxbw ? " -- OVERFLOW!" : ""); ); if (bw + prevbw > maxbw) return (0); if (dir == 1) sc->sdo_bw_used += bw; else sc->codecs[cad].sdi_bw_used += bw; /* Allocate stream number */ if (ss >= sc->num_iss + sc->num_oss) stream = 15 - (ss - sc->num_iss - sc->num_oss); else if (ss >= sc->num_iss) stream = ss - sc->num_iss + 1; else stream = ss + 1; sc->streams[ss].dev = child; sc->streams[ss].dir = dir; sc->streams[ss].stream = stream; sc->streams[ss].bw = bw; sc->streams[ss].format = format; sc->streams[ss].stripe = stripe; if (dmapos != NULL) { if (sc->pos_dma.dma_vaddr != NULL) *dmapos = (uint32_t *)(sc->pos_dma.dma_vaddr + ss * 8); else *dmapos = NULL; } return (stream); } static void hdac_stream_free(device_t dev, device_t child, int dir, int stream) { struct hdac_softc *sc = device_get_softc(dev); nid_t cad = (uintptr_t)device_get_ivars(child); int ss; ss = hdac_find_stream(sc, dir, stream); KASSERT(ss >= 0, ("Free for not allocated stream (%d/%d)\n", dir, stream)); if (dir == 1) sc->sdo_bw_used -= sc->streams[ss].bw; else sc->codecs[cad].sdi_bw_used -= sc->streams[ss].bw; sc->streams[ss].stream = 0; sc->streams[ss].dev = NULL; } static int hdac_stream_start(device_t dev, device_t child, int dir, int stream, bus_addr_t buf, int blksz, int blkcnt) { struct hdac_softc *sc = device_get_softc(dev); struct hdac_bdle *bdle; uint64_t addr; int i, ss, off; uint32_t ctl; ss = hdac_find_stream(sc, dir, stream); KASSERT(ss >= 0, ("Start for not allocated stream (%d/%d)\n", dir, stream)); addr = (uint64_t)buf; bdle = (struct hdac_bdle *)sc->streams[ss].bdl.dma_vaddr; for (i = 0; i < blkcnt; i++, bdle++) { bdle->addrl = htole32((uint32_t)addr); bdle->addrh = htole32((uint32_t)(addr >> 32)); bdle->len = htole32(blksz); bdle->ioc = htole32(1); addr += blksz; } bus_dmamap_sync(sc->streams[ss].bdl.dma_tag, sc->streams[ss].bdl.dma_map, BUS_DMASYNC_PREWRITE); off = ss << 5; HDAC_WRITE_4(&sc->mem, off + HDAC_SDCBL, blksz * blkcnt); HDAC_WRITE_2(&sc->mem, off + HDAC_SDLVI, blkcnt - 1); addr = sc->streams[ss].bdl.dma_paddr; HDAC_WRITE_4(&sc->mem, off + HDAC_SDBDPL, (uint32_t)addr); HDAC_WRITE_4(&sc->mem, off + HDAC_SDBDPU, (uint32_t)(addr >> 32)); ctl = HDAC_READ_1(&sc->mem, off + HDAC_SDCTL2); if (dir) ctl |= HDAC_SDCTL2_DIR; else ctl &= ~HDAC_SDCTL2_DIR; ctl &= ~HDAC_SDCTL2_STRM_MASK; ctl |= stream << HDAC_SDCTL2_STRM_SHIFT; ctl &= 
	    ~HDAC_SDCTL2_STRIPE_MASK;
	ctl |= sc->streams[ss].stripe << HDAC_SDCTL2_STRIPE_SHIFT;
	HDAC_WRITE_1(&sc->mem, off + HDAC_SDCTL2, ctl);
	HDAC_WRITE_2(&sc->mem, off + HDAC_SDFMT, sc->streams[ss].format);
	ctl = HDAC_READ_4(&sc->mem, HDAC_INTCTL);
	ctl |= 1 << ss;
	HDAC_WRITE_4(&sc->mem, HDAC_INTCTL, ctl);
	HDAC_WRITE_1(&sc->mem, off + HDAC_SDSTS,
	    HDAC_SDSTS_DESE | HDAC_SDSTS_FIFOE | HDAC_SDSTS_BCIS);
	ctl = HDAC_READ_1(&sc->mem, off + HDAC_SDCTL0);
	ctl |= HDAC_SDCTL_IOCE | HDAC_SDCTL_FEIE | HDAC_SDCTL_DEIE |
	    HDAC_SDCTL_RUN;
	HDAC_WRITE_1(&sc->mem, off + HDAC_SDCTL0, ctl);
	sc->streams[ss].blksz = blksz;
	sc->streams[ss].running = 1;
	hdac_poll_reinit(sc);
	return (0);
}

static void
hdac_stream_stop(device_t dev, device_t child, int dir, int stream)
{
	struct hdac_softc *sc = device_get_softc(dev);
	int ss, off;
	uint32_t ctl;

	ss = hdac_find_stream(sc, dir, stream);
	KASSERT(ss >= 0,
	    ("Stop for not allocated stream (%d/%d)\n", dir, stream));

	bus_dmamap_sync(sc->streams[ss].bdl.dma_tag,
	    sc->streams[ss].bdl.dma_map, BUS_DMASYNC_POSTWRITE);

	off = ss << 5;
	ctl = HDAC_READ_1(&sc->mem, off + HDAC_SDCTL0);
	ctl &= ~(HDAC_SDCTL_IOCE | HDAC_SDCTL_FEIE | HDAC_SDCTL_DEIE |
	    HDAC_SDCTL_RUN);
	HDAC_WRITE_1(&sc->mem, off + HDAC_SDCTL0, ctl);
	ctl = HDAC_READ_4(&sc->mem, HDAC_INTCTL);
	ctl &= ~(1 << ss);
	HDAC_WRITE_4(&sc->mem, HDAC_INTCTL, ctl);
	sc->streams[ss].running = 0;
	hdac_poll_reinit(sc);
}

static void
hdac_stream_reset(device_t dev, device_t child, int dir, int stream)
{
	struct hdac_softc *sc = device_get_softc(dev);
	int timeout = 1000;
	int to = timeout;
	int ss, off;
	uint32_t ctl;

	ss = hdac_find_stream(sc, dir, stream);
	KASSERT(ss >= 0,
	    ("Reset for not allocated stream (%d/%d)\n", dir, stream));

	off = ss << 5;
	ctl = HDAC_READ_1(&sc->mem, off + HDAC_SDCTL0);
	ctl |= HDAC_SDCTL_SRST;
	HDAC_WRITE_1(&sc->mem, off + HDAC_SDCTL0, ctl);
	do {
		ctl = HDAC_READ_1(&sc->mem, off + HDAC_SDCTL0);
		if (ctl & HDAC_SDCTL_SRST)
			break;
		DELAY(10);
	} while (--to);
	if (!(ctl & HDAC_SDCTL_SRST))
		device_printf(dev, "Timeout setting stream reset\n");
	ctl &= ~HDAC_SDCTL_SRST;
	HDAC_WRITE_1(&sc->mem, off + HDAC_SDCTL0, ctl);
	to = timeout;
	do {
		ctl = HDAC_READ_1(&sc->mem, off + HDAC_SDCTL0);
		if (!(ctl & HDAC_SDCTL_SRST))
			break;
		DELAY(10);
	} while (--to);
	if (ctl & HDAC_SDCTL_SRST)
		device_printf(dev, "Timeout clearing stream reset\n");
}

static uint32_t
hdac_stream_getptr(device_t dev, device_t child, int dir, int stream)
{
	struct hdac_softc *sc = device_get_softc(dev);
	int ss, off;

	ss = hdac_find_stream(sc, dir, stream);
	KASSERT(ss >= 0,
	    ("Getptr for not allocated stream (%d/%d)\n", dir, stream));

	off = ss << 5;
	return (HDAC_READ_4(&sc->mem, off + HDAC_SDLPIB));
}

static int
hdac_unsol_alloc(device_t dev, device_t child, int tag)
{
	struct hdac_softc *sc = device_get_softc(dev);

	sc->unsol_registered++;
	hdac_poll_reinit(sc);
	return (tag);
}

static void
hdac_unsol_free(device_t dev, device_t child, int tag)
{
	struct hdac_softc *sc = device_get_softc(dev);

	sc->unsol_registered--;
	hdac_poll_reinit(sc);
}

static device_method_t hdac_methods[] = {
	/* device interface */
	DEVMETHOD(device_probe, hdac_probe),
	DEVMETHOD(device_attach, hdac_attach),
	DEVMETHOD(device_detach, hdac_detach),
	DEVMETHOD(device_suspend, hdac_suspend),
	DEVMETHOD(device_resume, hdac_resume),
	/* Bus interface */
	DEVMETHOD(bus_get_dma_tag, hdac_get_dma_tag),
	DEVMETHOD(bus_print_child, hdac_print_child),
	DEVMETHOD(bus_child_location, hdac_child_location),
	DEVMETHOD(bus_child_pnpinfo, hdac_child_pnpinfo_method),
	DEVMETHOD(bus_read_ivar, hdac_read_ivar),
	DEVMETHOD(hdac_get_mtx, hdac_get_mtx),
DEVMETHOD(hdac_codec_command, hdac_codec_command), DEVMETHOD(hdac_stream_alloc, hdac_stream_alloc), DEVMETHOD(hdac_stream_free, hdac_stream_free), DEVMETHOD(hdac_stream_start, hdac_stream_start), DEVMETHOD(hdac_stream_stop, hdac_stream_stop), DEVMETHOD(hdac_stream_reset, hdac_stream_reset), DEVMETHOD(hdac_stream_getptr, hdac_stream_getptr), DEVMETHOD(hdac_unsol_alloc, hdac_unsol_alloc), DEVMETHOD(hdac_unsol_free, hdac_unsol_free), DEVMETHOD_END }; static driver_t hdac_driver = { "hdac", hdac_methods, sizeof(struct hdac_softc), }; DRIVER_MODULE(snd_hda, pci, hdac_driver, NULL, NULL); diff --git a/sys/dev/usb/net/usb_ethernet.c b/sys/dev/usb/net/usb_ethernet.c index 2f423f557569..977805cefe66 100644 --- a/sys/dev/usb/net/usb_ethernet.c +++ b/sys/dev/usb/net/usb_ethernet.c @@ -1,661 +1,659 @@ /*- * SPDX-License-Identifier: BSD-2-Clause * * Copyright (c) 2009 Andrew Thompson (thompsa@FreeBSD.org) * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
*/ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include static SYSCTL_NODE(_net, OID_AUTO, ue, CTLFLAG_RD | CTLFLAG_MPSAFE, 0, "USB Ethernet parameters"); #define UE_LOCK(_ue) mtx_lock((_ue)->ue_mtx) #define UE_UNLOCK(_ue) mtx_unlock((_ue)->ue_mtx) #define UE_LOCK_ASSERT(_ue, t) mtx_assert((_ue)->ue_mtx, t) MODULE_DEPEND(uether, usb, 1, 1, 1); MODULE_DEPEND(uether, miibus, 1, 1, 1); static struct unrhdr *ueunit; static usb_proc_callback_t ue_attach_post_task; static usb_proc_callback_t ue_promisc_task; static usb_proc_callback_t ue_setmulti_task; static usb_proc_callback_t ue_ifmedia_task; static usb_proc_callback_t ue_tick_task; static usb_proc_callback_t ue_start_task; static usb_proc_callback_t ue_stop_task; static void ue_init(void *); static void ue_start(if_t); static int ue_ifmedia_upd(if_t); static void ue_watchdog(void *); /* * Return values: * 0: success * Else: device has been detached */ uint8_t uether_pause(struct usb_ether *ue, unsigned _ticks) { if (usb_proc_is_gone(&ue->ue_tq)) { /* nothing to do */ return (1); } usb_pause_mtx(ue->ue_mtx, _ticks); return (0); } static void ue_queue_command(struct usb_ether *ue, usb_proc_callback_t *fn, struct usb_proc_msg *t0, struct usb_proc_msg *t1) { struct usb_ether_cfg_task *task; UE_LOCK_ASSERT(ue, MA_OWNED); if (usb_proc_is_gone(&ue->ue_tq)) { return; /* nothing to do */ } /* * NOTE: The task cannot get executed before we drop the * "sc_mtx" mutex. It is safe to update fields in the message * structure after that the message got queued. */ task = (struct usb_ether_cfg_task *) usb_proc_msignal(&ue->ue_tq, t0, t1); /* Setup callback and self pointers */ task->hdr.pm_callback = fn; task->ue = ue; /* * Start and stop must be synchronous! 
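	 *
	 * (Presumably so that callers such as detach observe a fully
	 * started or stopped device before proceeding; all other
	 * commands are allowed to complete asynchronously.)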
*/ if ((fn == ue_start_task) || (fn == ue_stop_task)) usb_proc_mwait(&ue->ue_tq, t0, t1); } if_t uether_getifp(struct usb_ether *ue) { return (ue->ue_ifp); } struct mii_data * uether_getmii(struct usb_ether *ue) { return (device_get_softc(ue->ue_miibus)); } void * uether_getsc(struct usb_ether *ue) { return (ue->ue_sc); } static int ue_sysctl_parent(SYSCTL_HANDLER_ARGS) { struct usb_ether *ue = arg1; const char *name; name = device_get_nameunit(ue->ue_dev); return SYSCTL_OUT_STR(req, name); } int uether_ifattach(struct usb_ether *ue) { int error; /* check some critical parameters */ if ((ue->ue_dev == NULL) || (ue->ue_udev == NULL) || (ue->ue_mtx == NULL) || (ue->ue_methods == NULL)) return (EINVAL); error = usb_proc_create(&ue->ue_tq, ue->ue_mtx, device_get_nameunit(ue->ue_dev), USB_PRI_MED); if (error) { device_printf(ue->ue_dev, "could not setup taskqueue\n"); goto error; } /* fork rest of the attach code */ UE_LOCK(ue); ue_queue_command(ue, ue_attach_post_task, &ue->ue_sync_task[0].hdr, &ue->ue_sync_task[1].hdr); UE_UNLOCK(ue); error: return (error); } void uether_ifattach_wait(struct usb_ether *ue) { UE_LOCK(ue); usb_proc_mwait(&ue->ue_tq, &ue->ue_sync_task[0].hdr, &ue->ue_sync_task[1].hdr); UE_UNLOCK(ue); } static void ue_attach_post_task(struct usb_proc_msg *_task) { struct usb_ether_cfg_task *task = (struct usb_ether_cfg_task *)_task; struct usb_ether *ue = task->ue; if_t ifp; int error; char num[14]; /* sufficient for 32 bits */ /* first call driver's post attach routine */ ue->ue_methods->ue_attach_post(ue); UE_UNLOCK(ue); ue->ue_unit = alloc_unr(ueunit); usb_callout_init_mtx(&ue->ue_watchdog, ue->ue_mtx, 0); sysctl_ctx_init(&ue->ue_sysctl_ctx); mbufq_init(&ue->ue_rxq, 0 /* unlimited length */); error = 0; CURVNET_SET_QUIET(vnet0); ifp = if_alloc(IFT_ETHER); if_setsoftc(ifp, ue); if_initname(ifp, "ue", ue->ue_unit); if (ue->ue_methods->ue_attach_post_sub != NULL) { ue->ue_ifp = ifp; error = ue->ue_methods->ue_attach_post_sub(ue); } else { if_setflags(ifp, IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST); if (ue->ue_methods->ue_ioctl != NULL) if_setioctlfn(ifp, ue->ue_methods->ue_ioctl); else if_setioctlfn(ifp, uether_ioctl); if_setstartfn(ifp, ue_start); if_setinitfn(ifp, ue_init); if_setsendqlen(ifp, ifqmaxlen); if_setsendqready(ifp); ue->ue_ifp = ifp; if (ue->ue_methods->ue_mii_upd != NULL && ue->ue_methods->ue_mii_sts != NULL) { bus_topo_lock(); error = mii_attach(ue->ue_dev, &ue->ue_miibus, ifp, ue_ifmedia_upd, ue->ue_methods->ue_mii_sts, BMSR_DEFCAPMASK, MII_PHY_ANY, MII_OFFSET_ANY, 0); bus_topo_unlock(); } } if (error) { device_printf(ue->ue_dev, "attaching PHYs failed\n"); goto fail; } if_printf(ifp, " on %s\n", device_get_nameunit(ue->ue_dev)); ether_ifattach(ifp, ue->ue_eaddr); /* Tell upper layer we support VLAN oversized frames. 
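	 *
	 * Raising if_hdrlen to sizeof(struct ether_vlan_header) lets
	 * the stack account for the extra 4-byte 802.1Q tag when
	 * computing frame sizes.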
*/ if (if_getcapabilities(ifp) & IFCAP_VLAN_MTU) if_setifheaderlen(ifp, sizeof(struct ether_vlan_header)); CURVNET_RESTORE(); snprintf(num, sizeof(num), "%u", ue->ue_unit); ue->ue_sysctl_oid = SYSCTL_ADD_NODE(&ue->ue_sysctl_ctx, &SYSCTL_NODE_CHILDREN(_net, ue), OID_AUTO, num, CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, ""); SYSCTL_ADD_PROC(&ue->ue_sysctl_ctx, SYSCTL_CHILDREN(ue->ue_sysctl_oid), OID_AUTO, "%parent", CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, ue, 0, ue_sysctl_parent, "A", "parent device"); UE_LOCK(ue); return; fail: CURVNET_RESTORE(); /* drain mbuf queue */ mbufq_drain(&ue->ue_rxq); /* free unit */ free_unr(ueunit, ue->ue_unit); if (ue->ue_ifp != NULL) { if_free(ue->ue_ifp); ue->ue_ifp = NULL; } UE_LOCK(ue); return; } void uether_ifdetach(struct usb_ether *ue) { if_t ifp; /* wait for any post attach or other command to complete */ usb_proc_drain(&ue->ue_tq); /* read "ifnet" pointer after taskqueue drain */ ifp = ue->ue_ifp; if (ifp != NULL) { /* we are not running any more */ UE_LOCK(ue); if_setdrvflagbits(ifp, 0, IFF_DRV_RUNNING); UE_UNLOCK(ue); /* drain any callouts */ usb_callout_drain(&ue->ue_watchdog); /* * Detach ethernet first to stop miibus calls from * user-space: */ ether_ifdetach(ifp); /* detach miibus */ - if (ue->ue_miibus != NULL) { - bus_topo_lock(); - device_delete_child(ue->ue_dev, ue->ue_miibus); - bus_topo_unlock(); - } + bus_topo_lock(); + bus_generic_detach(ue->ue_dev); + bus_topo_unlock(); /* free interface instance */ if_free(ifp); /* free sysctl */ sysctl_ctx_free(&ue->ue_sysctl_ctx); /* drain mbuf queue */ mbufq_drain(&ue->ue_rxq); /* free unit */ free_unr(ueunit, ue->ue_unit); } /* free taskqueue, if any */ usb_proc_free(&ue->ue_tq); } uint8_t uether_is_gone(struct usb_ether *ue) { return (usb_proc_is_gone(&ue->ue_tq)); } void uether_init(void *arg) { ue_init(arg); } static void ue_init(void *arg) { struct usb_ether *ue = arg; UE_LOCK(ue); ue_queue_command(ue, ue_start_task, &ue->ue_sync_task[0].hdr, &ue->ue_sync_task[1].hdr); UE_UNLOCK(ue); } static void ue_start_task(struct usb_proc_msg *_task) { struct usb_ether_cfg_task *task = (struct usb_ether_cfg_task *)_task; struct usb_ether *ue = task->ue; if_t ifp = ue->ue_ifp; UE_LOCK_ASSERT(ue, MA_OWNED); ue->ue_methods->ue_init(ue); if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) == 0) return; if (ue->ue_methods->ue_tick != NULL) usb_callout_reset(&ue->ue_watchdog, hz, ue_watchdog, ue); } static void ue_stop_task(struct usb_proc_msg *_task) { struct usb_ether_cfg_task *task = (struct usb_ether_cfg_task *)_task; struct usb_ether *ue = task->ue; UE_LOCK_ASSERT(ue, MA_OWNED); usb_callout_stop(&ue->ue_watchdog); ue->ue_methods->ue_stop(ue); } void uether_start(if_t ifp) { ue_start(ifp); } static void ue_start(if_t ifp) { struct usb_ether *ue = if_getsoftc(ifp); if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) == 0) return; UE_LOCK(ue); ue->ue_methods->ue_start(ue); UE_UNLOCK(ue); } static void ue_promisc_task(struct usb_proc_msg *_task) { struct usb_ether_cfg_task *task = (struct usb_ether_cfg_task *)_task; struct usb_ether *ue = task->ue; ue->ue_methods->ue_setpromisc(ue); } static void ue_setmulti_task(struct usb_proc_msg *_task) { struct usb_ether_cfg_task *task = (struct usb_ether_cfg_task *)_task; struct usb_ether *ue = task->ue; ue->ue_methods->ue_setmulti(ue); } int uether_ifmedia_upd(if_t ifp) { return (ue_ifmedia_upd(ifp)); } static int ue_ifmedia_upd(if_t ifp) { struct usb_ether *ue = if_getsoftc(ifp); /* Defer to process context */ UE_LOCK(ue); ue_queue_command(ue, ue_ifmedia_task, &ue->ue_media_task[0].hdr, 
&ue->ue_media_task[1].hdr); UE_UNLOCK(ue); return (0); } static void ue_ifmedia_task(struct usb_proc_msg *_task) { struct usb_ether_cfg_task *task = (struct usb_ether_cfg_task *)_task; struct usb_ether *ue = task->ue; if_t ifp = ue->ue_ifp; ue->ue_methods->ue_mii_upd(ifp); } static void ue_watchdog(void *arg) { struct usb_ether *ue = arg; if_t ifp = ue->ue_ifp; if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) == 0) return; ue_queue_command(ue, ue_tick_task, &ue->ue_tick_task[0].hdr, &ue->ue_tick_task[1].hdr); usb_callout_reset(&ue->ue_watchdog, hz, ue_watchdog, ue); } static void ue_tick_task(struct usb_proc_msg *_task) { struct usb_ether_cfg_task *task = (struct usb_ether_cfg_task *)_task; struct usb_ether *ue = task->ue; if_t ifp = ue->ue_ifp; if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) == 0) return; ue->ue_methods->ue_tick(ue); } int uether_ioctl(if_t ifp, u_long command, caddr_t data) { struct usb_ether *ue = if_getsoftc(ifp); struct ifreq *ifr = (struct ifreq *)data; struct mii_data *mii; int error = 0; switch (command) { case SIOCSIFFLAGS: UE_LOCK(ue); if (if_getflags(ifp) & IFF_UP) { if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) ue_queue_command(ue, ue_promisc_task, &ue->ue_promisc_task[0].hdr, &ue->ue_promisc_task[1].hdr); else ue_queue_command(ue, ue_start_task, &ue->ue_sync_task[0].hdr, &ue->ue_sync_task[1].hdr); } else { ue_queue_command(ue, ue_stop_task, &ue->ue_sync_task[0].hdr, &ue->ue_sync_task[1].hdr); } UE_UNLOCK(ue); break; case SIOCADDMULTI: case SIOCDELMULTI: UE_LOCK(ue); ue_queue_command(ue, ue_setmulti_task, &ue->ue_multi_task[0].hdr, &ue->ue_multi_task[1].hdr); UE_UNLOCK(ue); break; case SIOCGIFMEDIA: case SIOCSIFMEDIA: if (ue->ue_miibus != NULL) { mii = device_get_softc(ue->ue_miibus); error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command); } else error = ether_ioctl(ifp, command, data); break; default: error = ether_ioctl(ifp, command, data); break; } return (error); } static int uether_modevent(module_t mod, int type, void *data) { switch (type) { case MOD_LOAD: ueunit = new_unrhdr(0, INT_MAX, NULL); break; case MOD_UNLOAD: break; default: return (EOPNOTSUPP); } return (0); } static moduledata_t uether_mod = { "uether", uether_modevent, 0 }; struct mbuf * uether_newbuf(void) { struct mbuf *m_new; m_new = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR); if (m_new == NULL) return (NULL); m_new->m_len = m_new->m_pkthdr.len = MCLBYTES; m_adj(m_new, ETHER_ALIGN); return (m_new); } int uether_rxmbuf(struct usb_ether *ue, struct mbuf *m, unsigned len) { if_t ifp = ue->ue_ifp; UE_LOCK_ASSERT(ue, MA_OWNED); /* finalize mbuf */ if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1); m->m_pkthdr.rcvif = ifp; m->m_pkthdr.len = m->m_len = len; /* enqueue for later when the lock can be released */ (void)mbufq_enqueue(&ue->ue_rxq, m); return (0); } int uether_rxbuf(struct usb_ether *ue, struct usb_page_cache *pc, unsigned offset, unsigned len) { if_t ifp = ue->ue_ifp; struct mbuf *m; UE_LOCK_ASSERT(ue, MA_OWNED); if (len < ETHER_HDR_LEN || len > MCLBYTES - ETHER_ALIGN) return (1); m = uether_newbuf(); if (m == NULL) { if_inc_counter(ifp, IFCOUNTER_IQDROPS, 1); return (ENOMEM); } usbd_copy_out(pc, offset, mtod(m, uint8_t *), len); /* finalize mbuf */ if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1); m->m_pkthdr.rcvif = ifp; m->m_pkthdr.len = m->m_len = len; /* enqueue for later when the lock can be released */ (void)mbufq_enqueue(&ue->ue_rxq, m); return (0); } void uether_rxflush(struct usb_ether *ue) { if_t ifp = ue->ue_ifp; struct epoch_tracker et; struct mbuf *m, *n; UE_LOCK_ASSERT(ue, MA_OWNED); n = 
mbufq_flush(&ue->ue_rxq); UE_UNLOCK(ue); NET_EPOCH_ENTER(et); while ((m = n) != NULL) { n = STAILQ_NEXT(m, m_stailqpkt); m->m_nextpkt = NULL; if_input(ifp, m); } NET_EPOCH_EXIT(et); UE_LOCK(ue); } /* * USB net drivers are run by DRIVER_MODULE() thus SI_SUB_DRIVERS, * SI_ORDER_MIDDLE. Run uether after that. */ DECLARE_MODULE(uether, uether_mod, SI_SUB_DRIVERS, SI_ORDER_ANY); MODULE_VERSION(uether, 1); diff --git a/sys/dev/virtio/mmio/virtio_mmio.c b/sys/dev/virtio/mmio/virtio_mmio.c index b1a4230f7b46..175b33b42ed8 100644 --- a/sys/dev/virtio/mmio/virtio_mmio.c +++ b/sys/dev/virtio/mmio/virtio_mmio.c @@ -1,1026 +1,1022 @@ /*- * Copyright (c) 2014 Ruslan Bukin * Copyright (c) 2014 The FreeBSD Foundation * All rights reserved. * * This software was developed by SRI International and the University of * Cambridge Computer Laboratory under DARPA/AFRL contract (FA8750-10-C-0237) * ("CTSRD"), as part of the DARPA CRASH research programme. * * Portions of this software were developed by Andrew Turner * under sponsorship from the FreeBSD Foundation. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ /* * VirtIO MMIO interface. * This driver is heavily based on VirtIO PCI interface driver. 
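* Unlike PCI there is no capability list: the device is discovered * through a fixed register window carrying a magic value ("virt"), a * version register and a device ID, all of which vtmmio_probe() checks * before any child device is created.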
*/ #include #include #include #include #include #include #include #include #include #include #include #include #include #include "virtio_mmio_if.h" #include "virtio_bus_if.h" #include "virtio_if.h" struct vtmmio_virtqueue { struct virtqueue *vtv_vq; int vtv_no_intr; }; static int vtmmio_detach(device_t); static int vtmmio_suspend(device_t); static int vtmmio_resume(device_t); static int vtmmio_shutdown(device_t); static void vtmmio_driver_added(device_t, driver_t *); static void vtmmio_child_detached(device_t, device_t); static int vtmmio_read_ivar(device_t, device_t, int, uintptr_t *); static int vtmmio_write_ivar(device_t, device_t, int, uintptr_t); static uint64_t vtmmio_negotiate_features(device_t, uint64_t); static int vtmmio_finalize_features(device_t); static bool vtmmio_with_feature(device_t, uint64_t); static void vtmmio_set_virtqueue(struct vtmmio_softc *sc, struct virtqueue *vq, uint32_t size); static int vtmmio_alloc_virtqueues(device_t, int, struct vq_alloc_info *); static int vtmmio_setup_intr(device_t, enum intr_type); static void vtmmio_stop(device_t); static void vtmmio_poll(device_t); static int vtmmio_reinit(device_t, uint64_t); static void vtmmio_reinit_complete(device_t); static void vtmmio_notify_virtqueue(device_t, uint16_t, bus_size_t); static int vtmmio_config_generation(device_t); static uint8_t vtmmio_get_status(device_t); static void vtmmio_set_status(device_t, uint8_t); static void vtmmio_read_dev_config(device_t, bus_size_t, void *, int); static uint64_t vtmmio_read_dev_config_8(struct vtmmio_softc *, bus_size_t); static void vtmmio_write_dev_config(device_t, bus_size_t, const void *, int); static void vtmmio_describe_features(struct vtmmio_softc *, const char *, uint64_t); static void vtmmio_probe_and_attach_child(struct vtmmio_softc *); static int vtmmio_reinit_virtqueue(struct vtmmio_softc *, int); static void vtmmio_free_interrupts(struct vtmmio_softc *); static void vtmmio_free_virtqueues(struct vtmmio_softc *); static void vtmmio_release_child_resources(struct vtmmio_softc *); static void vtmmio_reset(struct vtmmio_softc *); static void vtmmio_select_virtqueue(struct vtmmio_softc *, int); static void vtmmio_vq_intr(void *); /* * I/O port read/write wrappers. */ #define vtmmio_write_config_1(sc, o, v) \ do { \ if (sc->platform != NULL) \ VIRTIO_MMIO_PREWRITE(sc->platform, (o), (v)); \ bus_write_1((sc)->res[0], (o), (v)); \ if (sc->platform != NULL) \ VIRTIO_MMIO_NOTE(sc->platform, (o), (v)); \ } while (0) #define vtmmio_write_config_2(sc, o, v) \ do { \ if (sc->platform != NULL) \ VIRTIO_MMIO_PREWRITE(sc->platform, (o), (v)); \ bus_write_2((sc)->res[0], (o), (v)); \ if (sc->platform != NULL) \ VIRTIO_MMIO_NOTE(sc->platform, (o), (v)); \ } while (0) #define vtmmio_write_config_4(sc, o, v) \ do { \ if (sc->platform != NULL) \ VIRTIO_MMIO_PREWRITE(sc->platform, (o), (v)); \ bus_write_4((sc)->res[0], (o), (v)); \ if (sc->platform != NULL) \ VIRTIO_MMIO_NOTE(sc->platform, (o), (v)); \ } while (0) #define vtmmio_read_config_1(sc, o) \ bus_read_1((sc)->res[0], (o)) #define vtmmio_read_config_2(sc, o) \ bus_read_2((sc)->res[0], (o)) #define vtmmio_read_config_4(sc, o) \ bus_read_4((sc)->res[0], (o)) static device_method_t vtmmio_methods[] = { /* Device interface. */ DEVMETHOD(device_attach, vtmmio_attach), DEVMETHOD(device_detach, vtmmio_detach), DEVMETHOD(device_suspend, vtmmio_suspend), DEVMETHOD(device_resume, vtmmio_resume), DEVMETHOD(device_shutdown, vtmmio_shutdown), /* Bus interface. 
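* (Children are created in vtmmio_attach() and probed lazily as * matching drivers register; see vtmmio_driver_added().)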
*/ DEVMETHOD(bus_driver_added, vtmmio_driver_added), DEVMETHOD(bus_child_detached, vtmmio_child_detached), DEVMETHOD(bus_child_pnpinfo, virtio_child_pnpinfo), DEVMETHOD(bus_read_ivar, vtmmio_read_ivar), DEVMETHOD(bus_write_ivar, vtmmio_write_ivar), /* VirtIO bus interface. */ DEVMETHOD(virtio_bus_negotiate_features, vtmmio_negotiate_features), DEVMETHOD(virtio_bus_finalize_features, vtmmio_finalize_features), DEVMETHOD(virtio_bus_with_feature, vtmmio_with_feature), DEVMETHOD(virtio_bus_alloc_virtqueues, vtmmio_alloc_virtqueues), DEVMETHOD(virtio_bus_setup_intr, vtmmio_setup_intr), DEVMETHOD(virtio_bus_stop, vtmmio_stop), DEVMETHOD(virtio_bus_poll, vtmmio_poll), DEVMETHOD(virtio_bus_reinit, vtmmio_reinit), DEVMETHOD(virtio_bus_reinit_complete, vtmmio_reinit_complete), DEVMETHOD(virtio_bus_notify_vq, vtmmio_notify_virtqueue), DEVMETHOD(virtio_bus_config_generation, vtmmio_config_generation), DEVMETHOD(virtio_bus_read_device_config, vtmmio_read_dev_config), DEVMETHOD(virtio_bus_write_device_config, vtmmio_write_dev_config), DEVMETHOD_END }; DEFINE_CLASS_0(virtio_mmio, vtmmio_driver, vtmmio_methods, sizeof(struct vtmmio_softc)); MODULE_VERSION(virtio_mmio, 1); int vtmmio_probe(device_t dev) { struct vtmmio_softc *sc; int rid; uint32_t magic, version; sc = device_get_softc(dev); rid = 0; sc->res[0] = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid, RF_ACTIVE); if (sc->res[0] == NULL) { device_printf(dev, "Cannot allocate memory window.\n"); return (ENXIO); } magic = vtmmio_read_config_4(sc, VIRTIO_MMIO_MAGIC_VALUE); if (magic != VIRTIO_MMIO_MAGIC_VIRT) { device_printf(dev, "Bad magic value %#x\n", magic); bus_release_resource(dev, SYS_RES_MEMORY, rid, sc->res[0]); return (ENXIO); } version = vtmmio_read_config_4(sc, VIRTIO_MMIO_VERSION); if (version < 1 || version > 2) { device_printf(dev, "Unsupported version: %#x\n", version); bus_release_resource(dev, SYS_RES_MEMORY, rid, sc->res[0]); return (ENXIO); } if (vtmmio_read_config_4(sc, VIRTIO_MMIO_DEVICE_ID) == 0) { bus_release_resource(dev, SYS_RES_MEMORY, rid, sc->res[0]); return (ENXIO); } bus_release_resource(dev, SYS_RES_MEMORY, rid, sc->res[0]); device_set_desc(dev, "VirtIO MMIO adapter"); return (BUS_PROBE_DEFAULT); } static int vtmmio_setup_intr(device_t dev, enum intr_type type) { struct vtmmio_softc *sc; int rid; int err; sc = device_get_softc(dev); if (sc->platform != NULL) { err = VIRTIO_MMIO_SETUP_INTR(sc->platform, sc->dev, vtmmio_vq_intr, sc); if (err == 0) { /* Okay we have backend-specific interrupts */ return (0); } } rid = 0; sc->res[1] = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, RF_ACTIVE); if (!sc->res[1]) { device_printf(dev, "Can't allocate interrupt\n"); return (ENXIO); } if (bus_setup_intr(dev, sc->res[1], type | INTR_MPSAFE, NULL, vtmmio_vq_intr, sc, &sc->ih)) { device_printf(dev, "Can't setup the interrupt\n"); return (ENXIO); } return (0); } int vtmmio_attach(device_t dev) { struct vtmmio_softc *sc; device_t child; int rid; sc = device_get_softc(dev); sc->dev = dev; rid = 0; sc->res[0] = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid, RF_ACTIVE); if (sc->res[0] == NULL) { device_printf(dev, "Cannot allocate memory window.\n"); return (ENXIO); } sc->vtmmio_version = vtmmio_read_config_4(sc, VIRTIO_MMIO_VERSION); vtmmio_reset(sc); /* Tell the host we've noticed this device. 
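* This is the first step of the VirtIO status handshake: * ACK -> DRIVER -> (FEATURES_OK, v2 only) -> DRIVER_OK. The later * steps are driven from vtmmio_probe_and_attach_child() and * vtmmio_finalize_features().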
*/ vtmmio_set_status(dev, VIRTIO_CONFIG_STATUS_ACK); if ((child = device_add_child(dev, NULL, -1)) == NULL) { device_printf(dev, "Cannot create child device.\n"); vtmmio_set_status(dev, VIRTIO_CONFIG_STATUS_FAILED); vtmmio_detach(dev); return (ENOMEM); } sc->vtmmio_child_dev = child; vtmmio_probe_and_attach_child(sc); return (0); } static int vtmmio_detach(device_t dev) { struct vtmmio_softc *sc; - device_t child; int error; sc = device_get_softc(dev); - if ((child = sc->vtmmio_child_dev) != NULL) { - error = device_delete_child(dev, child); - if (error) - return (error); - sc->vtmmio_child_dev = NULL; - } + error = bus_generic_detach(dev); + if (error) + return (error); vtmmio_reset(sc); if (sc->res[0] != NULL) { bus_release_resource(dev, SYS_RES_MEMORY, 0, sc->res[0]); sc->res[0] = NULL; } return (0); } static int vtmmio_suspend(device_t dev) { return (bus_generic_suspend(dev)); } static int vtmmio_resume(device_t dev) { return (bus_generic_resume(dev)); } static int vtmmio_shutdown(device_t dev) { (void) bus_generic_shutdown(dev); /* Forcibly stop the host device. */ vtmmio_stop(dev); return (0); } static void vtmmio_driver_added(device_t dev, driver_t *driver) { struct vtmmio_softc *sc; sc = device_get_softc(dev); vtmmio_probe_and_attach_child(sc); } static void vtmmio_child_detached(device_t dev, device_t child) { struct vtmmio_softc *sc; sc = device_get_softc(dev); vtmmio_reset(sc); vtmmio_release_child_resources(sc); } static int vtmmio_read_ivar(device_t dev, device_t child, int index, uintptr_t *result) { struct vtmmio_softc *sc; sc = device_get_softc(dev); if (sc->vtmmio_child_dev != child) return (ENOENT); switch (index) { case VIRTIO_IVAR_DEVTYPE: case VIRTIO_IVAR_SUBDEVICE: *result = vtmmio_read_config_4(sc, VIRTIO_MMIO_DEVICE_ID); break; case VIRTIO_IVAR_VENDOR: *result = vtmmio_read_config_4(sc, VIRTIO_MMIO_VENDOR_ID); break; case VIRTIO_IVAR_SUBVENDOR: case VIRTIO_IVAR_DEVICE: /* * Dummy value for fields not present in this bus. Used by * bus-agnostic virtio_child_pnpinfo. */ *result = 0; break; case VIRTIO_IVAR_MODERN: /* * There are several modern (aka MMIO v2) spec compliance * issues with this driver, but keep the status quo. */ *result = sc->vtmmio_version > 1; break; default: return (ENOENT); } return (0); } static int vtmmio_write_ivar(device_t dev, device_t child, int index, uintptr_t value) { struct vtmmio_softc *sc; sc = device_get_softc(dev); if (sc->vtmmio_child_dev != child) return (ENOENT); switch (index) { case VIRTIO_IVAR_FEATURE_DESC: sc->vtmmio_child_feat_desc = (void *) value; break; default: return (ENOENT); } return (0); } static uint64_t vtmmio_negotiate_features(device_t dev, uint64_t child_features) { struct vtmmio_softc *sc; uint64_t host_features, features; sc = device_get_softc(dev); if (sc->vtmmio_version > 1) { child_features |= VIRTIO_F_VERSION_1; } vtmmio_write_config_4(sc, VIRTIO_MMIO_HOST_FEATURES_SEL, 1); host_features = vtmmio_read_config_4(sc, VIRTIO_MMIO_HOST_FEATURES); host_features <<= 32; vtmmio_write_config_4(sc, VIRTIO_MMIO_HOST_FEATURES_SEL, 0); host_features |= vtmmio_read_config_4(sc, VIRTIO_MMIO_HOST_FEATURES); vtmmio_describe_features(sc, "host", host_features); /* * Limit negotiated features to what the driver, virtqueue, and * host all support. 
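* (The 64-bit masks are exchanged as two 32-bit halves selected via * the HOST/GUEST_FEATURES_SEL registers, as read above and written * back below.)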
*/ features = host_features & child_features; features = virtio_filter_transport_features(features); sc->vtmmio_features = features; vtmmio_describe_features(sc, "negotiated", features); vtmmio_write_config_4(sc, VIRTIO_MMIO_GUEST_FEATURES_SEL, 1); vtmmio_write_config_4(sc, VIRTIO_MMIO_GUEST_FEATURES, features >> 32); vtmmio_write_config_4(sc, VIRTIO_MMIO_GUEST_FEATURES_SEL, 0); vtmmio_write_config_4(sc, VIRTIO_MMIO_GUEST_FEATURES, features); return (features); } static int vtmmio_finalize_features(device_t dev) { struct vtmmio_softc *sc; uint8_t status; sc = device_get_softc(dev); if (sc->vtmmio_version > 1) { /* * Must re-read the status after setting it to verify the * negotiated features were accepted by the device. */ vtmmio_set_status(dev, VIRTIO_CONFIG_S_FEATURES_OK); status = vtmmio_get_status(dev); if ((status & VIRTIO_CONFIG_S_FEATURES_OK) == 0) { device_printf(dev, "desired features were not accepted\n"); return (ENOTSUP); } } return (0); } static bool vtmmio_with_feature(device_t dev, uint64_t feature) { struct vtmmio_softc *sc; sc = device_get_softc(dev); return ((sc->vtmmio_features & feature) != 0); } static void vtmmio_set_virtqueue(struct vtmmio_softc *sc, struct virtqueue *vq, uint32_t size) { vm_paddr_t paddr; vtmmio_write_config_4(sc, VIRTIO_MMIO_QUEUE_NUM, size); if (sc->vtmmio_version == 1) { vtmmio_write_config_4(sc, VIRTIO_MMIO_QUEUE_ALIGN, VIRTIO_MMIO_VRING_ALIGN); paddr = virtqueue_paddr(vq); vtmmio_write_config_4(sc, VIRTIO_MMIO_QUEUE_PFN, paddr >> PAGE_SHIFT); } else { paddr = virtqueue_desc_paddr(vq); vtmmio_write_config_4(sc, VIRTIO_MMIO_QUEUE_DESC_LOW, paddr); vtmmio_write_config_4(sc, VIRTIO_MMIO_QUEUE_DESC_HIGH, ((uint64_t)paddr) >> 32); paddr = virtqueue_avail_paddr(vq); vtmmio_write_config_4(sc, VIRTIO_MMIO_QUEUE_AVAIL_LOW, paddr); vtmmio_write_config_4(sc, VIRTIO_MMIO_QUEUE_AVAIL_HIGH, ((uint64_t)paddr) >> 32); paddr = virtqueue_used_paddr(vq); vtmmio_write_config_4(sc, VIRTIO_MMIO_QUEUE_USED_LOW, paddr); vtmmio_write_config_4(sc, VIRTIO_MMIO_QUEUE_USED_HIGH, ((uint64_t)paddr) >> 32); vtmmio_write_config_4(sc, VIRTIO_MMIO_QUEUE_READY, 1); } } static int vtmmio_alloc_virtqueues(device_t dev, int nvqs, struct vq_alloc_info *vq_info) { struct vtmmio_virtqueue *vqx; struct vq_alloc_info *info; struct vtmmio_softc *sc; struct virtqueue *vq; uint32_t size; int idx, error; sc = device_get_softc(dev); if (sc->vtmmio_nvqs != 0) return (EALREADY); if (nvqs <= 0) return (EINVAL); sc->vtmmio_vqs = malloc(nvqs * sizeof(struct vtmmio_virtqueue), M_DEVBUF, M_NOWAIT | M_ZERO); if (sc->vtmmio_vqs == NULL) return (ENOMEM); if (sc->vtmmio_version == 1) { vtmmio_write_config_4(sc, VIRTIO_MMIO_GUEST_PAGE_SIZE, (1 << PAGE_SHIFT)); } for (idx = 0; idx < nvqs; idx++) { vqx = &sc->vtmmio_vqs[idx]; info = &vq_info[idx]; vtmmio_select_virtqueue(sc, idx); size = vtmmio_read_config_4(sc, VIRTIO_MMIO_QUEUE_NUM_MAX); error = virtqueue_alloc(dev, idx, size, VIRTIO_MMIO_QUEUE_NOTIFY, VIRTIO_MMIO_VRING_ALIGN, ~(vm_paddr_t)0, info, &vq); if (error) { device_printf(dev, "cannot allocate virtqueue %d: %d\n", idx, error); break; } vtmmio_set_virtqueue(sc, vq, size); vqx->vtv_vq = *info->vqai_vq = vq; vqx->vtv_no_intr = info->vqai_intr == NULL; sc->vtmmio_nvqs++; } if (error) vtmmio_free_virtqueues(sc); return (error); } static void vtmmio_stop(device_t dev) { vtmmio_reset(device_get_softc(dev)); } static void vtmmio_poll(device_t dev) { struct vtmmio_softc *sc; sc = device_get_softc(dev); if (sc->platform != NULL) VIRTIO_MMIO_POLL(sc->platform); } static int vtmmio_reinit(device_t dev, 
uint64_t features) { struct vtmmio_softc *sc; int idx, error; sc = device_get_softc(dev); if (vtmmio_get_status(dev) != VIRTIO_CONFIG_STATUS_RESET) vtmmio_stop(dev); /* * Quickly drive the status through ACK and DRIVER. The device * does not become usable again until vtmmio_reinit_complete(). */ vtmmio_set_status(dev, VIRTIO_CONFIG_STATUS_ACK); vtmmio_set_status(dev, VIRTIO_CONFIG_STATUS_DRIVER); /* * TODO: Check that no features were added beyond what was * originally negotiated. */ vtmmio_negotiate_features(dev, features); error = vtmmio_finalize_features(dev); if (error) { device_printf(dev, "cannot finalize features during reinit\n"); return (error); } if (sc->vtmmio_version == 1) { vtmmio_write_config_4(sc, VIRTIO_MMIO_GUEST_PAGE_SIZE, (1 << PAGE_SHIFT)); } for (idx = 0; idx < sc->vtmmio_nvqs; idx++) { error = vtmmio_reinit_virtqueue(sc, idx); if (error) return (error); } return (0); } static void vtmmio_reinit_complete(device_t dev) { vtmmio_set_status(dev, VIRTIO_CONFIG_STATUS_DRIVER_OK); } static void vtmmio_notify_virtqueue(device_t dev, uint16_t queue, bus_size_t offset) { struct vtmmio_softc *sc; sc = device_get_softc(dev); MPASS(offset == VIRTIO_MMIO_QUEUE_NOTIFY); vtmmio_write_config_4(sc, offset, queue); } static int vtmmio_config_generation(device_t dev) { struct vtmmio_softc *sc; uint32_t gen; sc = device_get_softc(dev); if (sc->vtmmio_version > 1) gen = vtmmio_read_config_4(sc, VIRTIO_MMIO_CONFIG_GENERATION); else gen = 0; return (gen); } static uint8_t vtmmio_get_status(device_t dev) { struct vtmmio_softc *sc; sc = device_get_softc(dev); return (vtmmio_read_config_4(sc, VIRTIO_MMIO_STATUS)); } static void vtmmio_set_status(device_t dev, uint8_t status) { struct vtmmio_softc *sc; sc = device_get_softc(dev); if (status != VIRTIO_CONFIG_STATUS_RESET) status |= vtmmio_get_status(dev); vtmmio_write_config_4(sc, VIRTIO_MMIO_STATUS, status); } static void vtmmio_read_dev_config(device_t dev, bus_size_t offset, void *dst, int length) { struct vtmmio_softc *sc; bus_size_t off; uint8_t *d; int size; sc = device_get_softc(dev); off = VIRTIO_MMIO_CONFIG + offset; /* * The non-legacy MMIO specification adds the following restriction: * * 4.2.2.2: For the device-specific configuration space, the driver * MUST use 8 bit wide accesses for 8 bit wide fields, 16 bit wide * and aligned accesses for 16 bit wide fields and 32 bit wide and * aligned accesses for 32 and 64 bit wide fields. * * The endianness also varies between non-legacy and legacy: * * 2.4: Note: The device configuration space uses the little-endian * format for multi-byte fields. * * 2.4.3: Note that for legacy interfaces, device configuration space * is generally the guest’s native endian, rather than PCI’s * little-endian. The correct endian-ness is documented for each * device.
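* Accordingly, the v2 path below uses fixed-width, little-endian * accesses (le16toh/le32toh), while the legacy path copies byte by * byte (or word-sized when ALLOW_WORD_ALIGNED_ACCESS is defined).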
*/ if (sc->vtmmio_version > 1) { switch (length) { case 1: *(uint8_t *)dst = vtmmio_read_config_1(sc, off); break; case 2: *(uint16_t *)dst = le16toh(vtmmio_read_config_2(sc, off)); break; case 4: *(uint32_t *)dst = le32toh(vtmmio_read_config_4(sc, off)); break; case 8: *(uint64_t *)dst = vtmmio_read_dev_config_8(sc, off); break; default: panic("%s: invalid length %d\n", __func__, length); } return; } for (d = dst; length > 0; d += size, off += size, length -= size) { #ifdef ALLOW_WORD_ALIGNED_ACCESS if (length >= 4) { size = 4; *(uint32_t *)d = vtmmio_read_config_4(sc, off); } else if (length >= 2) { size = 2; *(uint16_t *)d = vtmmio_read_config_2(sc, off); } else #endif { size = 1; *d = vtmmio_read_config_1(sc, off); } } } static uint64_t vtmmio_read_dev_config_8(struct vtmmio_softc *sc, bus_size_t off) { device_t dev; int gen; uint32_t val0, val1; dev = sc->dev; do { gen = vtmmio_config_generation(dev); val0 = le32toh(vtmmio_read_config_4(sc, off)); val1 = le32toh(vtmmio_read_config_4(sc, off + 4)); } while (gen != vtmmio_config_generation(dev)); return (((uint64_t) val1 << 32) | val0); } static void vtmmio_write_dev_config(device_t dev, bus_size_t offset, const void *src, int length) { struct vtmmio_softc *sc; bus_size_t off; const uint8_t *s; int size; sc = device_get_softc(dev); off = VIRTIO_MMIO_CONFIG + offset; /* * The non-legacy MMIO specification adds size and alignment * restrictions. It also changes the endianness from native-endian to * little-endian. See vtmmio_read_dev_config. */ if (sc->vtmmio_version > 1) { switch (length) { case 1: vtmmio_write_config_1(sc, off, *(const uint8_t *)src); break; case 2: vtmmio_write_config_2(sc, off, htole16(*(const uint16_t *)src)); break; case 4: vtmmio_write_config_4(sc, off, htole32(*(const uint32_t *)src)); break; case 8: vtmmio_write_config_4(sc, off, htole32(*(const uint64_t *)src)); vtmmio_write_config_4(sc, off + 4, htole32((*(const uint64_t *)src) >> 32)); break; default: panic("%s: invalid length %d\n", __func__, length); } return; } for (s = src; length > 0; s += size, off += size, length -= size) { #ifdef ALLOW_WORD_ALIGNED_ACCESS if (length >= 4) { size = 4; vtmmio_write_config_4(sc, off, *(uint32_t *)s); } else if (length >= 2) { size = 2; vtmmio_write_config_2(sc, off, *(uint16_t *)s); } else #endif { size = 1; vtmmio_write_config_1(sc, off, *s); } } } static void vtmmio_describe_features(struct vtmmio_softc *sc, const char *msg, uint64_t features) { device_t dev, child; dev = sc->dev; child = sc->vtmmio_child_dev; if (device_is_attached(child) || bootverbose == 0) return; virtio_describe(dev, msg, features, sc->vtmmio_child_feat_desc); } static void vtmmio_probe_and_attach_child(struct vtmmio_softc *sc) { device_t dev, child; dev = sc->dev; child = sc->vtmmio_child_dev; if (child == NULL) return; if (device_get_state(child) != DS_NOTPRESENT) { return; } if (device_probe(child) != 0) { return; } vtmmio_set_status(dev, VIRTIO_CONFIG_STATUS_DRIVER); if (device_attach(child) != 0) { vtmmio_set_status(dev, VIRTIO_CONFIG_STATUS_FAILED); vtmmio_reset(sc); vtmmio_release_child_resources(sc); /* Reset status for future attempt.
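* Dropping back to ACK rather than leaving FAILED set lets a later * attach attempt retry the handshake from a known state.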
*/ vtmmio_set_status(dev, VIRTIO_CONFIG_STATUS_ACK); } else { vtmmio_set_status(dev, VIRTIO_CONFIG_STATUS_DRIVER_OK); VIRTIO_ATTACH_COMPLETED(child); } } static int vtmmio_reinit_virtqueue(struct vtmmio_softc *sc, int idx) { struct vtmmio_virtqueue *vqx; struct virtqueue *vq; int error; uint16_t size; vqx = &sc->vtmmio_vqs[idx]; vq = vqx->vtv_vq; KASSERT(vq != NULL, ("%s: vq %d not allocated", __func__, idx)); vtmmio_select_virtqueue(sc, idx); size = vtmmio_read_config_4(sc, VIRTIO_MMIO_QUEUE_NUM_MAX); error = virtqueue_reinit(vq, size); if (error) return (error); vtmmio_set_virtqueue(sc, vq, size); return (0); } static void vtmmio_free_interrupts(struct vtmmio_softc *sc) { if (sc->ih != NULL) bus_teardown_intr(sc->dev, sc->res[1], sc->ih); if (sc->res[1] != NULL) bus_release_resource(sc->dev, SYS_RES_IRQ, 0, sc->res[1]); } static void vtmmio_free_virtqueues(struct vtmmio_softc *sc) { struct vtmmio_virtqueue *vqx; int idx; for (idx = 0; idx < sc->vtmmio_nvqs; idx++) { vqx = &sc->vtmmio_vqs[idx]; vtmmio_select_virtqueue(sc, idx); if (sc->vtmmio_version > 1) { vtmmio_write_config_4(sc, VIRTIO_MMIO_QUEUE_READY, 0); vtmmio_read_config_4(sc, VIRTIO_MMIO_QUEUE_READY); } else vtmmio_write_config_4(sc, VIRTIO_MMIO_QUEUE_PFN, 0); virtqueue_free(vqx->vtv_vq); vqx->vtv_vq = NULL; } free(sc->vtmmio_vqs, M_DEVBUF); sc->vtmmio_vqs = NULL; sc->vtmmio_nvqs = 0; } static void vtmmio_release_child_resources(struct vtmmio_softc *sc) { vtmmio_free_interrupts(sc); vtmmio_free_virtqueues(sc); } static void vtmmio_reset(struct vtmmio_softc *sc) { /* * Setting the status to RESET sets the host device to * the original, uninitialized state. */ vtmmio_set_status(sc->dev, VIRTIO_CONFIG_STATUS_RESET); } static void vtmmio_select_virtqueue(struct vtmmio_softc *sc, int idx) { vtmmio_write_config_4(sc, VIRTIO_MMIO_QUEUE_SEL, idx); } static void vtmmio_vq_intr(void *arg) { struct vtmmio_virtqueue *vqx; struct vtmmio_softc *sc; struct virtqueue *vq; uint32_t status; int idx; sc = arg; status = vtmmio_read_config_4(sc, VIRTIO_MMIO_INTERRUPT_STATUS); vtmmio_write_config_4(sc, VIRTIO_MMIO_INTERRUPT_ACK, status); /* The config changed */ if (status & VIRTIO_MMIO_INT_CONFIG) if (sc->vtmmio_child_dev != NULL) VIRTIO_CONFIG_CHANGE(sc->vtmmio_child_dev); /* Notify all virtqueues. */ if (status & VIRTIO_MMIO_INT_VRING) { for (idx = 0; idx < sc->vtmmio_nvqs; idx++) { vqx = &sc->vtmmio_vqs[idx]; if (vqx->vtv_no_intr == 0) { vq = vqx->vtv_vq; virtqueue_intr(vq); } } } } diff --git a/sys/dev/virtio/pci/virtio_pci.c b/sys/dev/virtio/pci/virtio_pci.c index fc26d62543c1..b7b34b448f6e 100644 --- a/sys/dev/virtio/pci/virtio_pci.c +++ b/sys/dev/virtio/pci/virtio_pci.c @@ -1,998 +1,994 @@ /*- * SPDX-License-Identifier: BSD-2-Clause * * Copyright (c) 2017, Bryan Venteicher * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice unmodified, this list of conditions, and the following * disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "virtio_pci_if.h" #include "virtio_if.h" static void vtpci_describe_features(struct vtpci_common *, const char *, uint64_t); static int vtpci_alloc_msix(struct vtpci_common *, int); static int vtpci_alloc_msi(struct vtpci_common *); static int vtpci_alloc_intr_msix_pervq(struct vtpci_common *); static int vtpci_alloc_intr_msix_shared(struct vtpci_common *); static int vtpci_alloc_intr_msi(struct vtpci_common *); static int vtpci_alloc_intr_intx(struct vtpci_common *); static int vtpci_alloc_interrupt(struct vtpci_common *, int, int, struct vtpci_interrupt *); static void vtpci_free_interrupt(struct vtpci_common *, struct vtpci_interrupt *); static void vtpci_free_interrupts(struct vtpci_common *); static void vtpci_free_virtqueues(struct vtpci_common *); static void vtpci_cleanup_setup_intr_attempt(struct vtpci_common *); static int vtpci_alloc_intr_resources(struct vtpci_common *); static int vtpci_setup_intx_interrupt(struct vtpci_common *, enum intr_type); static int vtpci_setup_pervq_msix_interrupts(struct vtpci_common *, enum intr_type); static int vtpci_set_host_msix_vectors(struct vtpci_common *); static int vtpci_setup_msix_interrupts(struct vtpci_common *, enum intr_type); static int vtpci_setup_intrs(struct vtpci_common *, enum intr_type); static int vtpci_reinit_virtqueue(struct vtpci_common *, int); static void vtpci_intx_intr(void *); static int vtpci_vq_shared_intr_filter(void *); static void vtpci_vq_shared_intr(void *); static int vtpci_vq_intr_filter(void *); static void vtpci_vq_intr(void *); static void vtpci_config_intr(void *); static void vtpci_setup_sysctl(struct vtpci_common *); #define vtpci_setup_msi_interrupt vtpci_setup_intx_interrupt /* * This module contains two drivers: * - virtio_pci_legacy for pre-V1 support * - virtio_pci_modern for V1 support */ MODULE_VERSION(virtio_pci, 1); MODULE_DEPEND(virtio_pci, pci, 1, 1, 1); MODULE_DEPEND(virtio_pci, virtio, 1, 1, 1); int vtpci_disable_msix = 0; TUNABLE_INT("hw.virtio.pci.disable_msix", &vtpci_disable_msix); static uint8_t vtpci_read_isr(struct vtpci_common *cn) { return (VIRTIO_PCI_READ_ISR(cn->vtpci_dev)); } static uint16_t vtpci_get_vq_size(struct vtpci_common *cn, int idx) { return (VIRTIO_PCI_GET_VQ_SIZE(cn->vtpci_dev, idx)); } static bus_size_t vtpci_get_vq_notify_off(struct vtpci_common *cn, int idx) { return (VIRTIO_PCI_GET_VQ_NOTIFY_OFF(cn->vtpci_dev, idx)); } static void vtpci_set_vq(struct vtpci_common *cn, struct virtqueue *vq) { VIRTIO_PCI_SET_VQ(cn->vtpci_dev, vq); } static void vtpci_disable_vq(struct vtpci_common *cn, int idx) { VIRTIO_PCI_DISABLE_VQ(cn->vtpci_dev, idx); } static int vtpci_register_cfg_msix(struct vtpci_common *cn, struct vtpci_interrupt *intr) { return (VIRTIO_PCI_REGISTER_CFG_MSIX(cn->vtpci_dev, intr)); } static int vtpci_register_vq_msix(struct vtpci_common *cn, int idx, struct vtpci_interrupt *intr) { return 
(VIRTIO_PCI_REGISTER_VQ_MSIX(cn->vtpci_dev, idx, intr)); } void vtpci_init(struct vtpci_common *cn, device_t dev, bool modern) { cn->vtpci_dev = dev; pci_enable_busmaster(dev); if (modern) cn->vtpci_flags |= VTPCI_FLAG_MODERN; if (pci_find_cap(dev, PCIY_MSI, NULL) != 0) cn->vtpci_flags |= VTPCI_FLAG_NO_MSI; if (pci_find_cap(dev, PCIY_MSIX, NULL) != 0) cn->vtpci_flags |= VTPCI_FLAG_NO_MSIX; vtpci_setup_sysctl(cn); } int vtpci_add_child(struct vtpci_common *cn) { device_t dev, child; dev = cn->vtpci_dev; child = device_add_child(dev, NULL, DEVICE_UNIT_ANY); if (child == NULL) { device_printf(dev, "cannot create child device\n"); return (ENOMEM); } cn->vtpci_child_dev = child; return (0); } int vtpci_delete_child(struct vtpci_common *cn) { - device_t dev, child; + device_t dev; int error; dev = cn->vtpci_dev; - child = cn->vtpci_child_dev; - if (child != NULL) { - error = device_delete_child(dev, child); - if (error) - return (error); - cn->vtpci_child_dev = NULL; - } + error = bus_generic_detach(dev); + if (error) + return (error); return (0); } void vtpci_child_detached(struct vtpci_common *cn) { vtpci_release_child_resources(cn); cn->vtpci_child_feat_desc = NULL; cn->vtpci_host_features = 0; cn->vtpci_features = 0; } int vtpci_reinit(struct vtpci_common *cn) { int idx, error; for (idx = 0; idx < cn->vtpci_nvqs; idx++) { error = vtpci_reinit_virtqueue(cn, idx); if (error) return (error); } if (vtpci_is_msix_enabled(cn)) { error = vtpci_set_host_msix_vectors(cn); if (error) return (error); } return (0); } static void vtpci_describe_features(struct vtpci_common *cn, const char *msg, uint64_t features) { device_t dev, child; dev = cn->vtpci_dev; child = cn->vtpci_child_dev; if (device_is_attached(child) || bootverbose == 0) return; virtio_describe(dev, msg, features, cn->vtpci_child_feat_desc); } uint64_t vtpci_negotiate_features(struct vtpci_common *cn, uint64_t child_features, uint64_t host_features) { uint64_t features; cn->vtpci_host_features = host_features; vtpci_describe_features(cn, "host", host_features); /* * Limit negotiated features to what the driver, virtqueue, and * host all support. */ features = host_features & child_features; features = virtio_filter_transport_features(features); cn->vtpci_features = features; vtpci_describe_features(cn, "negotiated", features); return (features); } bool vtpci_with_feature(struct vtpci_common *cn, uint64_t feature) { return ((cn->vtpci_features & feature) != 0); } int vtpci_read_ivar(struct vtpci_common *cn, int index, uintptr_t *result) { device_t dev; int error; dev = cn->vtpci_dev; error = 0; switch (index) { case VIRTIO_IVAR_SUBDEVICE: *result = pci_get_subdevice(dev); break; case VIRTIO_IVAR_VENDOR: *result = pci_get_vendor(dev); break; case VIRTIO_IVAR_DEVICE: *result = pci_get_device(dev); break; case VIRTIO_IVAR_SUBVENDOR: *result = pci_get_subvendor(dev); break; case VIRTIO_IVAR_MODERN: *result = vtpci_is_modern(cn); break; default: error = ENOENT; } return (error); } int vtpci_write_ivar(struct vtpci_common *cn, int index, uintptr_t value) { int error; error = 0; switch (index) { case VIRTIO_IVAR_FEATURE_DESC: cn->vtpci_child_feat_desc = (void *) value; break; default: error = ENOENT; } return (error); } int vtpci_alloc_virtqueues(struct vtpci_common *cn, int nvqs, struct vq_alloc_info *vq_info) { device_t dev; int idx, align, error; dev = cn->vtpci_dev; /* * This is VIRTIO_PCI_VRING_ALIGN from legacy VirtIO. In modern VirtIO, * the tables do not have to be allocated contiguously, but we do so * anyways. 
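* (The 4096 below matches legacy VIRTIO_PCI_VRING_ALIGN, typically * one page.)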
*/ align = 4096; if (cn->vtpci_nvqs != 0) return (EALREADY); if (nvqs <= 0) return (EINVAL); cn->vtpci_vqs = malloc(nvqs * sizeof(struct vtpci_virtqueue), M_DEVBUF, M_NOWAIT | M_ZERO); if (cn->vtpci_vqs == NULL) return (ENOMEM); for (idx = 0; idx < nvqs; idx++) { struct vtpci_virtqueue *vqx; struct vq_alloc_info *info; struct virtqueue *vq; bus_size_t notify_offset; uint16_t size; vqx = &cn->vtpci_vqs[idx]; info = &vq_info[idx]; size = vtpci_get_vq_size(cn, idx); notify_offset = vtpci_get_vq_notify_off(cn, idx); error = virtqueue_alloc(dev, idx, size, notify_offset, align, ~(vm_paddr_t)0, info, &vq); if (error) { device_printf(dev, "cannot allocate virtqueue %d: %d\n", idx, error); break; } vtpci_set_vq(cn, vq); vqx->vtv_vq = *info->vqai_vq = vq; vqx->vtv_no_intr = info->vqai_intr == NULL; cn->vtpci_nvqs++; } if (error) vtpci_free_virtqueues(cn); return (error); } static int vtpci_alloc_msix(struct vtpci_common *cn, int nvectors) { device_t dev; int nmsix, cnt, required; dev = cn->vtpci_dev; /* Allocate an additional vector for the config changes. */ required = nvectors + 1; nmsix = pci_msix_count(dev); if (nmsix < required) return (1); cnt = required; if (pci_alloc_msix(dev, &cnt) == 0 && cnt >= required) { cn->vtpci_nmsix_resources = required; return (0); } pci_release_msi(dev); return (1); } static int vtpci_alloc_msi(struct vtpci_common *cn) { device_t dev; int nmsi, cnt, required; dev = cn->vtpci_dev; required = 1; nmsi = pci_msi_count(dev); if (nmsi < required) return (1); cnt = required; if (pci_alloc_msi(dev, &cnt) == 0 && cnt >= required) return (0); pci_release_msi(dev); return (1); } static int vtpci_alloc_intr_msix_pervq(struct vtpci_common *cn) { int i, nvectors, error; if (vtpci_disable_msix != 0 || cn->vtpci_flags & VTPCI_FLAG_NO_MSIX) return (ENOTSUP); for (nvectors = 0, i = 0; i < cn->vtpci_nvqs; i++) { if (cn->vtpci_vqs[i].vtv_no_intr == 0) nvectors++; } error = vtpci_alloc_msix(cn, nvectors); if (error) return (error); cn->vtpci_flags |= VTPCI_FLAG_MSIX; return (0); } static int vtpci_alloc_intr_msix_shared(struct vtpci_common *cn) { int error; if (vtpci_disable_msix != 0 || cn->vtpci_flags & VTPCI_FLAG_NO_MSIX) return (ENOTSUP); error = vtpci_alloc_msix(cn, 1); if (error) return (error); cn->vtpci_flags |= VTPCI_FLAG_MSIX | VTPCI_FLAG_SHARED_MSIX; return (0); } static int vtpci_alloc_intr_msi(struct vtpci_common *cn) { int error; /* Only BHyVe supports MSI. 
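* Other hypervisors typically expose MSI-X instead, so the plain MSI * path is rarely taken in practice.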
*/ if (cn->vtpci_flags & VTPCI_FLAG_NO_MSI) return (ENOTSUP); error = vtpci_alloc_msi(cn); if (error) return (error); cn->vtpci_flags |= VTPCI_FLAG_MSI; return (0); } static int vtpci_alloc_intr_intx(struct vtpci_common *cn) { cn->vtpci_flags |= VTPCI_FLAG_INTX; return (0); } static int vtpci_alloc_interrupt(struct vtpci_common *cn, int rid, int flags, struct vtpci_interrupt *intr) { struct resource *irq; irq = bus_alloc_resource_any(cn->vtpci_dev, SYS_RES_IRQ, &rid, flags); if (irq == NULL) return (ENXIO); intr->vti_irq = irq; intr->vti_rid = rid; return (0); } static void vtpci_free_interrupt(struct vtpci_common *cn, struct vtpci_interrupt *intr) { device_t dev; dev = cn->vtpci_dev; if (intr->vti_handler != NULL) { bus_teardown_intr(dev, intr->vti_irq, intr->vti_handler); intr->vti_handler = NULL; } if (intr->vti_irq != NULL) { bus_release_resource(dev, SYS_RES_IRQ, intr->vti_rid, intr->vti_irq); intr->vti_irq = NULL; intr->vti_rid = -1; } } static void vtpci_free_interrupts(struct vtpci_common *cn) { struct vtpci_interrupt *intr; int i, nvq_intrs; vtpci_free_interrupt(cn, &cn->vtpci_device_interrupt); if (cn->vtpci_nmsix_resources != 0) { nvq_intrs = cn->vtpci_nmsix_resources - 1; cn->vtpci_nmsix_resources = 0; if ((intr = cn->vtpci_msix_vq_interrupts) != NULL) { for (i = 0; i < nvq_intrs; i++, intr++) vtpci_free_interrupt(cn, intr); free(cn->vtpci_msix_vq_interrupts, M_DEVBUF); cn->vtpci_msix_vq_interrupts = NULL; } } if (cn->vtpci_flags & (VTPCI_FLAG_MSI | VTPCI_FLAG_MSIX)) pci_release_msi(cn->vtpci_dev); cn->vtpci_flags &= ~VTPCI_FLAG_ITYPE_MASK; } static void vtpci_free_virtqueues(struct vtpci_common *cn) { struct vtpci_virtqueue *vqx; int idx; for (idx = 0; idx < cn->vtpci_nvqs; idx++) { vtpci_disable_vq(cn, idx); vqx = &cn->vtpci_vqs[idx]; virtqueue_free(vqx->vtv_vq); vqx->vtv_vq = NULL; } free(cn->vtpci_vqs, M_DEVBUF); cn->vtpci_vqs = NULL; cn->vtpci_nvqs = 0; } void vtpci_release_child_resources(struct vtpci_common *cn) { vtpci_free_interrupts(cn); vtpci_free_virtqueues(cn); } static void vtpci_cleanup_setup_intr_attempt(struct vtpci_common *cn) { int idx; if (cn->vtpci_flags & VTPCI_FLAG_MSIX) { vtpci_register_cfg_msix(cn, NULL); for (idx = 0; idx < cn->vtpci_nvqs; idx++) vtpci_register_vq_msix(cn, idx, NULL); } vtpci_free_interrupts(cn); } static int vtpci_alloc_intr_resources(struct vtpci_common *cn) { struct vtpci_interrupt *intr; int i, rid, flags, nvq_intrs, error; flags = RF_ACTIVE; if (cn->vtpci_flags & VTPCI_FLAG_INTX) { rid = 0; flags |= RF_SHAREABLE; } else rid = 1; /* * When using INTX or MSI interrupts, this resource handles all * interrupts. When using MSIX, this resource handles just the * configuration changed interrupt. */ intr = &cn->vtpci_device_interrupt; error = vtpci_alloc_interrupt(cn, rid, flags, intr); if (error || cn->vtpci_flags & (VTPCI_FLAG_INTX | VTPCI_FLAG_MSI)) return (error); /* * Now allocate the interrupts for the virtqueues. This may be one * for all the virtqueues, or one for each virtqueue. Subtract one * below because of the configuration changed interrupt.
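* (vtpci_alloc_msix() sized vtpci_nmsix_resources as the requested * virtqueue vectors plus that one extra.)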
*/ nvq_intrs = cn->vtpci_nmsix_resources - 1; cn->vtpci_msix_vq_interrupts = malloc(nvq_intrs * sizeof(struct vtpci_interrupt), M_DEVBUF, M_NOWAIT | M_ZERO); if (cn->vtpci_msix_vq_interrupts == NULL) return (ENOMEM); intr = cn->vtpci_msix_vq_interrupts; for (i = 0, rid++; i < nvq_intrs; i++, rid++, intr++) { error = vtpci_alloc_interrupt(cn, rid, flags, intr); if (error) return (error); } return (0); } static int vtpci_setup_intx_interrupt(struct vtpci_common *cn, enum intr_type type) { struct vtpci_interrupt *intr; int error; intr = &cn->vtpci_device_interrupt; error = bus_setup_intr(cn->vtpci_dev, intr->vti_irq, type, NULL, vtpci_intx_intr, cn, &intr->vti_handler); return (error); } static int vtpci_setup_pervq_msix_interrupts(struct vtpci_common *cn, enum intr_type type) { struct vtpci_virtqueue *vqx; struct vtpci_interrupt *intr; int i, error; intr = cn->vtpci_msix_vq_interrupts; for (i = 0; i < cn->vtpci_nvqs; i++) { vqx = &cn->vtpci_vqs[i]; if (vqx->vtv_no_intr) continue; error = bus_setup_intr(cn->vtpci_dev, intr->vti_irq, type, vtpci_vq_intr_filter, vtpci_vq_intr, vqx->vtv_vq, &intr->vti_handler); if (error) return (error); intr++; } return (0); } static int vtpci_set_host_msix_vectors(struct vtpci_common *cn) { struct vtpci_interrupt *intr, *tintr; int idx, error; intr = &cn->vtpci_device_interrupt; error = vtpci_register_cfg_msix(cn, intr); if (error) return (error); intr = cn->vtpci_msix_vq_interrupts; for (idx = 0; idx < cn->vtpci_nvqs; idx++) { if (cn->vtpci_vqs[idx].vtv_no_intr) tintr = NULL; else tintr = intr; error = vtpci_register_vq_msix(cn, idx, tintr); if (error) break; /* * For shared MSIX, all the virtqueues share the first * interrupt. */ if (!cn->vtpci_vqs[idx].vtv_no_intr && (cn->vtpci_flags & VTPCI_FLAG_SHARED_MSIX) == 0) intr++; } return (error); } static int vtpci_setup_msix_interrupts(struct vtpci_common *cn, enum intr_type type) { struct vtpci_interrupt *intr; int error; intr = &cn->vtpci_device_interrupt; error = bus_setup_intr(cn->vtpci_dev, intr->vti_irq, type, NULL, vtpci_config_intr, cn, &intr->vti_handler); if (error) return (error); if (cn->vtpci_flags & VTPCI_FLAG_SHARED_MSIX) { intr = &cn->vtpci_msix_vq_interrupts[0]; error = bus_setup_intr(cn->vtpci_dev, intr->vti_irq, type, vtpci_vq_shared_intr_filter, vtpci_vq_shared_intr, cn, &intr->vti_handler); } else error = vtpci_setup_pervq_msix_interrupts(cn, type); return (error ? error : vtpci_set_host_msix_vectors(cn)); } static int vtpci_setup_intrs(struct vtpci_common *cn, enum intr_type type) { int error; type |= INTR_MPSAFE; KASSERT(cn->vtpci_flags & VTPCI_FLAG_ITYPE_MASK, ("%s: no interrupt type selected %#x", __func__, cn->vtpci_flags)); error = vtpci_alloc_intr_resources(cn); if (error) return (error); if (cn->vtpci_flags & VTPCI_FLAG_INTX) error = vtpci_setup_intx_interrupt(cn, type); else if (cn->vtpci_flags & VTPCI_FLAG_MSI) error = vtpci_setup_msi_interrupt(cn, type); else error = vtpci_setup_msix_interrupts(cn, type); return (error); } int vtpci_setup_interrupts(struct vtpci_common *cn, enum intr_type type) { device_t dev; int attempt, error; dev = cn->vtpci_dev; for (attempt = 0; attempt < 5; attempt++) { /* * Start with the most desirable interrupt configuration and * fallback towards less desirable ones. 
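* The order tried is per-virtqueue MSI-X, shared MSI-X, MSI and * finally legacy INTx, matching cases 0 through 3 below.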
*/ switch (attempt) { case 0: error = vtpci_alloc_intr_msix_pervq(cn); break; case 1: error = vtpci_alloc_intr_msix_shared(cn); break; case 2: error = vtpci_alloc_intr_msi(cn); break; case 3: error = vtpci_alloc_intr_intx(cn); break; default: device_printf(dev, "exhausted all interrupt allocation attempts\n"); return (ENXIO); } if (error == 0 && vtpci_setup_intrs(cn, type) == 0) break; vtpci_cleanup_setup_intr_attempt(cn); } if (bootverbose) { if (cn->vtpci_flags & VTPCI_FLAG_INTX) device_printf(dev, "using legacy interrupt\n"); else if (cn->vtpci_flags & VTPCI_FLAG_MSI) device_printf(dev, "using MSI interrupt\n"); else if (cn->vtpci_flags & VTPCI_FLAG_SHARED_MSIX) device_printf(dev, "using shared MSIX interrupts\n"); else device_printf(dev, "using per VQ MSIX interrupts\n"); } return (0); } static int vtpci_reinit_virtqueue(struct vtpci_common *cn, int idx) { struct vtpci_virtqueue *vqx; struct virtqueue *vq; int error; vqx = &cn->vtpci_vqs[idx]; vq = vqx->vtv_vq; KASSERT(vq != NULL, ("%s: vq %d not allocated", __func__, idx)); error = virtqueue_reinit(vq, vtpci_get_vq_size(cn, idx)); if (error == 0) vtpci_set_vq(cn, vq); return (error); } static void vtpci_intx_intr(void *xcn) { struct vtpci_common *cn; struct vtpci_virtqueue *vqx; int i; uint8_t isr; cn = xcn; isr = vtpci_read_isr(cn); if (isr & VIRTIO_PCI_ISR_CONFIG) vtpci_config_intr(cn); if (isr & VIRTIO_PCI_ISR_INTR) { vqx = &cn->vtpci_vqs[0]; for (i = 0; i < cn->vtpci_nvqs; i++, vqx++) { if (vqx->vtv_no_intr == 0) virtqueue_intr(vqx->vtv_vq); } } } static int vtpci_vq_shared_intr_filter(void *xcn) { struct vtpci_common *cn; struct vtpci_virtqueue *vqx; int i, rc; cn = xcn; vqx = &cn->vtpci_vqs[0]; rc = 0; for (i = 0; i < cn->vtpci_nvqs; i++, vqx++) { if (vqx->vtv_no_intr == 0) rc |= virtqueue_intr_filter(vqx->vtv_vq); } return (rc ? FILTER_SCHEDULE_THREAD : FILTER_STRAY); } static void vtpci_vq_shared_intr(void *xcn) { struct vtpci_common *cn; struct vtpci_virtqueue *vqx; int i; cn = xcn; vqx = &cn->vtpci_vqs[0]; for (i = 0; i < cn->vtpci_nvqs; i++, vqx++) { if (vqx->vtv_no_intr == 0) virtqueue_intr(vqx->vtv_vq); } } static int vtpci_vq_intr_filter(void *xvq) { struct virtqueue *vq; int rc; vq = xvq; rc = virtqueue_intr_filter(vq); return (rc ? 
FILTER_SCHEDULE_THREAD : FILTER_STRAY); } static void vtpci_vq_intr(void *xvq) { struct virtqueue *vq; vq = xvq; virtqueue_intr(vq); } static void vtpci_config_intr(void *xcn) { struct vtpci_common *cn; device_t child; cn = xcn; child = cn->vtpci_child_dev; if (child != NULL) VIRTIO_CONFIG_CHANGE(child); } static int vtpci_feature_sysctl(struct sysctl_req *req, struct vtpci_common *cn, uint64_t features) { struct sbuf *sb; int error; sb = sbuf_new_for_sysctl(NULL, NULL, 256, req); if (sb == NULL) return (ENOMEM); error = virtio_describe_sbuf(sb, features, cn->vtpci_child_feat_desc); sbuf_delete(sb); return (error); } static int vtpci_host_features_sysctl(SYSCTL_HANDLER_ARGS) { struct vtpci_common *cn; cn = arg1; return (vtpci_feature_sysctl(req, cn, cn->vtpci_host_features)); } static int vtpci_negotiated_features_sysctl(SYSCTL_HANDLER_ARGS) { struct vtpci_common *cn; cn = arg1; return (vtpci_feature_sysctl(req, cn, cn->vtpci_features)); } static void vtpci_setup_sysctl(struct vtpci_common *cn) { device_t dev; struct sysctl_ctx_list *ctx; struct sysctl_oid *tree; struct sysctl_oid_list *child; dev = cn->vtpci_dev; ctx = device_get_sysctl_ctx(dev); tree = device_get_sysctl_tree(dev); child = SYSCTL_CHILDREN(tree); SYSCTL_ADD_INT(ctx, child, OID_AUTO, "nvqs", CTLFLAG_RD, &cn->vtpci_nvqs, 0, "Number of virtqueues"); SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "host_features", CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, cn, 0, vtpci_host_features_sysctl, "A", "Features supported by the host"); SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "negotiated_features", CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, cn, 0, vtpci_negotiated_features_sysctl, "A", "Features negotiated"); } diff --git a/sys/dev/xilinx/if_xae.c b/sys/dev/xilinx/if_xae.c index 26f1bf805ffa..080b13606525 100644 --- a/sys/dev/xilinx/if_xae.c +++ b/sys/dev/xilinx/if_xae.c @@ -1,1144 +1,1143 @@ /*- * SPDX-License-Identifier: BSD-2-Clause * * Copyright (c) 2019 Ruslan Bukin * * This software was developed by SRI International and the University of * Cambridge Computer Laboratory (Department of Computer Science and * Technology) under DARPA contract HR0011-18-C-0016 ("ECATS"), as part of the * DARPA SSITH research programme. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
*/ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "miibus_if.h" #define READ4(_sc, _reg) \ bus_read_4((_sc)->res[0], _reg) #define WRITE4(_sc, _reg, _val) \ bus_write_4((_sc)->res[0], _reg, _val) #define READ8(_sc, _reg) \ bus_read_8((_sc)->res[0], _reg) #define WRITE8(_sc, _reg, _val) \ bus_write_8((_sc)->res[0], _reg, _val) #define XAE_LOCK(sc) mtx_lock(&(sc)->mtx) #define XAE_UNLOCK(sc) mtx_unlock(&(sc)->mtx) #define XAE_ASSERT_LOCKED(sc) mtx_assert(&(sc)->mtx, MA_OWNED) #define XAE_ASSERT_UNLOCKED(sc) mtx_assert(&(sc)->mtx, MA_NOTOWNED) #define XAE_DEBUG #undef XAE_DEBUG #ifdef XAE_DEBUG #define dprintf(fmt, ...) printf(fmt, ##__VA_ARGS__) #else #define dprintf(fmt, ...) #endif #define RX_QUEUE_SIZE 64 #define TX_QUEUE_SIZE 64 #define NUM_RX_MBUF 16 #define BUFRING_SIZE 8192 #define MDIO_CLK_DIV_DEFAULT 29 #define PHY1_RD(sc, _r) \ xae_miibus_read_reg(sc->dev, 1, _r) #define PHY1_WR(sc, _r, _v) \ xae_miibus_write_reg(sc->dev, 1, _r, _v) #define PHY_RD(sc, _r) \ xae_miibus_read_reg(sc->dev, sc->phy_addr, _r) #define PHY_WR(sc, _r, _v) \ xae_miibus_write_reg(sc->dev, sc->phy_addr, _r, _v) /* Use this macro to access regs > 0x1f */ #define WRITE_TI_EREG(sc, reg, data) { \ PHY_WR(sc, MII_MMDACR, MMDACR_DADDRMASK); \ PHY_WR(sc, MII_MMDAADR, reg); \ PHY_WR(sc, MII_MMDACR, MMDACR_DADDRMASK | MMDACR_FN_DATANPI); \ PHY_WR(sc, MII_MMDAADR, data); \ } /* Not documented, Xilinx VCU118 workaround */ #define CFG4_SGMII_TMR 0x160 /* bits 8:7 MUST be '10' */ #define DP83867_SGMIICTL1 0xD3 /* not documented register */ #define SGMIICTL1_SGMII_6W (1 << 14) /* no idea what it is */ static struct resource_spec xae_spec[] = { { SYS_RES_MEMORY, 0, RF_ACTIVE }, { SYS_RES_IRQ, 0, RF_ACTIVE }, { -1, 0 } }; static void xae_stop_locked(struct xae_softc *sc); static void xae_setup_rxfilter(struct xae_softc *sc); static int xae_rx_enqueue(struct xae_softc *sc, uint32_t n) { struct mbuf *m; int i; for (i = 0; i < n; i++) { m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR); if (m == NULL) { device_printf(sc->dev, "%s: Can't alloc rx mbuf\n", __func__); return (-1); } m->m_pkthdr.len = m->m_len = m->m_ext.ext_size; xdma_enqueue_mbuf(sc->xchan_rx, &m, 0, 4, 4, XDMA_DEV_TO_MEM); } return (0); } static int xae_get_phyaddr(phandle_t node, int *phy_addr) { phandle_t phy_node; pcell_t phy_handle, phy_reg; if (OF_getencprop(node, "phy-handle", (void *)&phy_handle, sizeof(phy_handle)) <= 0) return (ENXIO); phy_node = OF_node_from_xref(phy_handle); if (OF_getencprop(phy_node, "reg", (void *)&phy_reg, sizeof(phy_reg)) <= 0) return (ENXIO); *phy_addr = phy_reg; return (0); } static int xae_xdma_tx_intr(void *arg, xdma_transfer_status_t *status) { xdma_transfer_status_t st; struct xae_softc *sc; if_t ifp; struct mbuf *m; int err; sc = arg; XAE_LOCK(sc); ifp = sc->ifp; for (;;) { err = xdma_dequeue_mbuf(sc->xchan_tx, &m, &st); if (err != 0) { break; } if (st.error != 0) { if_inc_counter(ifp, IFCOUNTER_OERRORS, 1); } m_freem(m); } if_setdrvflagbits(ifp, 0, IFF_DRV_OACTIVE); XAE_UNLOCK(sc); return (0); } static int xae_xdma_rx_intr(void *arg, xdma_transfer_status_t *status) { xdma_transfer_status_t st; struct xae_softc *sc; if_t ifp; struct mbuf *m; int err; uint32_t cnt_processed; sc = arg; dprintf("%s\n", __func__); XAE_LOCK(sc); ifp = sc->ifp; cnt_processed = 0; for (;;) { err = xdma_dequeue_mbuf(sc->xchan_rx, &m, &st); if (err 
!= 0) { break; } cnt_processed++; if (st.error != 0) { if_inc_counter(ifp, IFCOUNTER_IERRORS, 1); m_freem(m); continue; } m->m_pkthdr.len = m->m_len = st.transferred; m->m_pkthdr.rcvif = ifp; XAE_UNLOCK(sc); if_input(ifp, m); XAE_LOCK(sc); } xae_rx_enqueue(sc, cnt_processed); XAE_UNLOCK(sc); return (0); } static void xae_qflush(if_t ifp) { } static int xae_transmit_locked(if_t ifp) { struct xae_softc *sc; struct mbuf *m; struct buf_ring *br; int error; int enq; dprintf("%s\n", __func__); sc = if_getsoftc(ifp); br = sc->br; enq = 0; while ((m = drbr_peek(ifp, br)) != NULL) { error = xdma_enqueue_mbuf(sc->xchan_tx, &m, 0, 4, 4, XDMA_MEM_TO_DEV); if (error != 0) { /* No space in request queue available yet. */ drbr_putback(ifp, br, m); break; } drbr_advance(ifp, br); enq++; /* If anyone is interested give them a copy. */ ETHER_BPF_MTAP(ifp, m); } if (enq > 0) xdma_queue_submit(sc->xchan_tx); return (0); } static int xae_transmit(if_t ifp, struct mbuf *m) { struct xae_softc *sc; int error; dprintf("%s\n", __func__); sc = if_getsoftc(ifp); XAE_LOCK(sc); error = drbr_enqueue(ifp, sc->br, m); if (error) { XAE_UNLOCK(sc); return (error); } if ((if_getdrvflags(ifp) & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) != IFF_DRV_RUNNING) { XAE_UNLOCK(sc); return (0); } if (!sc->link_is_up) { XAE_UNLOCK(sc); return (0); } error = xae_transmit_locked(ifp); XAE_UNLOCK(sc); return (error); } static void xae_stop_locked(struct xae_softc *sc) { if_t ifp; uint32_t reg; XAE_ASSERT_LOCKED(sc); ifp = sc->ifp; if_setdrvflagbits(ifp, 0, (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)); callout_stop(&sc->xae_callout); /* Stop the transmitter */ reg = READ4(sc, XAE_TC); reg &= ~TC_TX; WRITE4(sc, XAE_TC, reg); /* Stop the receiver. */ reg = READ4(sc, XAE_RCW1); reg &= ~RCW1_RX; WRITE4(sc, XAE_RCW1, reg); } static uint64_t xae_stat(struct xae_softc *sc, int counter_id) { uint64_t new, old; uint64_t delta; KASSERT(counter_id < XAE_MAX_COUNTERS, ("counter %d is out of range", counter_id)); new = READ8(sc, XAE_STATCNT(counter_id)); old = sc->counters[counter_id]; if (new >= old) delta = new - old; else delta = UINT64_MAX - old + new; sc->counters[counter_id] = new; return (delta); } static void xae_harvest_stats(struct xae_softc *sc) { if_t ifp; ifp = sc->ifp; if_inc_counter(ifp, IFCOUNTER_IPACKETS, xae_stat(sc, RX_GOOD_FRAMES)); if_inc_counter(ifp, IFCOUNTER_IMCASTS, xae_stat(sc, RX_GOOD_MCASTS)); if_inc_counter(ifp, IFCOUNTER_IERRORS, xae_stat(sc, RX_FRAME_CHECK_SEQ_ERROR) + xae_stat(sc, RX_LEN_OUT_OF_RANGE) + xae_stat(sc, RX_ALIGNMENT_ERRORS)); if_inc_counter(ifp, IFCOUNTER_OBYTES, xae_stat(sc, TX_BYTES)); if_inc_counter(ifp, IFCOUNTER_OPACKETS, xae_stat(sc, TX_GOOD_FRAMES)); if_inc_counter(ifp, IFCOUNTER_OMCASTS, xae_stat(sc, TX_GOOD_MCASTS)); if_inc_counter(ifp, IFCOUNTER_OERRORS, xae_stat(sc, TX_GOOD_UNDERRUN_ERRORS)); if_inc_counter(ifp, IFCOUNTER_COLLISIONS, xae_stat(sc, TX_SINGLE_COLLISION_FRAMES) + xae_stat(sc, TX_MULTI_COLLISION_FRAMES) + xae_stat(sc, TX_LATE_COLLISIONS) + xae_stat(sc, TX_EXCESS_COLLISIONS)); } static void xae_tick(void *arg) { struct xae_softc *sc; if_t ifp; int link_was_up; sc = arg; XAE_ASSERT_LOCKED(sc); ifp = sc->ifp; if (!(if_getdrvflags(ifp) & IFF_DRV_RUNNING)) return; /* Gather stats from hardware counters. */ xae_harvest_stats(sc); /* Check the media status. */ link_was_up = sc->link_is_up; mii_tick(sc->mii_softc); if (sc->link_is_up && !link_was_up) xae_transmit_locked(sc->ifp); /* Schedule another check one second from now. 
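* (hz callout ticks correspond to one second, so stats harvesting and * link checks run at a one-second period while the interface is up.)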
*/ callout_reset(&sc->xae_callout, hz, xae_tick, sc); } static void xae_init_locked(struct xae_softc *sc) { if_t ifp; XAE_ASSERT_LOCKED(sc); ifp = sc->ifp; if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) return; if_setdrvflagbits(ifp, IFF_DRV_RUNNING, 0); xae_setup_rxfilter(sc); /* Enable the transmitter */ WRITE4(sc, XAE_TC, TC_TX); /* Enable the receiver. */ WRITE4(sc, XAE_RCW1, RCW1_RX); /* * Call mii_mediachg() which will call back into xae_miibus_statchg() * to set up the remaining config registers based on current media. */ mii_mediachg(sc->mii_softc); callout_reset(&sc->xae_callout, hz, xae_tick, sc); } static void xae_init(void *arg) { struct xae_softc *sc; sc = arg; XAE_LOCK(sc); xae_init_locked(sc); XAE_UNLOCK(sc); } static void xae_media_status(if_t ifp, struct ifmediareq *ifmr) { struct xae_softc *sc; struct mii_data *mii; sc = if_getsoftc(ifp); mii = sc->mii_softc; XAE_LOCK(sc); mii_pollstat(mii); ifmr->ifm_active = mii->mii_media_active; ifmr->ifm_status = mii->mii_media_status; XAE_UNLOCK(sc); } static int xae_media_change_locked(struct xae_softc *sc) { return (mii_mediachg(sc->mii_softc)); } static int xae_media_change(if_t ifp) { struct xae_softc *sc; int error; sc = if_getsoftc(ifp); XAE_LOCK(sc); error = xae_media_change_locked(sc); XAE_UNLOCK(sc); return (error); } static u_int xae_write_maddr(void *arg, struct sockaddr_dl *sdl, u_int cnt) { struct xae_softc *sc = arg; uint32_t reg; uint8_t *ma; if (cnt >= XAE_MULTICAST_TABLE_SIZE) return (1); ma = LLADDR(sdl); reg = READ4(sc, XAE_FFC) & 0xffffff00; reg |= cnt; WRITE4(sc, XAE_FFC, reg); reg = (ma[0]); reg |= (ma[1] << 8); reg |= (ma[2] << 16); reg |= (ma[3] << 24); WRITE4(sc, XAE_FFV(0), reg); reg = ma[4]; reg |= ma[5] << 8; WRITE4(sc, XAE_FFV(1), reg); return (1); } static void xae_setup_rxfilter(struct xae_softc *sc) { if_t ifp; uint32_t reg; XAE_ASSERT_LOCKED(sc); ifp = sc->ifp; /* * Set the multicast (group) filter hash. */ if ((if_getflags(ifp) & (IFF_ALLMULTI | IFF_PROMISC)) != 0) { reg = READ4(sc, XAE_FFC); reg |= FFC_PM; WRITE4(sc, XAE_FFC, reg); } else { reg = READ4(sc, XAE_FFC); reg &= ~FFC_PM; WRITE4(sc, XAE_FFC, reg); if_foreach_llmaddr(ifp, xae_write_maddr, sc); } /* * Set the primary address. 
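* The unicast address is packed little-endian across UAW0 (bytes 0-3) * and UAW1 (bytes 4-5) below.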
*/ reg = sc->macaddr[0]; reg |= (sc->macaddr[1] << 8); reg |= (sc->macaddr[2] << 16); reg |= (sc->macaddr[3] << 24); WRITE4(sc, XAE_UAW0, reg); reg = sc->macaddr[4]; reg |= (sc->macaddr[5] << 8); WRITE4(sc, XAE_UAW1, reg); } static int xae_ioctl(if_t ifp, u_long cmd, caddr_t data) { struct xae_softc *sc; struct mii_data *mii; struct ifreq *ifr; int mask, error; sc = if_getsoftc(ifp); ifr = (struct ifreq *)data; error = 0; switch (cmd) { case SIOCSIFFLAGS: XAE_LOCK(sc); if (if_getflags(ifp) & IFF_UP) { if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) { if ((if_getflags(ifp) ^ sc->if_flags) & (IFF_PROMISC | IFF_ALLMULTI)) xae_setup_rxfilter(sc); } else { if (!sc->is_detaching) xae_init_locked(sc); } } else { if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) xae_stop_locked(sc); } sc->if_flags = if_getflags(ifp); XAE_UNLOCK(sc); break; case SIOCADDMULTI: case SIOCDELMULTI: if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) { XAE_LOCK(sc); xae_setup_rxfilter(sc); XAE_UNLOCK(sc); } break; case SIOCSIFMEDIA: case SIOCGIFMEDIA: mii = sc->mii_softc; error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, cmd); break; case SIOCSIFCAP: mask = if_getcapenable(ifp) ^ ifr->ifr_reqcap; if (mask & IFCAP_VLAN_MTU) { /* No work to do except acknowledge the change took */ if_togglecapenable(ifp, IFCAP_VLAN_MTU); } break; default: error = ether_ioctl(ifp, cmd, data); break; } return (error); } static void xae_intr(void *arg) { } static int xae_get_hwaddr(struct xae_softc *sc, uint8_t *hwaddr) { phandle_t node; int len; node = ofw_bus_get_node(sc->dev); /* Check if there is property */ if ((len = OF_getproplen(node, "local-mac-address")) <= 0) return (EINVAL); if (len != ETHER_ADDR_LEN) return (EINVAL); OF_getprop(node, "local-mac-address", hwaddr, ETHER_ADDR_LEN); return (0); } static int mdio_wait(struct xae_softc *sc) { uint32_t reg; int timeout; timeout = 200; do { reg = READ4(sc, XAE_MDIO_CTRL); if (reg & MDIO_CTRL_READY) break; DELAY(1); } while (timeout--); if (timeout <= 0) { printf("Failed to get MDIO ready\n"); return (1); } return (0); } static int xae_miibus_read_reg(device_t dev, int phy, int reg) { struct xae_softc *sc; uint32_t mii; int rv; sc = device_get_softc(dev); if (mdio_wait(sc)) return (0); mii = MDIO_CTRL_TX_OP_READ | MDIO_CTRL_INITIATE; mii |= (reg << MDIO_TX_REGAD_S); mii |= (phy << MDIO_TX_PHYAD_S); WRITE4(sc, XAE_MDIO_CTRL, mii); if (mdio_wait(sc)) return (0); rv = READ4(sc, XAE_MDIO_READ); return (rv); } static int xae_miibus_write_reg(device_t dev, int phy, int reg, int val) { struct xae_softc *sc; uint32_t mii; sc = device_get_softc(dev); if (mdio_wait(sc)) return (1); mii = MDIO_CTRL_TX_OP_WRITE | MDIO_CTRL_INITIATE; mii |= (reg << MDIO_TX_REGAD_S); mii |= (phy << MDIO_TX_PHYAD_S); WRITE4(sc, XAE_MDIO_WRITE, val); WRITE4(sc, XAE_MDIO_CTRL, mii); if (mdio_wait(sc)) return (1); return (0); } static void xae_phy_fixup(struct xae_softc *sc) { uint32_t reg; do { WRITE_TI_EREG(sc, DP83867_SGMIICTL1, SGMIICTL1_SGMII_6W); PHY_WR(sc, DP83867_PHYCR, PHYCR_SGMII_EN); reg = PHY_RD(sc, DP83867_CFG2); reg &= ~CFG2_SPEED_OPT_ATTEMPT_CNT_M; reg |= (CFG2_SPEED_OPT_ATTEMPT_CNT_4); reg |= CFG2_INTERRUPT_POLARITY; reg |= CFG2_SPEED_OPT_ENHANCED_EN; reg |= CFG2_SPEED_OPT_10M_EN; PHY_WR(sc, DP83867_CFG2, reg); WRITE_TI_EREG(sc, DP83867_CFG4, CFG4_SGMII_TMR); PHY_WR(sc, MII_BMCR, BMCR_AUTOEN | BMCR_FDX | BMCR_SPEED1 | BMCR_RESET); } while (PHY1_RD(sc, MII_BMCR) == 0x0ffff); do { PHY1_WR(sc, MII_BMCR, BMCR_AUTOEN | BMCR_FDX | BMCR_SPEED1 | BMCR_STARTNEG); DELAY(40000); } while ((PHY1_RD(sc, MII_BMSR) & BMSR_ACOMP) == 0); } 
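/* * xDMA glue. The MAC has no descriptor rings of its own in this driver: * TX and RX each use an xdma virtual channel, obtained from the standard * "tx"/"rx" OFW properties or, failing that, from the "axistream-connected" * node (see setup_xdma() below). */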
static int
get_xdma_std(struct xae_softc *sc)
{

	sc->xdma_tx = xdma_ofw_get(sc->dev, "tx");
	if (sc->xdma_tx == NULL)
		return (ENXIO);

	sc->xdma_rx = xdma_ofw_get(sc->dev, "rx");
	if (sc->xdma_rx == NULL) {
		xdma_put(sc->xdma_tx);
		return (ENXIO);
	}

	return (0);
}

static int
get_xdma_axistream(struct xae_softc *sc)
{
	struct axidma_fdt_data *data;
	device_t dma_dev;
	phandle_t node;
	pcell_t prop;
	size_t len;

	node = ofw_bus_get_node(sc->dev);
	len = OF_getencprop(node, "axistream-connected", &prop, sizeof(prop));
	if (len != sizeof(prop)) {
		device_printf(sc->dev,
		    "%s: Couldn't get axistream-connected prop.\n", __func__);
		return (ENXIO);
	}

	dma_dev = OF_device_from_xref(prop);
	if (dma_dev == NULL) {
		device_printf(sc->dev, "Could not get DMA device by xref.\n");
		return (ENXIO);
	}

	sc->xdma_tx = xdma_get(sc->dev, dma_dev);
	if (sc->xdma_tx == NULL) {
		device_printf(sc->dev, "Could not find DMA controller.\n");
		return (ENXIO);
	}
	data = malloc(sizeof(struct axidma_fdt_data),
	    M_DEVBUF, (M_WAITOK | M_ZERO));
	data->id = AXIDMA_TX_CHAN;
	sc->xdma_tx->data = data;

	sc->xdma_rx = xdma_get(sc->dev, dma_dev);
	if (sc->xdma_rx == NULL) {
		device_printf(sc->dev, "Could not find DMA controller.\n");
		return (ENXIO);
	}
	data = malloc(sizeof(struct axidma_fdt_data),
	    M_DEVBUF, (M_WAITOK | M_ZERO));
	data->id = AXIDMA_RX_CHAN;
	sc->xdma_rx->data = data;

	return (0);
}

static int
setup_xdma(struct xae_softc *sc)
{
	device_t dev;
	vmem_t *vmem;
	int error;

	dev = sc->dev;

	/* Get the xDMA controller. */
	error = get_xdma_std(sc);
	if (error) {
		device_printf(sc->dev,
		    "Fallback to axistream-connected property\n");
		error = get_xdma_axistream(sc);
	}

	if (error) {
		device_printf(dev, "Could not find xDMA controllers.\n");
		return (ENXIO);
	}

	/* Alloc xDMA TX virtual channel. */
	sc->xchan_tx = xdma_channel_alloc(sc->xdma_tx, 0);
	if (sc->xchan_tx == NULL) {
		device_printf(dev, "Can't alloc virtual DMA TX channel.\n");
		return (ENXIO);
	}

	/* Setup TX interrupt handler. */
	error = xdma_setup_intr(sc->xchan_tx, 0, xae_xdma_tx_intr,
	    sc, &sc->ih_tx);
	if (error) {
		device_printf(sc->dev,
		    "Can't setup xDMA TX interrupt handler.\n");
		return (ENXIO);
	}

	/* Alloc xDMA RX virtual channel. */
	sc->xchan_rx = xdma_channel_alloc(sc->xdma_rx, 0);
	if (sc->xchan_rx == NULL) {
		device_printf(dev, "Can't alloc virtual DMA RX channel.\n");
		return (ENXIO);
	}

	/* Setup RX interrupt handler. */
	error = xdma_setup_intr(sc->xchan_rx, XDMA_INTR_NET,
	    xae_xdma_rx_intr, sc, &sc->ih_rx);
	if (error) {
		device_printf(sc->dev,
		    "Can't setup xDMA RX interrupt handler.\n");
		return (ENXIO);
	}

	/* Setup bounce buffer. */
	vmem = xdma_get_memory(dev);
	if (vmem) {
		xchan_set_memory(sc->xchan_tx, vmem);
		xchan_set_memory(sc->xchan_rx, vmem);
	}

	xdma_prep_sg(sc->xchan_tx,
	    TX_QUEUE_SIZE,	/* xchan requests queue size */
	    MCLBYTES,		/* maxsegsize */
	    8,			/* maxnsegs */
	    16,			/* alignment */
	    0,			/* boundary */
	    BUS_SPACE_MAXADDR_32BIT,
	    BUS_SPACE_MAXADDR);

	xdma_prep_sg(sc->xchan_rx,
	    RX_QUEUE_SIZE,	/* xchan requests queue size */
	    MCLBYTES,		/* maxsegsize */
	    1,			/* maxnsegs */
	    16,			/* alignment */
	    0,			/* boundary */
	    BUS_SPACE_MAXADDR_32BIT,
	    BUS_SPACE_MAXADDR);

	return (0);
}

static int
xae_probe(device_t dev)
{

	if (!ofw_bus_status_okay(dev))
		return (ENXIO);

	if (!ofw_bus_is_compatible(dev, "xlnx,axi-ethernet-1.00.a"))
		return (ENXIO);

	device_set_desc(dev, "Xilinx AXI Ethernet");

	return (BUS_PROBE_DEFAULT);
}

static int
xae_attach(device_t dev)
{
	struct xae_softc *sc;
	if_t ifp;
	phandle_t node;
	uint32_t reg;
	int error;

	sc = device_get_softc(dev);
	sc->dev = dev;
	node = ofw_bus_get_node(dev);

	if (setup_xdma(sc) != 0) {
		device_printf(dev, "Could not setup xDMA.\n");
		return (ENXIO);
	}

	mtx_init(&sc->mtx, device_get_nameunit(sc->dev),
	    MTX_NETWORK_LOCK, MTX_DEF);

	sc->br = buf_ring_alloc(BUFRING_SIZE, M_DEVBUF,
	    M_NOWAIT, &sc->mtx);
	if (sc->br == NULL)
		return (ENOMEM);

	if (bus_alloc_resources(dev, xae_spec, sc->res)) {
		device_printf(dev, "could not allocate resources\n");
		return (ENXIO);
	}

	/* Memory interface. */
	sc->bst = rman_get_bustag(sc->res[0]);
	sc->bsh = rman_get_bushandle(sc->res[0]);

	device_printf(sc->dev, "Identification: %x\n",
	    READ4(sc, XAE_IDENT));

	/* Get the MAC address. */
	if (xae_get_hwaddr(sc, sc->macaddr)) {
		device_printf(sc->dev, "can't get mac\n");
		return (ENXIO);
	}

	/* Enable the MDIO interface and set the MDC clock divider. */
	reg = (MDIO_CLK_DIV_DEFAULT << MDIO_SETUP_CLK_DIV_S);
	reg |= MDIO_SETUP_ENABLE;
	WRITE4(sc, XAE_MDIO_SETUP, reg);
	if (mdio_wait(sc))
		return (ENXIO);

	callout_init_mtx(&sc->xae_callout, &sc->mtx, 0);

	/* Setup interrupt handler. */
	error = bus_setup_intr(dev, sc->res[1], INTR_TYPE_NET | INTR_MPSAFE,
	    NULL, xae_intr, sc, &sc->intr_cookie);
	if (error != 0) {
		device_printf(dev, "could not setup interrupt handler.\n");
		return (ENXIO);
	}

	/* Set up the ethernet interface. */
	sc->ifp = ifp = if_alloc(IFT_ETHER);
	if_setsoftc(ifp, sc);
	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
	if_setflags(ifp, IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST);
	if_setcapabilities(ifp, IFCAP_VLAN_MTU);
	if_setcapenable(ifp, if_getcapabilities(ifp));
	if_settransmitfn(ifp, xae_transmit);
	if_setqflushfn(ifp, xae_qflush);
	if_setioctlfn(ifp, xae_ioctl);
	if_setinitfn(ifp, xae_init);
	if_setsendqlen(ifp, TX_DESC_COUNT - 1);
	if_setsendqready(ifp);

	if (xae_get_phyaddr(node, &sc->phy_addr) != 0)
		return (ENXIO);

	/* Attach the mii driver. */
	error = mii_attach(dev, &sc->miibus, ifp, xae_media_change,
	    xae_media_status, BMSR_DEFCAPMASK, sc->phy_addr,
	    MII_OFFSET_ANY, 0);
	if (error != 0) {
		device_printf(dev, "PHY attach failed\n");
		return (ENXIO);
	}
	sc->mii_softc = device_get_softc(sc->miibus);

	/* Apply the VCU118 PHY workaround if requested. */
	if (OF_getproplen(node, "xlnx,vcu118") >= 0)
		xae_phy_fixup(sc);

	/* All ready to run, attach the ethernet interface. */
	ether_ifattach(ifp, sc->macaddr);
	sc->is_attached = true;

	xae_rx_enqueue(sc, NUM_RX_MBUF);
	xdma_queue_submit(sc->xchan_rx);

	return (0);
}
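
/*
 * A note on the scatter-gather limits above (reasoning, not taken from
 * the hardware documentation): a transmit packet may arrive as an mbuf
 * chain, so the TX channel accepts up to eight segments per request,
 * while receive buffers are single mbuf clusters and need only one
 * segment.  Both channels are restricted to 32-bit DMA addresses.
 */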
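
/*
 * Detach tears things down in roughly the reverse order of attach.
 * bus_generic_detach(9) detaches and deletes any attached children,
 * including the miibus instance, so no explicit device_delete_child()
 * call is needed here.
 */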
static int
xae_detach(device_t dev)
{
	struct xae_softc *sc;
	if_t ifp;

	sc = device_get_softc(dev);

	KASSERT(mtx_initialized(&sc->mtx),
	    ("%s: mutex not initialized", device_get_nameunit(dev)));

	ifp = sc->ifp;

	/* Only cleanup if attach succeeded. */
	if (device_is_attached(dev)) {
		XAE_LOCK(sc);
		xae_stop_locked(sc);
		XAE_UNLOCK(sc);
		callout_drain(&sc->xae_callout);
		ether_ifdetach(ifp);
	}

-	if (sc->miibus != NULL)
-		device_delete_child(dev, sc->miibus);
+	bus_generic_detach(dev);

	if (ifp != NULL)
		if_free(ifp);

	mtx_destroy(&sc->mtx);

	bus_teardown_intr(dev, sc->res[1], sc->intr_cookie);

	bus_release_resources(dev, xae_spec, sc->res);

	xdma_channel_free(sc->xchan_tx);
	xdma_channel_free(sc->xchan_rx);
	xdma_put(sc->xdma_tx);
	xdma_put(sc->xdma_rx);

	return (0);
}

static void
xae_miibus_statchg(device_t dev)
{
	struct xae_softc *sc;
	struct mii_data *mii;
	uint32_t reg;

	/*
	 * Called by the MII bus driver when the PHY establishes
	 * link to set the MAC interface registers.
	 */
	sc = device_get_softc(dev);

	XAE_ASSERT_LOCKED(sc);

	mii = sc->mii_softc;

	if (mii->mii_media_status & IFM_ACTIVE)
		sc->link_is_up = true;
	else
		sc->link_is_up = false;

	switch (IFM_SUBTYPE(mii->mii_media_active)) {
	case IFM_1000_T:
	case IFM_1000_SX:
		reg = SPEED_1000;
		break;
	case IFM_100_TX:
		reg = SPEED_100;
		break;
	case IFM_10_T:
		reg = SPEED_10;
		break;
	case IFM_NONE:
		sc->link_is_up = false;
		return;
	default:
		sc->link_is_up = false;
		device_printf(dev, "Unsupported media %u\n",
		    IFM_SUBTYPE(mii->mii_media_active));
		return;
	}

	WRITE4(sc, XAE_SPEED, reg);
}

static device_method_t xae_methods[] = {
	DEVMETHOD(device_probe,		xae_probe),
	DEVMETHOD(device_attach,	xae_attach),
	DEVMETHOD(device_detach,	xae_detach),

	/* MII Interface */
	DEVMETHOD(miibus_readreg,	xae_miibus_read_reg),
	DEVMETHOD(miibus_writereg,	xae_miibus_write_reg),
	DEVMETHOD(miibus_statchg,	xae_miibus_statchg),

	{ 0, 0 }
};

driver_t xae_driver = {
	"xae",
	xae_methods,
	sizeof(struct xae_softc),
};

DRIVER_MODULE(xae, simplebus, xae_driver, 0, 0);
DRIVER_MODULE(miibus, xae, miibus_driver, 0, 0);

MODULE_DEPEND(xae, ether, 1, 1, 1);
MODULE_DEPEND(xae, miibus, 1, 1, 1);