Index: head/sys/dev/cxgb/cxgb_main.c
===================================================================
--- head/sys/dev/cxgb/cxgb_main.c	(revision 338948)
+++ head/sys/dev/cxgb/cxgb_main.c	(revision 338949)
@@ -1,3655 +1,3658 @@
/**************************************************************************
SPDX-License-Identifier: BSD-2-Clause-FreeBSD

Copyright (c) 2007-2009, Chelsio Inc.
All rights reserved.

Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:

 1. Redistributions of source code must retain the above copyright notice,
    this list of conditions and the following disclaimer.

 2. Neither the name of the Chelsio Corporation nor the names of its
    contributors may be used to endorse or promote products derived from
    this software without specific prior written permission.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.

***************************************************************************/

#include
__FBSDID("$FreeBSD$");

#include "opt_inet.h"

#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include

#ifdef PRIV_SUPPORTED
#include
#endif

static int cxgb_setup_interrupts(adapter_t *);
static void cxgb_teardown_interrupts(adapter_t *);
static void cxgb_init(void *);
static int cxgb_init_locked(struct port_info *);
static int cxgb_uninit_locked(struct port_info *);
static int cxgb_uninit_synchronized(struct port_info *);
static int cxgb_ioctl(struct ifnet *, unsigned long, caddr_t);
static int cxgb_media_change(struct ifnet *);
static int cxgb_ifm_type(int);
static void cxgb_build_medialist(struct port_info *);
static void cxgb_media_status(struct ifnet *, struct ifmediareq *);
static uint64_t cxgb_get_counter(struct ifnet *, ift_counter);
static int setup_sge_qsets(adapter_t *);
static void cxgb_async_intr(void *);
static void cxgb_tick_handler(void *, int);
static void cxgb_tick(void *);
static void link_check_callout(void *);
static void check_link_status(void *, int);
static void setup_rss(adapter_t *sc);
static int alloc_filters(struct adapter *);
static int setup_hw_filters(struct adapter *);
static int set_filter(struct adapter *, int, const struct filter_info *);
static inline void mk_set_tcb_field(struct cpl_set_tcb_field *, unsigned int,
    unsigned int, u64, u64);
static inline void set_tcb_field_ulp(struct cpl_set_tcb_field *, unsigned int,
    unsigned int, u64, u64);
#ifdef TCP_OFFLOAD
static int cpl_not_handled(struct sge_qset *, struct rsp_desc *, struct mbuf *);
#endif

/* Attachment glue for the PCI controller end of the device.  Each port of
 * the device is attached separately, as defined later.
 */
static int cxgb_controller_probe(device_t);
static int cxgb_controller_attach(device_t);
static int cxgb_controller_detach(device_t);
static void cxgb_free(struct adapter *);
static __inline void reg_block_dump(struct adapter *ap, uint8_t *buf,
    unsigned int start, unsigned int end);
static void cxgb_get_regs(adapter_t *sc, struct ch_ifconf_regs *regs,
    uint8_t *buf);
static int cxgb_get_regs_len(void);
static void touch_bars(device_t dev);
static void cxgb_update_mac_settings(struct port_info *p);
#ifdef TCP_OFFLOAD
static int toe_capability(struct port_info *, int);
#endif

+/* Table for probing the cards.  The desc field isn't actually used */
+struct cxgb_ident {
+	uint16_t	vendor;
+	uint16_t	device;
+	int		index;
+	char		*desc;
+} cxgb_identifiers[] = {
+	{PCI_VENDOR_ID_CHELSIO, 0x0020, 0, "PE9000"},
+	{PCI_VENDOR_ID_CHELSIO, 0x0021, 1, "T302E"},
+	{PCI_VENDOR_ID_CHELSIO, 0x0022, 2, "T310E"},
+	{PCI_VENDOR_ID_CHELSIO, 0x0023, 3, "T320X"},
+	{PCI_VENDOR_ID_CHELSIO, 0x0024, 1, "T302X"},
+	{PCI_VENDOR_ID_CHELSIO, 0x0025, 3, "T320E"},
+	{PCI_VENDOR_ID_CHELSIO, 0x0026, 2, "T310X"},
+	{PCI_VENDOR_ID_CHELSIO, 0x0030, 2, "T3B10"},
+	{PCI_VENDOR_ID_CHELSIO, 0x0031, 3, "T3B20"},
+	{PCI_VENDOR_ID_CHELSIO, 0x0032, 1, "T3B02"},
+	{PCI_VENDOR_ID_CHELSIO, 0x0033, 4, "T3B04"},
+	{PCI_VENDOR_ID_CHELSIO, 0x0035, 6, "T3C10"},
+	{PCI_VENDOR_ID_CHELSIO, 0x0036, 3, "S320E-CR"},
+	{PCI_VENDOR_ID_CHELSIO, 0x0037, 7, "N320E-G2"},
+	{0, 0, 0, NULL}
+};
+
static device_method_t cxgb_controller_methods[] = {
	DEVMETHOD(device_probe, cxgb_controller_probe),
	DEVMETHOD(device_attach, cxgb_controller_attach),
	DEVMETHOD(device_detach, cxgb_controller_detach),

	DEVMETHOD_END
};

static driver_t cxgb_controller_driver = {
	"cxgbc",
	cxgb_controller_methods,
	sizeof(struct adapter)
};

static int cxgbc_mod_event(module_t, int, void *);
static devclass_t cxgb_controller_devclass;
DRIVER_MODULE(cxgbc, pci, cxgb_controller_driver, cxgb_controller_devclass,
    cxgbc_mod_event, 0);
+MODULE_PNP_INFO("U16:vendor;U16:device", pci, cxgbc, cxgb_identifiers,
+    nitems(cxgb_identifiers) - 1);
MODULE_VERSION(cxgbc, 1);
MODULE_DEPEND(cxgbc, firmware, 1, 1, 1);

/*
 * Attachment glue for the ports.  Attachment is done directly to the
 * controller device.
 */
static int cxgb_port_probe(device_t);
static int cxgb_port_attach(device_t);
static int cxgb_port_detach(device_t);

static device_method_t cxgb_port_methods[] = {
	DEVMETHOD(device_probe, cxgb_port_probe),
	DEVMETHOD(device_attach, cxgb_port_attach),
	DEVMETHOD(device_detach, cxgb_port_detach),
	{ 0, 0 }
};

static driver_t cxgb_port_driver = {
	"cxgb",
	cxgb_port_methods,
	0
};

static d_ioctl_t cxgb_extension_ioctl;
static d_open_t cxgb_extension_open;
static d_close_t cxgb_extension_close;

static struct cdevsw cxgb_cdevsw = {
	.d_version =	D_VERSION,
	.d_flags =	0,
	.d_open =	cxgb_extension_open,
	.d_close =	cxgb_extension_close,
	.d_ioctl =	cxgb_extension_ioctl,
	.d_name =	"cxgb",
};

static devclass_t cxgb_port_devclass;
DRIVER_MODULE(cxgb, cxgbc, cxgb_port_driver, cxgb_port_devclass, 0, 0);
MODULE_VERSION(cxgb, 1);

NETDUMP_DEFINE(cxgb);

static struct mtx t3_list_lock;
static SLIST_HEAD(, adapter) t3_list;
#ifdef TCP_OFFLOAD
static struct mtx t3_uld_list_lock;
static SLIST_HEAD(, uld_info) t3_uld_list;
#endif

/*
 * The driver uses the best interrupt scheme available on a platform in the
 * order MSI-X, MSI, legacy pin interrupts.
 * This parameter determines which
 * of these schemes the driver may consider as follows:
 *
 * msi = 2: choose from among all three options
 * msi = 1: only consider MSI and pin interrupts
 * msi = 0: force pin interrupts
 */
static int msi_allowed = 2;

SYSCTL_NODE(_hw, OID_AUTO, cxgb, CTLFLAG_RD, 0, "CXGB driver parameters");
SYSCTL_INT(_hw_cxgb, OID_AUTO, msi_allowed, CTLFLAG_RDTUN, &msi_allowed, 0,
    "MSI-X, MSI, INTx selector");

/*
 * The driver uses an auto-queue algorithm by default.
 * To disable it and force a single queue-set per port, use multiq = 0
 */
static int multiq = 1;
SYSCTL_INT(_hw_cxgb, OID_AUTO, multiq, CTLFLAG_RDTUN, &multiq, 0,
    "use min(ncpus/ports, 8) queue-sets per port");

/*
 * By default the driver will not update the firmware unless
 * it was compiled against a newer version
 *
 */
static int force_fw_update = 0;
SYSCTL_INT(_hw_cxgb, OID_AUTO, force_fw_update, CTLFLAG_RDTUN,
    &force_fw_update, 0, "update firmware even if up to date");

int cxgb_use_16k_clusters = -1;
SYSCTL_INT(_hw_cxgb, OID_AUTO, use_16k_clusters, CTLFLAG_RDTUN,
    &cxgb_use_16k_clusters, 0, "use 16kB clusters for the jumbo queue ");

static int nfilters = -1;
SYSCTL_INT(_hw_cxgb, OID_AUTO, nfilters, CTLFLAG_RDTUN,
    &nfilters, 0, "max number of entries in the filter table");

enum {
	MAX_TXQ_ENTRIES      = 16384,
	MAX_CTRL_TXQ_ENTRIES = 1024,
	MAX_RSPQ_ENTRIES     = 16384,
	MAX_RX_BUFFERS       = 16384,
	MAX_RX_JUMBO_BUFFERS = 16384,
	MIN_TXQ_ENTRIES      = 4,
	MIN_CTRL_TXQ_ENTRIES = 4,
	MIN_RSPQ_ENTRIES     = 32,
	MIN_FL_ENTRIES       = 32,
	MIN_FL_JUMBO_ENTRIES = 32
};

struct filter_info {
	u32 sip;
	u32 sip_mask;
	u32 dip;
	u16 sport;
	u16 dport;
	u32 vlan:12;
	u32 vlan_prio:3;
	u32 mac_hit:1;
	u32 mac_idx:4;
	u32 mac_vld:1;
	u32 pkt_type:2;
	u32 report_filter_id:1;
	u32 pass:1;
	u32 rss:1;
	u32 qset:3;
	u32 locked:1;
	u32 valid:1;
};

enum { FILTER_NO_VLAN_PRI = 7 };

#define EEPROM_MAGIC 0x38E2F10C

#define PORT_MASK ((1 << MAX_NPORTS) - 1)

-/* Table for probing the cards.  The desc field isn't actually used */
-struct cxgb_ident {
-	uint16_t	vendor;
-	uint16_t	device;
-	int		index;
-	char		*desc;
-} cxgb_identifiers[] = {
-	{PCI_VENDOR_ID_CHELSIO, 0x0020, 0, "PE9000"},
-	{PCI_VENDOR_ID_CHELSIO, 0x0021, 1, "T302E"},
-	{PCI_VENDOR_ID_CHELSIO, 0x0022, 2, "T310E"},
-	{PCI_VENDOR_ID_CHELSIO, 0x0023, 3, "T320X"},
-	{PCI_VENDOR_ID_CHELSIO, 0x0024, 1, "T302X"},
-	{PCI_VENDOR_ID_CHELSIO, 0x0025, 3, "T320E"},
-	{PCI_VENDOR_ID_CHELSIO, 0x0026, 2, "T310X"},
-	{PCI_VENDOR_ID_CHELSIO, 0x0030, 2, "T3B10"},
-	{PCI_VENDOR_ID_CHELSIO, 0x0031, 3, "T3B20"},
-	{PCI_VENDOR_ID_CHELSIO, 0x0032, 1, "T3B02"},
-	{PCI_VENDOR_ID_CHELSIO, 0x0033, 4, "T3B04"},
-	{PCI_VENDOR_ID_CHELSIO, 0x0035, 6, "T3C10"},
-	{PCI_VENDOR_ID_CHELSIO, 0x0036, 3, "S320E-CR"},
-	{PCI_VENDOR_ID_CHELSIO, 0x0037, 7, "N320E-G2"},
-	{0, 0, 0, NULL}
-};

static int set_eeprom(struct port_info *pi, const uint8_t *data, int len,
    int offset);

static __inline char
t3rev2char(struct adapter *adapter)
{
	char rev = 'z';

	switch(adapter->params.rev) {
	case T3_REV_A:
		rev = 'a';
		break;
	case T3_REV_B:
	case T3_REV_B2:
		rev = 'b';
		break;
	case T3_REV_C:
		rev = 'c';
		break;
	}
	return rev;
}

static struct cxgb_ident *
cxgb_get_ident(device_t dev)
{
	struct cxgb_ident *id;

	for (id = cxgb_identifiers; id->desc != NULL; id++) {
		if ((id->vendor == pci_get_vendor(dev)) &&
		    (id->device == pci_get_device(dev))) {
			return (id);
		}
	}
	return (NULL);
}

static const struct adapter_info *
cxgb_get_adapter_info(device_t dev)
{
	struct cxgb_ident *id;
	const struct adapter_info *ai;

	id = cxgb_get_ident(dev);
	if (id == NULL)
		return (NULL);

	ai = t3_get_adapter_info(id->index);
	return (ai);
}

static int
cxgb_controller_probe(device_t dev)
{
	const struct adapter_info *ai;
	char *ports, buf[80];
	int nports;

	ai = cxgb_get_adapter_info(dev);
	if (ai == NULL)
		return (ENXIO);

	nports = ai->nports0 + ai->nports1;
	if (nports == 1)
		ports = "port";
	else
		ports = "ports";

	snprintf(buf, sizeof(buf), "%s, %d %s", ai->desc, nports, ports);
	device_set_desc_copy(dev, buf);
	return (BUS_PROBE_DEFAULT);
}

#define FW_FNAME "cxgb_t3fw"
#define TPEEPROM_NAME "cxgb_t3%c_tp_eeprom"
#define TPSRAM_NAME "cxgb_t3%c_protocol_sram"

static int
upgrade_fw(adapter_t *sc)
{
	const struct firmware *fw;
	int status;
	u32 vers;

	if ((fw = firmware_get(FW_FNAME)) == NULL)  {
		device_printf(sc->dev, "Could not find firmware image %s\n", FW_FNAME);
		return (ENOENT);
	} else
		device_printf(sc->dev, "installing firmware on card\n");
	status = t3_load_fw(sc, (const uint8_t *)fw->data, fw->datasize);

	if (status != 0) {
		device_printf(sc->dev, "failed to install firmware: %d\n",
		    status);
	} else {
		t3_get_fw_version(sc, &vers);
		snprintf(&sc->fw_version[0], sizeof(sc->fw_version), "%d.%d.%d",
		    G_FW_VERSION_MAJOR(vers), G_FW_VERSION_MINOR(vers),
		    G_FW_VERSION_MICRO(vers));
	}

	firmware_put(fw, FIRMWARE_UNLOAD);
	return (status);
}

/*
 * The cxgb_controller_attach function is responsible for the initial
 * bringup of the device.  Its responsibilities include:
 *
 *  1. Determine if the device supports MSI or MSI-X.
 *  2. Allocate bus resources so that we can access the Base Address Register
 *  3. Create and initialize mutexes for the controller and its control
 *     logic such as SGE and MDIO.
 *  4. Call hardware specific setup routine for the adapter as a whole.
 *  5. Allocate the BAR for doing MSI-X.
 *  6. Setup the line interrupt iff MSI-X is not supported.
 *  7. Create the driver's taskq.
 *  8. Start one task queue service thread.
 *  9. Check if the firmware and SRAM are up-to-date.
They will be * auto-updated later (before FULL_INIT_DONE), if required. * 10. Create a child device for each MAC (port) * 11. Initialize T3 private state. * 12. Trigger the LED * 13. Setup offload iff supported. * 14. Reset/restart the tick callout. * 15. Attach sysctls * * NOTE: Any modification or deviation from this list MUST be reflected in * the above comment. Failure to do so will result in problems on various * error conditions including link flapping. */ static int cxgb_controller_attach(device_t dev) { device_t child; const struct adapter_info *ai; struct adapter *sc; int i, error = 0; uint32_t vers; int port_qsets = 1; int msi_needed, reg; char buf[80]; sc = device_get_softc(dev); sc->dev = dev; sc->msi_count = 0; ai = cxgb_get_adapter_info(dev); snprintf(sc->lockbuf, ADAPTER_LOCK_NAME_LEN, "cxgb controller lock %d", device_get_unit(dev)); ADAPTER_LOCK_INIT(sc, sc->lockbuf); snprintf(sc->reglockbuf, ADAPTER_LOCK_NAME_LEN, "SGE reg lock %d", device_get_unit(dev)); snprintf(sc->mdiolockbuf, ADAPTER_LOCK_NAME_LEN, "cxgb mdio lock %d", device_get_unit(dev)); snprintf(sc->elmerlockbuf, ADAPTER_LOCK_NAME_LEN, "cxgb elmer lock %d", device_get_unit(dev)); MTX_INIT(&sc->sge.reg_lock, sc->reglockbuf, NULL, MTX_SPIN); MTX_INIT(&sc->mdio_lock, sc->mdiolockbuf, NULL, MTX_DEF); MTX_INIT(&sc->elmer_lock, sc->elmerlockbuf, NULL, MTX_DEF); mtx_lock(&t3_list_lock); SLIST_INSERT_HEAD(&t3_list, sc, link); mtx_unlock(&t3_list_lock); /* find the PCIe link width and set max read request to 4KB*/ if (pci_find_cap(dev, PCIY_EXPRESS, ®) == 0) { uint16_t lnk; lnk = pci_read_config(dev, reg + PCIER_LINK_STA, 2); sc->link_width = (lnk & PCIEM_LINK_STA_WIDTH) >> 4; if (sc->link_width < 8 && (ai->caps & SUPPORTED_10000baseT_Full)) { device_printf(sc->dev, "PCIe x%d Link, expect reduced performance\n", sc->link_width); } pci_set_max_read_req(dev, 4096); } touch_bars(dev); pci_enable_busmaster(dev); /* * Allocate the registers and make them available to the driver. * The registers that we care about for NIC mode are in BAR 0 */ sc->regs_rid = PCIR_BAR(0); if ((sc->regs_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &sc->regs_rid, RF_ACTIVE)) == NULL) { device_printf(dev, "Cannot allocate BAR region 0\n"); error = ENXIO; goto out; } sc->bt = rman_get_bustag(sc->regs_res); sc->bh = rman_get_bushandle(sc->regs_res); sc->mmio_len = rman_get_size(sc->regs_res); for (i = 0; i < MAX_NPORTS; i++) sc->port[i].adapter = sc; if (t3_prep_adapter(sc, ai, 1) < 0) { printf("prep adapter failed\n"); error = ENODEV; goto out; } sc->udbs_rid = PCIR_BAR(2); sc->udbs_res = NULL; if (is_offload(sc) && ((sc->udbs_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &sc->udbs_rid, RF_ACTIVE)) == NULL)) { device_printf(dev, "Cannot allocate BAR region 1\n"); error = ENXIO; goto out; } /* Allocate the BAR for doing MSI-X. If it succeeds, try to allocate * enough messages for the queue sets. If that fails, try falling * back to MSI. If that fails, then try falling back to the legacy * interrupt pin model. 
*/ sc->msix_regs_rid = 0x20; if ((msi_allowed >= 2) && (sc->msix_regs_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &sc->msix_regs_rid, RF_ACTIVE)) != NULL) { if (multiq) port_qsets = min(SGE_QSETS/sc->params.nports, mp_ncpus); msi_needed = sc->msi_count = sc->params.nports * port_qsets + 1; if (pci_msix_count(dev) == 0 || (error = pci_alloc_msix(dev, &sc->msi_count)) != 0 || sc->msi_count != msi_needed) { device_printf(dev, "alloc msix failed - " "msi_count=%d, msi_needed=%d, err=%d; " "will try MSI\n", sc->msi_count, msi_needed, error); sc->msi_count = 0; port_qsets = 1; pci_release_msi(dev); bus_release_resource(dev, SYS_RES_MEMORY, sc->msix_regs_rid, sc->msix_regs_res); sc->msix_regs_res = NULL; } else { sc->flags |= USING_MSIX; sc->cxgb_intr = cxgb_async_intr; device_printf(dev, "using MSI-X interrupts (%u vectors)\n", sc->msi_count); } } if ((msi_allowed >= 1) && (sc->msi_count == 0)) { sc->msi_count = 1; if ((error = pci_alloc_msi(dev, &sc->msi_count)) != 0) { device_printf(dev, "alloc msi failed - " "err=%d; will try INTx\n", error); sc->msi_count = 0; port_qsets = 1; pci_release_msi(dev); } else { sc->flags |= USING_MSI; sc->cxgb_intr = t3_intr_msi; device_printf(dev, "using MSI interrupts\n"); } } if (sc->msi_count == 0) { device_printf(dev, "using line interrupts\n"); sc->cxgb_intr = t3b_intr; } /* Create a private taskqueue thread for handling driver events */ sc->tq = taskqueue_create("cxgb_taskq", M_NOWAIT, taskqueue_thread_enqueue, &sc->tq); if (sc->tq == NULL) { device_printf(dev, "failed to allocate controller task queue\n"); goto out; } taskqueue_start_threads(&sc->tq, 1, PI_NET, "%s taskq", device_get_nameunit(dev)); TASK_INIT(&sc->tick_task, 0, cxgb_tick_handler, sc); /* Create a periodic callout for checking adapter status */ callout_init(&sc->cxgb_tick_ch, 1); if (t3_check_fw_version(sc) < 0 || force_fw_update) { /* * Warn user that a firmware update will be attempted in init. */ device_printf(dev, "firmware needs to be updated to version %d.%d.%d\n", FW_VERSION_MAJOR, FW_VERSION_MINOR, FW_VERSION_MICRO); sc->flags &= ~FW_UPTODATE; } else { sc->flags |= FW_UPTODATE; } if (t3_check_tpsram_version(sc) < 0) { /* * Warn user that a firmware update will be attempted in init. */ device_printf(dev, "SRAM needs to be updated to version %c-%d.%d.%d\n", t3rev2char(sc), TP_VERSION_MAJOR, TP_VERSION_MINOR, TP_VERSION_MICRO); sc->flags &= ~TPS_UPTODATE; } else { sc->flags |= TPS_UPTODATE; } /* * Create a child device for each MAC. The ethernet attachment * will be done in these children. */ for (i = 0; i < (sc)->params.nports; i++) { struct port_info *pi; if ((child = device_add_child(dev, "cxgb", -1)) == NULL) { device_printf(dev, "failed to add child port\n"); error = EINVAL; goto out; } pi = &sc->port[i]; pi->adapter = sc; pi->nqsets = port_qsets; pi->first_qset = i*port_qsets; pi->port_id = i; pi->tx_chan = i >= ai->nports0; pi->txpkt_intf = pi->tx_chan ? 2 * (i - ai->nports0) + 1 : 2 * i; sc->rxpkt_map[pi->txpkt_intf] = i; sc->port[i].tx_chan = i >= ai->nports0; sc->portdev[i] = child; device_set_softc(child, pi); } if ((error = bus_generic_attach(dev)) != 0) goto out; /* initialize sge private state */ t3_sge_init_adapter(sc); t3_led_ready(sc); error = t3_get_fw_version(sc, &vers); if (error) goto out; snprintf(&sc->fw_version[0], sizeof(sc->fw_version), "%d.%d.%d", G_FW_VERSION_MAJOR(vers), G_FW_VERSION_MINOR(vers), G_FW_VERSION_MICRO(vers)); snprintf(buf, sizeof(buf), "%s %sNIC\t E/C: %s S/N: %s", ai->desc, is_offload(sc) ? 
"R" : "", sc->params.vpd.ec, sc->params.vpd.sn); device_set_desc_copy(dev, buf); snprintf(&sc->port_types[0], sizeof(sc->port_types), "%x%x%x%x", sc->params.vpd.port_type[0], sc->params.vpd.port_type[1], sc->params.vpd.port_type[2], sc->params.vpd.port_type[3]); device_printf(sc->dev, "Firmware Version %s\n", &sc->fw_version[0]); callout_reset(&sc->cxgb_tick_ch, hz, cxgb_tick, sc); t3_add_attach_sysctls(sc); #ifdef TCP_OFFLOAD for (i = 0; i < NUM_CPL_HANDLERS; i++) sc->cpl_handler[i] = cpl_not_handled; #endif t3_intr_clear(sc); error = cxgb_setup_interrupts(sc); out: if (error) cxgb_free(sc); return (error); } /* * The cxgb_controller_detach routine is called with the device is * unloaded from the system. */ static int cxgb_controller_detach(device_t dev) { struct adapter *sc; sc = device_get_softc(dev); cxgb_free(sc); return (0); } /* * The cxgb_free() is called by the cxgb_controller_detach() routine * to tear down the structures that were built up in * cxgb_controller_attach(), and should be the final piece of work * done when fully unloading the driver. * * * 1. Shutting down the threads started by the cxgb_controller_attach() * routine. * 2. Stopping the lower level device and all callouts (cxgb_down_locked()). * 3. Detaching all of the port devices created during the * cxgb_controller_attach() routine. * 4. Removing the device children created via cxgb_controller_attach(). * 5. Releasing PCI resources associated with the device. * 6. Turning off the offload support, iff it was turned on. * 7. Destroying the mutexes created in cxgb_controller_attach(). * */ static void cxgb_free(struct adapter *sc) { int i, nqsets = 0; ADAPTER_LOCK(sc); sc->flags |= CXGB_SHUTDOWN; ADAPTER_UNLOCK(sc); /* * Make sure all child devices are gone. */ bus_generic_detach(sc->dev); for (i = 0; i < (sc)->params.nports; i++) { if (sc->portdev[i] && device_delete_child(sc->dev, sc->portdev[i]) != 0) device_printf(sc->dev, "failed to delete child port\n"); nqsets += sc->port[i].nqsets; } /* * At this point, it is as if cxgb_port_detach has run on all ports, and * cxgb_down has run on the adapter. All interrupts have been silenced, * all open devices have been closed. */ KASSERT(sc->open_device_map == 0, ("%s: device(s) still open (%x)", __func__, sc->open_device_map)); for (i = 0; i < sc->params.nports; i++) { KASSERT(sc->port[i].ifp == NULL, ("%s: port %i undead!", __func__, i)); } /* * Finish off the adapter's callouts. */ callout_drain(&sc->cxgb_tick_ch); callout_drain(&sc->sge_timer_ch); /* * Release resources grabbed under FULL_INIT_DONE by cxgb_up. The * sysctls are cleaned up by the kernel linker. */ if (sc->flags & FULL_INIT_DONE) { t3_free_sge_resources(sc, nqsets); sc->flags &= ~FULL_INIT_DONE; } /* * Release all interrupt resources. */ cxgb_teardown_interrupts(sc); if (sc->flags & (USING_MSI | USING_MSIX)) { device_printf(sc->dev, "releasing msi message(s)\n"); pci_release_msi(sc->dev); } else { device_printf(sc->dev, "no msi message to release\n"); } if (sc->msix_regs_res != NULL) { bus_release_resource(sc->dev, SYS_RES_MEMORY, sc->msix_regs_rid, sc->msix_regs_res); } /* * Free the adapter's taskqueue. 
*/ if (sc->tq != NULL) { taskqueue_free(sc->tq); sc->tq = NULL; } free(sc->filters, M_DEVBUF); t3_sge_free(sc); if (sc->udbs_res != NULL) bus_release_resource(sc->dev, SYS_RES_MEMORY, sc->udbs_rid, sc->udbs_res); if (sc->regs_res != NULL) bus_release_resource(sc->dev, SYS_RES_MEMORY, sc->regs_rid, sc->regs_res); MTX_DESTROY(&sc->mdio_lock); MTX_DESTROY(&sc->sge.reg_lock); MTX_DESTROY(&sc->elmer_lock); mtx_lock(&t3_list_lock); SLIST_REMOVE(&t3_list, sc, adapter, link); mtx_unlock(&t3_list_lock); ADAPTER_LOCK_DEINIT(sc); } /** * setup_sge_qsets - configure SGE Tx/Rx/response queues * @sc: the controller softc * * Determines how many sets of SGE queues to use and initializes them. * We support multiple queue sets per port if we have MSI-X, otherwise * just one queue set per port. */ static int setup_sge_qsets(adapter_t *sc) { int i, j, err, irq_idx = 0, qset_idx = 0; u_int ntxq = SGE_TXQ_PER_SET; if ((err = t3_sge_alloc(sc)) != 0) { device_printf(sc->dev, "t3_sge_alloc returned %d\n", err); return (err); } if (sc->params.rev > 0 && !(sc->flags & USING_MSI)) irq_idx = -1; for (i = 0; i < (sc)->params.nports; i++) { struct port_info *pi = &sc->port[i]; for (j = 0; j < pi->nqsets; j++, qset_idx++) { err = t3_sge_alloc_qset(sc, qset_idx, (sc)->params.nports, (sc->flags & USING_MSIX) ? qset_idx + 1 : irq_idx, &sc->params.sge.qset[qset_idx], ntxq, pi); if (err) { t3_free_sge_resources(sc, qset_idx); device_printf(sc->dev, "t3_sge_alloc_qset failed with %d\n", err); return (err); } } } return (0); } static void cxgb_teardown_interrupts(adapter_t *sc) { int i; for (i = 0; i < SGE_QSETS; i++) { if (sc->msix_intr_tag[i] == NULL) { /* Should have been setup fully or not at all */ KASSERT(sc->msix_irq_res[i] == NULL && sc->msix_irq_rid[i] == 0, ("%s: half-done interrupt (%d).", __func__, i)); continue; } bus_teardown_intr(sc->dev, sc->msix_irq_res[i], sc->msix_intr_tag[i]); bus_release_resource(sc->dev, SYS_RES_IRQ, sc->msix_irq_rid[i], sc->msix_irq_res[i]); sc->msix_irq_res[i] = sc->msix_intr_tag[i] = NULL; sc->msix_irq_rid[i] = 0; } if (sc->intr_tag) { KASSERT(sc->irq_res != NULL, ("%s: half-done interrupt.", __func__)); bus_teardown_intr(sc->dev, sc->irq_res, sc->intr_tag); bus_release_resource(sc->dev, SYS_RES_IRQ, sc->irq_rid, sc->irq_res); sc->irq_res = sc->intr_tag = NULL; sc->irq_rid = 0; } } static int cxgb_setup_interrupts(adapter_t *sc) { struct resource *res; void *tag; int i, rid, err, intr_flag = sc->flags & (USING_MSI | USING_MSIX); sc->irq_rid = intr_flag ? 
1 : 0; sc->irq_res = bus_alloc_resource_any(sc->dev, SYS_RES_IRQ, &sc->irq_rid, RF_SHAREABLE | RF_ACTIVE); if (sc->irq_res == NULL) { device_printf(sc->dev, "Cannot allocate interrupt (%x, %u)\n", intr_flag, sc->irq_rid); err = EINVAL; sc->irq_rid = 0; } else { err = bus_setup_intr(sc->dev, sc->irq_res, INTR_MPSAFE | INTR_TYPE_NET, NULL, sc->cxgb_intr, sc, &sc->intr_tag); if (err) { device_printf(sc->dev, "Cannot set up interrupt (%x, %u, %d)\n", intr_flag, sc->irq_rid, err); bus_release_resource(sc->dev, SYS_RES_IRQ, sc->irq_rid, sc->irq_res); sc->irq_res = sc->intr_tag = NULL; sc->irq_rid = 0; } } /* That's all for INTx or MSI */ if (!(intr_flag & USING_MSIX) || err) return (err); bus_describe_intr(sc->dev, sc->irq_res, sc->intr_tag, "err"); for (i = 0; i < sc->msi_count - 1; i++) { rid = i + 2; res = bus_alloc_resource_any(sc->dev, SYS_RES_IRQ, &rid, RF_SHAREABLE | RF_ACTIVE); if (res == NULL) { device_printf(sc->dev, "Cannot allocate interrupt " "for message %d\n", rid); err = EINVAL; break; } err = bus_setup_intr(sc->dev, res, INTR_MPSAFE | INTR_TYPE_NET, NULL, t3_intr_msix, &sc->sge.qs[i], &tag); if (err) { device_printf(sc->dev, "Cannot set up interrupt " "for message %d (%d)\n", rid, err); bus_release_resource(sc->dev, SYS_RES_IRQ, rid, res); break; } sc->msix_irq_rid[i] = rid; sc->msix_irq_res[i] = res; sc->msix_intr_tag[i] = tag; bus_describe_intr(sc->dev, res, tag, "qs%d", i); } if (err) cxgb_teardown_interrupts(sc); return (err); } static int cxgb_port_probe(device_t dev) { struct port_info *p; char buf[80]; const char *desc; p = device_get_softc(dev); desc = p->phy.desc; snprintf(buf, sizeof(buf), "Port %d %s", p->port_id, desc); device_set_desc_copy(dev, buf); return (0); } static int cxgb_makedev(struct port_info *pi) { pi->port_cdev = make_dev(&cxgb_cdevsw, pi->ifp->if_dunit, UID_ROOT, GID_WHEEL, 0600, "%s", if_name(pi->ifp)); if (pi->port_cdev == NULL) return (ENOMEM); pi->port_cdev->si_drv1 = (void *)pi; return (0); } #define CXGB_CAP (IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_MTU | IFCAP_HWCSUM | \ IFCAP_VLAN_HWCSUM | IFCAP_TSO | IFCAP_JUMBO_MTU | IFCAP_LRO | \ IFCAP_VLAN_HWTSO | IFCAP_LINKSTATE | IFCAP_HWCSUM_IPV6) #define CXGB_CAP_ENABLE CXGB_CAP static int cxgb_port_attach(device_t dev) { struct port_info *p; struct ifnet *ifp; int err; struct adapter *sc; p = device_get_softc(dev); sc = p->adapter; snprintf(p->lockbuf, PORT_NAME_LEN, "cxgb port lock %d:%d", device_get_unit(device_get_parent(dev)), p->port_id); PORT_LOCK_INIT(p, p->lockbuf); callout_init(&p->link_check_ch, 1); TASK_INIT(&p->link_check_task, 0, check_link_status, p); /* Allocate an ifnet object and set it up */ ifp = p->ifp = if_alloc(IFT_ETHER); if (ifp == NULL) { device_printf(dev, "Cannot allocate ifnet\n"); return (ENOMEM); } if_initname(ifp, device_get_name(dev), device_get_unit(dev)); ifp->if_init = cxgb_init; ifp->if_softc = p; ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; ifp->if_ioctl = cxgb_ioctl; ifp->if_transmit = cxgb_transmit; ifp->if_qflush = cxgb_qflush; ifp->if_get_counter = cxgb_get_counter; ifp->if_capabilities = CXGB_CAP; #ifdef TCP_OFFLOAD if (is_offload(sc)) ifp->if_capabilities |= IFCAP_TOE4; #endif ifp->if_capenable = CXGB_CAP_ENABLE; ifp->if_hwassist = CSUM_TCP | CSUM_UDP | CSUM_IP | CSUM_TSO | CSUM_UDP_IPV6 | CSUM_TCP_IPV6; /* * Disable TSO on 4-port - it isn't supported by the firmware. 
*/ if (sc->params.nports > 2) { ifp->if_capabilities &= ~(IFCAP_TSO | IFCAP_VLAN_HWTSO); ifp->if_capenable &= ~(IFCAP_TSO | IFCAP_VLAN_HWTSO); ifp->if_hwassist &= ~CSUM_TSO; } ether_ifattach(ifp, p->hw_addr); /* Attach driver netdump methods. */ NETDUMP_SET(ifp, cxgb); #ifdef DEFAULT_JUMBO if (sc->params.nports <= 2) ifp->if_mtu = ETHERMTU_JUMBO; #endif if ((err = cxgb_makedev(p)) != 0) { printf("makedev failed %d\n", err); return (err); } /* Create a list of media supported by this port */ ifmedia_init(&p->media, IFM_IMASK, cxgb_media_change, cxgb_media_status); cxgb_build_medialist(p); t3_sge_init_port(p); return (err); } /* * cxgb_port_detach() is called via the device_detach methods when * cxgb_free() calls the bus_generic_detach. It is responsible for * removing the device from the view of the kernel, i.e. from all * interfaces lists etc. This routine is only called when the driver is * being unloaded, not when the link goes down. */ static int cxgb_port_detach(device_t dev) { struct port_info *p; struct adapter *sc; int i; p = device_get_softc(dev); sc = p->adapter; /* Tell cxgb_ioctl and if_init that the port is going away */ ADAPTER_LOCK(sc); SET_DOOMED(p); wakeup(&sc->flags); while (IS_BUSY(sc)) mtx_sleep(&sc->flags, &sc->lock, 0, "cxgbdtch", 0); SET_BUSY(sc); ADAPTER_UNLOCK(sc); if (p->port_cdev != NULL) destroy_dev(p->port_cdev); cxgb_uninit_synchronized(p); ether_ifdetach(p->ifp); for (i = p->first_qset; i < p->first_qset + p->nqsets; i++) { struct sge_qset *qs = &sc->sge.qs[i]; struct sge_txq *txq = &qs->txq[TXQ_ETH]; callout_drain(&txq->txq_watchdog); callout_drain(&txq->txq_timer); } PORT_LOCK_DEINIT(p); if_free(p->ifp); p->ifp = NULL; ADAPTER_LOCK(sc); CLR_BUSY(sc); wakeup_one(&sc->flags); ADAPTER_UNLOCK(sc); return (0); } void t3_fatal_err(struct adapter *sc) { u_int fw_status[4]; if (sc->flags & FULL_INIT_DONE) { t3_sge_stop(sc); t3_write_reg(sc, A_XGM_TX_CTRL, 0); t3_write_reg(sc, A_XGM_RX_CTRL, 0); t3_write_reg(sc, XGM_REG(A_XGM_TX_CTRL, 1), 0); t3_write_reg(sc, XGM_REG(A_XGM_RX_CTRL, 1), 0); t3_intr_disable(sc); } device_printf(sc->dev,"encountered fatal error, operation suspended\n"); if (!t3_cim_ctl_blk_read(sc, 0xa0, 4, fw_status)) device_printf(sc->dev, "FW_ status: 0x%x, 0x%x, 0x%x, 0x%x\n", fw_status[0], fw_status[1], fw_status[2], fw_status[3]); } int t3_os_find_pci_capability(adapter_t *sc, int cap) { device_t dev; struct pci_devinfo *dinfo; pcicfgregs *cfg; uint32_t status; uint8_t ptr; dev = sc->dev; dinfo = device_get_ivars(dev); cfg = &dinfo->cfg; status = pci_read_config(dev, PCIR_STATUS, 2); if (!(status & PCIM_STATUS_CAPPRESENT)) return (0); switch (cfg->hdrtype & PCIM_HDRTYPE) { case 0: case 1: ptr = PCIR_CAP_PTR; break; case 2: ptr = PCIR_CAP_PTR_2; break; default: return (0); break; } ptr = pci_read_config(dev, ptr, 1); while (ptr != 0) { if (pci_read_config(dev, ptr + PCICAP_ID, 1) == cap) return (ptr); ptr = pci_read_config(dev, ptr + PCICAP_NEXTPTR, 1); } return (0); } int t3_os_pci_save_state(struct adapter *sc) { device_t dev; struct pci_devinfo *dinfo; dev = sc->dev; dinfo = device_get_ivars(dev); pci_cfg_save(dev, dinfo, 0); return (0); } int t3_os_pci_restore_state(struct adapter *sc) { device_t dev; struct pci_devinfo *dinfo; dev = sc->dev; dinfo = device_get_ivars(dev); pci_cfg_restore(dev, dinfo); return (0); } /** * t3_os_link_changed - handle link status changes * @sc: the adapter associated with the link change * @port_id: the port index whose link status has changed * @link_status: the new status of the link * @speed: the new speed 
setting * @duplex: the new duplex setting * @fc: the new flow-control setting * * This is the OS-dependent handler for link status changes. The OS * neutral handler takes care of most of the processing for these events, * then calls this handler for any OS-specific processing. */ void t3_os_link_changed(adapter_t *adapter, int port_id, int link_status, int speed, int duplex, int fc, int mac_was_reset) { struct port_info *pi = &adapter->port[port_id]; struct ifnet *ifp = pi->ifp; /* no race with detach, so ifp should always be good */ KASSERT(ifp, ("%s: if detached.", __func__)); /* Reapply mac settings if they were lost due to a reset */ if (mac_was_reset) { PORT_LOCK(pi); cxgb_update_mac_settings(pi); PORT_UNLOCK(pi); } if (link_status) { ifp->if_baudrate = IF_Mbps(speed); if_link_state_change(ifp, LINK_STATE_UP); } else if_link_state_change(ifp, LINK_STATE_DOWN); } /** * t3_os_phymod_changed - handle PHY module changes * @phy: the PHY reporting the module change * @mod_type: new module type * * This is the OS-dependent handler for PHY module changes. It is * invoked when a PHY module is removed or inserted for any OS-specific * processing. */ void t3_os_phymod_changed(struct adapter *adap, int port_id) { static const char *mod_str[] = { NULL, "SR", "LR", "LRM", "TWINAX", "TWINAX-L", "unknown" }; struct port_info *pi = &adap->port[port_id]; int mod = pi->phy.modtype; if (mod != pi->media.ifm_cur->ifm_data) cxgb_build_medialist(pi); if (mod == phy_modtype_none) if_printf(pi->ifp, "PHY module unplugged\n"); else { KASSERT(mod < ARRAY_SIZE(mod_str), ("invalid PHY module type %d", mod)); if_printf(pi->ifp, "%s PHY module inserted\n", mod_str[mod]); } } void t3_os_set_hw_addr(adapter_t *adapter, int port_idx, u8 hw_addr[]) { /* * The ifnet might not be allocated before this gets called, * as this is called early on in attach by t3_prep_adapter * save the address off in the port structure */ if (cxgb_debug) printf("set_hw_addr on idx %d addr %6D\n", port_idx, hw_addr, ":"); bcopy(hw_addr, adapter->port[port_idx].hw_addr, ETHER_ADDR_LEN); } /* * Programs the XGMAC based on the settings in the ifnet. These settings * include MTU, MAC address, mcast addresses, etc. 
*/ static void cxgb_update_mac_settings(struct port_info *p) { struct ifnet *ifp = p->ifp; struct t3_rx_mode rm; struct cmac *mac = &p->mac; int mtu, hwtagging; PORT_LOCK_ASSERT_OWNED(p); bcopy(IF_LLADDR(ifp), p->hw_addr, ETHER_ADDR_LEN); mtu = ifp->if_mtu; if (ifp->if_capenable & IFCAP_VLAN_MTU) mtu += ETHER_VLAN_ENCAP_LEN; hwtagging = (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0; t3_mac_set_mtu(mac, mtu); t3_set_vlan_accel(p->adapter, 1 << p->tx_chan, hwtagging); t3_mac_set_address(mac, 0, p->hw_addr); t3_init_rx_mode(&rm, p); t3_mac_set_rx_mode(mac, &rm); } static int await_mgmt_replies(struct adapter *adap, unsigned long init_cnt, unsigned long n) { int attempts = 5; while (adap->sge.qs[0].rspq.offload_pkts < init_cnt + n) { if (!--attempts) return (ETIMEDOUT); t3_os_sleep(10); } return 0; } static int init_tp_parity(struct adapter *adap) { int i; struct mbuf *m; struct cpl_set_tcb_field *greq; unsigned long cnt = adap->sge.qs[0].rspq.offload_pkts; t3_tp_set_offload_mode(adap, 1); for (i = 0; i < 16; i++) { struct cpl_smt_write_req *req; m = m_gethdr(M_WAITOK, MT_DATA); req = mtod(m, struct cpl_smt_write_req *); m->m_len = m->m_pkthdr.len = sizeof(*req); memset(req, 0, sizeof(*req)); req->wr.wrh_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD)); OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SMT_WRITE_REQ, i)); req->iff = i; t3_mgmt_tx(adap, m); } for (i = 0; i < 2048; i++) { struct cpl_l2t_write_req *req; m = m_gethdr(M_WAITOK, MT_DATA); req = mtod(m, struct cpl_l2t_write_req *); m->m_len = m->m_pkthdr.len = sizeof(*req); memset(req, 0, sizeof(*req)); req->wr.wrh_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD)); OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_L2T_WRITE_REQ, i)); req->params = htonl(V_L2T_W_IDX(i)); t3_mgmt_tx(adap, m); } for (i = 0; i < 2048; i++) { struct cpl_rte_write_req *req; m = m_gethdr(M_WAITOK, MT_DATA); req = mtod(m, struct cpl_rte_write_req *); m->m_len = m->m_pkthdr.len = sizeof(*req); memset(req, 0, sizeof(*req)); req->wr.wrh_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD)); OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_RTE_WRITE_REQ, i)); req->l2t_idx = htonl(V_L2T_W_IDX(i)); t3_mgmt_tx(adap, m); } m = m_gethdr(M_WAITOK, MT_DATA); greq = mtod(m, struct cpl_set_tcb_field *); m->m_len = m->m_pkthdr.len = sizeof(*greq); memset(greq, 0, sizeof(*greq)); greq->wr.wrh_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD)); OPCODE_TID(greq) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, 0)); greq->mask = htobe64(1); t3_mgmt_tx(adap, m); i = await_mgmt_replies(adap, cnt, 16 + 2048 + 2048 + 1); t3_tp_set_offload_mode(adap, 0); return (i); } /** * setup_rss - configure Receive Side Steering (per-queue connection demux) * @adap: the adapter * * Sets up RSS to distribute packets to multiple receive queues. We * configure the RSS CPU lookup table to distribute to the number of HW * receive queues, and the response queue lookup table to narrow that * down to the response queues actually configured for each port. * We always configure the RSS mapping for two ports since the mapping * table has plenty of entries. */ static void setup_rss(adapter_t *adap) { int i; u_int nq[2]; uint8_t cpus[SGE_QSETS + 1]; uint16_t rspq_map[RSS_TABLE_SIZE]; for (i = 0; i < SGE_QSETS; ++i) cpus[i] = i; cpus[SGE_QSETS] = 0xff; nq[0] = nq[1] = 0; for_each_port(adap, i) { const struct port_info *pi = adap2pinfo(adap, i); nq[pi->tx_chan] += pi->nqsets; } for (i = 0; i < RSS_TABLE_SIZE / 2; ++i) { rspq_map[i] = nq[0] ? i % nq[0] : 0; rspq_map[i + RSS_TABLE_SIZE / 2] = nq[1] ? 
i % nq[1] + nq[0] : 0; } /* Calculate the reverse RSS map table */ for (i = 0; i < SGE_QSETS; ++i) adap->rrss_map[i] = 0xff; for (i = 0; i < RSS_TABLE_SIZE; ++i) if (adap->rrss_map[rspq_map[i]] == 0xff) adap->rrss_map[rspq_map[i]] = i; t3_config_rss(adap, F_RQFEEDBACKENABLE | F_TNLLKPEN | F_TNLMAPEN | F_TNLPRTEN | F_TNL2TUPEN | F_TNL4TUPEN | F_OFDMAPEN | F_RRCPLMAPEN | V_RRCPLCPUSIZE(6) | F_HASHTOEPLITZ, cpus, rspq_map); } static void send_pktsched_cmd(struct adapter *adap, int sched, int qidx, int lo, int hi, int port) { struct mbuf *m; struct mngt_pktsched_wr *req; m = m_gethdr(M_NOWAIT, MT_DATA); if (m) { req = mtod(m, struct mngt_pktsched_wr *); req->wr.wrh_hi = htonl(V_WR_OP(FW_WROPCODE_MNGT)); req->mngt_opcode = FW_MNGTOPCODE_PKTSCHED_SET; req->sched = sched; req->idx = qidx; req->min = lo; req->max = hi; req->binding = port; m->m_len = m->m_pkthdr.len = sizeof(*req); t3_mgmt_tx(adap, m); } } static void bind_qsets(adapter_t *sc) { int i, j; for (i = 0; i < (sc)->params.nports; ++i) { const struct port_info *pi = adap2pinfo(sc, i); for (j = 0; j < pi->nqsets; ++j) { send_pktsched_cmd(sc, 1, pi->first_qset + j, -1, -1, pi->tx_chan); } } } static void update_tpeeprom(struct adapter *adap) { const struct firmware *tpeeprom; uint32_t version; unsigned int major, minor; int ret, len; char rev, name[32]; t3_seeprom_read(adap, TP_SRAM_OFFSET, &version); major = G_TP_VERSION_MAJOR(version); minor = G_TP_VERSION_MINOR(version); if (major == TP_VERSION_MAJOR && minor == TP_VERSION_MINOR) return; rev = t3rev2char(adap); snprintf(name, sizeof(name), TPEEPROM_NAME, rev); tpeeprom = firmware_get(name); if (tpeeprom == NULL) { device_printf(adap->dev, "could not load TP EEPROM: unable to load %s\n", name); return; } len = tpeeprom->datasize - 4; ret = t3_check_tpsram(adap, tpeeprom->data, tpeeprom->datasize); if (ret) goto release_tpeeprom; if (len != TP_SRAM_LEN) { device_printf(adap->dev, "%s length is wrong len=%d expected=%d\n", name, len, TP_SRAM_LEN); return; } ret = set_eeprom(&adap->port[0], tpeeprom->data, tpeeprom->datasize, TP_SRAM_OFFSET); if (!ret) { device_printf(adap->dev, "Protocol SRAM image updated in EEPROM to %d.%d.%d\n", TP_VERSION_MAJOR, TP_VERSION_MINOR, TP_VERSION_MICRO); } else device_printf(adap->dev, "Protocol SRAM image update in EEPROM failed\n"); release_tpeeprom: firmware_put(tpeeprom, FIRMWARE_UNLOAD); return; } static int update_tpsram(struct adapter *adap) { const struct firmware *tpsram; int ret; char rev, name[32]; rev = t3rev2char(adap); snprintf(name, sizeof(name), TPSRAM_NAME, rev); update_tpeeprom(adap); tpsram = firmware_get(name); if (tpsram == NULL){ device_printf(adap->dev, "could not load TP SRAM\n"); return (EINVAL); } else device_printf(adap->dev, "updating TP SRAM\n"); ret = t3_check_tpsram(adap, tpsram->data, tpsram->datasize); if (ret) goto release_tpsram; ret = t3_set_proto_sram(adap, tpsram->data); if (ret) device_printf(adap->dev, "loading protocol SRAM failed\n"); release_tpsram: firmware_put(tpsram, FIRMWARE_UNLOAD); return ret; } /** * cxgb_up - enable the adapter * @adap: adapter being enabled * * Called when the first port is enabled, this function performs the * actions necessary to make an adapter operational, such as completing * the initialization of HW modules, and enabling interrupts. 
*/ static int cxgb_up(struct adapter *sc) { int err = 0; unsigned int mxf = t3_mc5_size(&sc->mc5) - MC5_MIN_TIDS; KASSERT(sc->open_device_map == 0, ("%s: device(s) already open (%x)", __func__, sc->open_device_map)); if ((sc->flags & FULL_INIT_DONE) == 0) { ADAPTER_LOCK_ASSERT_NOTOWNED(sc); if ((sc->flags & FW_UPTODATE) == 0) if ((err = upgrade_fw(sc))) goto out; if ((sc->flags & TPS_UPTODATE) == 0) if ((err = update_tpsram(sc))) goto out; if (is_offload(sc) && nfilters != 0) { sc->params.mc5.nservers = 0; if (nfilters < 0) sc->params.mc5.nfilters = mxf; else sc->params.mc5.nfilters = min(nfilters, mxf); } err = t3_init_hw(sc, 0); if (err) goto out; t3_set_reg_field(sc, A_TP_PARA_REG5, 0, F_RXDDPOFFINIT); t3_write_reg(sc, A_ULPRX_TDDP_PSZ, V_HPZ0(PAGE_SHIFT - 12)); err = setup_sge_qsets(sc); if (err) goto out; alloc_filters(sc); setup_rss(sc); t3_add_configured_sysctls(sc); sc->flags |= FULL_INIT_DONE; } t3_intr_clear(sc); t3_sge_start(sc); t3_intr_enable(sc); if (sc->params.rev >= T3_REV_C && !(sc->flags & TP_PARITY_INIT) && is_offload(sc) && init_tp_parity(sc) == 0) sc->flags |= TP_PARITY_INIT; if (sc->flags & TP_PARITY_INIT) { t3_write_reg(sc, A_TP_INT_CAUSE, F_CMCACHEPERR | F_ARPLUTPERR); t3_write_reg(sc, A_TP_INT_ENABLE, 0x7fbfffff); } if (!(sc->flags & QUEUES_BOUND)) { bind_qsets(sc); setup_hw_filters(sc); sc->flags |= QUEUES_BOUND; } t3_sge_reset_adapter(sc); out: return (err); } /* * Called when the last open device is closed. Does NOT undo all of cxgb_up's * work. Specifically, the resources grabbed under FULL_INIT_DONE are released * during controller_detach, not here. */ static void cxgb_down(struct adapter *sc) { t3_sge_stop(sc); t3_intr_disable(sc); } /* * if_init for cxgb ports. */ static void cxgb_init(void *arg) { struct port_info *p = arg; struct adapter *sc = p->adapter; ADAPTER_LOCK(sc); cxgb_init_locked(p); /* releases adapter lock */ ADAPTER_LOCK_ASSERT_NOTOWNED(sc); } static int cxgb_init_locked(struct port_info *p) { struct adapter *sc = p->adapter; struct ifnet *ifp = p->ifp; struct cmac *mac = &p->mac; int i, rc = 0, may_sleep = 0, gave_up_lock = 0; ADAPTER_LOCK_ASSERT_OWNED(sc); while (!IS_DOOMED(p) && IS_BUSY(sc)) { gave_up_lock = 1; if (mtx_sleep(&sc->flags, &sc->lock, PCATCH, "cxgbinit", 0)) { rc = EINTR; goto done; } } if (IS_DOOMED(p)) { rc = ENXIO; goto done; } KASSERT(!IS_BUSY(sc), ("%s: controller busy.", __func__)); /* * The code that runs during one-time adapter initialization can sleep * so it's important not to hold any locks across it. */ may_sleep = sc->flags & FULL_INIT_DONE ? 0 : 1; if (may_sleep) { SET_BUSY(sc); gave_up_lock = 1; ADAPTER_UNLOCK(sc); } if (sc->open_device_map == 0 && ((rc = cxgb_up(sc)) != 0)) goto done; PORT_LOCK(p); if (isset(&sc->open_device_map, p->port_id) && (ifp->if_drv_flags & IFF_DRV_RUNNING)) { PORT_UNLOCK(p); goto done; } t3_port_intr_enable(sc, p->port_id); if (!mac->multiport) t3_mac_init(mac); cxgb_update_mac_settings(p); t3_link_start(&p->phy, mac, &p->link_config); t3_mac_enable(mac, MAC_DIRECTION_RX | MAC_DIRECTION_TX); ifp->if_drv_flags |= IFF_DRV_RUNNING; ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; PORT_UNLOCK(p); for (i = p->first_qset; i < p->first_qset + p->nqsets; i++) { struct sge_qset *qs = &sc->sge.qs[i]; struct sge_txq *txq = &qs->txq[TXQ_ETH]; callout_reset_on(&txq->txq_watchdog, hz, cxgb_tx_watchdog, qs, txq->txq_watchdog.c_cpu); } /* all ok */ setbit(&sc->open_device_map, p->port_id); callout_reset(&p->link_check_ch, p->phy.caps & SUPPORTED_LINK_IRQ ? 
hz * 3 : hz / 4, link_check_callout, p); done: if (may_sleep) { ADAPTER_LOCK(sc); KASSERT(IS_BUSY(sc), ("%s: controller not busy.", __func__)); CLR_BUSY(sc); } if (gave_up_lock) wakeup_one(&sc->flags); ADAPTER_UNLOCK(sc); return (rc); } static int cxgb_uninit_locked(struct port_info *p) { struct adapter *sc = p->adapter; int rc; ADAPTER_LOCK_ASSERT_OWNED(sc); while (!IS_DOOMED(p) && IS_BUSY(sc)) { if (mtx_sleep(&sc->flags, &sc->lock, PCATCH, "cxgbunin", 0)) { rc = EINTR; goto done; } } if (IS_DOOMED(p)) { rc = ENXIO; goto done; } KASSERT(!IS_BUSY(sc), ("%s: controller busy.", __func__)); SET_BUSY(sc); ADAPTER_UNLOCK(sc); rc = cxgb_uninit_synchronized(p); ADAPTER_LOCK(sc); KASSERT(IS_BUSY(sc), ("%s: controller not busy.", __func__)); CLR_BUSY(sc); wakeup_one(&sc->flags); done: ADAPTER_UNLOCK(sc); return (rc); } /* * Called on "ifconfig down", and from port_detach */ static int cxgb_uninit_synchronized(struct port_info *pi) { struct adapter *sc = pi->adapter; struct ifnet *ifp = pi->ifp; /* * taskqueue_drain may cause a deadlock if the adapter lock is held. */ ADAPTER_LOCK_ASSERT_NOTOWNED(sc); /* * Clear this port's bit from the open device map, and then drain all * the tasks that can access/manipulate this port's port_info or ifp. * We disable this port's interrupts here and so the slow/ext * interrupt tasks won't be enqueued. The tick task will continue to * be enqueued every second but the runs after this drain will not see * this port in the open device map. * * A well behaved task must take open_device_map into account and ignore * ports that are not open. */ clrbit(&sc->open_device_map, pi->port_id); t3_port_intr_disable(sc, pi->port_id); taskqueue_drain(sc->tq, &sc->slow_intr_task); taskqueue_drain(sc->tq, &sc->tick_task); callout_drain(&pi->link_check_ch); taskqueue_drain(sc->tq, &pi->link_check_task); PORT_LOCK(pi); ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE); /* disable pause frames */ t3_set_reg_field(sc, A_XGM_TX_CFG + pi->mac.offset, F_TXPAUSEEN, 0); /* Reset RX FIFO HWM */ t3_set_reg_field(sc, A_XGM_RXFIFO_CFG + pi->mac.offset, V_RXFIFOPAUSEHWM(M_RXFIFOPAUSEHWM), 0); DELAY(100 * 1000); /* Wait for TXFIFO empty */ t3_wait_op_done(sc, A_XGM_TXFIFO_CFG + pi->mac.offset, F_TXFIFO_EMPTY, 1, 20, 5); DELAY(100 * 1000); t3_mac_disable(&pi->mac, MAC_DIRECTION_RX); pi->phy.ops->power_down(&pi->phy, 1); PORT_UNLOCK(pi); pi->link_config.link_ok = 0; t3_os_link_changed(sc, pi->port_id, 0, 0, 0, 0, 0); if (sc->open_device_map == 0) cxgb_down(pi->adapter); return (0); } /* * Mark lro enabled or disabled in all qsets for this port */ static int cxgb_set_lro(struct port_info *p, int enabled) { int i; struct adapter *adp = p->adapter; struct sge_qset *q; for (i = 0; i < p->nqsets; i++) { q = &adp->sge.qs[p->first_qset + i]; q->lro.enabled = (enabled != 0); } return (0); } static int cxgb_ioctl(struct ifnet *ifp, unsigned long command, caddr_t data) { struct port_info *p = ifp->if_softc; struct adapter *sc = p->adapter; struct ifreq *ifr = (struct ifreq *)data; int flags, error = 0, mtu; uint32_t mask; switch (command) { case SIOCSIFMTU: ADAPTER_LOCK(sc); error = IS_DOOMED(p) ? ENXIO : (IS_BUSY(sc) ? 
EBUSY : 0); if (error) { fail: ADAPTER_UNLOCK(sc); return (error); } mtu = ifr->ifr_mtu; if ((mtu < ETHERMIN) || (mtu > ETHERMTU_JUMBO)) { error = EINVAL; } else { ifp->if_mtu = mtu; PORT_LOCK(p); cxgb_update_mac_settings(p); PORT_UNLOCK(p); } ADAPTER_UNLOCK(sc); break; case SIOCSIFFLAGS: ADAPTER_LOCK(sc); if (IS_DOOMED(p)) { error = ENXIO; goto fail; } if (ifp->if_flags & IFF_UP) { if (ifp->if_drv_flags & IFF_DRV_RUNNING) { flags = p->if_flags; if (((ifp->if_flags ^ flags) & IFF_PROMISC) || ((ifp->if_flags ^ flags) & IFF_ALLMULTI)) { if (IS_BUSY(sc)) { error = EBUSY; goto fail; } PORT_LOCK(p); cxgb_update_mac_settings(p); PORT_UNLOCK(p); } ADAPTER_UNLOCK(sc); } else error = cxgb_init_locked(p); p->if_flags = ifp->if_flags; } else if (ifp->if_drv_flags & IFF_DRV_RUNNING) error = cxgb_uninit_locked(p); else ADAPTER_UNLOCK(sc); ADAPTER_LOCK_ASSERT_NOTOWNED(sc); break; case SIOCADDMULTI: case SIOCDELMULTI: ADAPTER_LOCK(sc); error = IS_DOOMED(p) ? ENXIO : (IS_BUSY(sc) ? EBUSY : 0); if (error) goto fail; if (ifp->if_drv_flags & IFF_DRV_RUNNING) { PORT_LOCK(p); cxgb_update_mac_settings(p); PORT_UNLOCK(p); } ADAPTER_UNLOCK(sc); break; case SIOCSIFCAP: ADAPTER_LOCK(sc); error = IS_DOOMED(p) ? ENXIO : (IS_BUSY(sc) ? EBUSY : 0); if (error) goto fail; mask = ifr->ifr_reqcap ^ ifp->if_capenable; if (mask & IFCAP_TXCSUM) { ifp->if_capenable ^= IFCAP_TXCSUM; ifp->if_hwassist ^= (CSUM_TCP | CSUM_UDP | CSUM_IP); if (IFCAP_TSO4 & ifp->if_capenable && !(IFCAP_TXCSUM & ifp->if_capenable)) { ifp->if_capenable &= ~IFCAP_TSO4; if_printf(ifp, "tso4 disabled due to -txcsum.\n"); } } if (mask & IFCAP_TXCSUM_IPV6) { ifp->if_capenable ^= IFCAP_TXCSUM_IPV6; ifp->if_hwassist ^= (CSUM_UDP_IPV6 | CSUM_TCP_IPV6); if (IFCAP_TSO6 & ifp->if_capenable && !(IFCAP_TXCSUM_IPV6 & ifp->if_capenable)) { ifp->if_capenable &= ~IFCAP_TSO6; if_printf(ifp, "tso6 disabled due to -txcsum6.\n"); } } if (mask & IFCAP_RXCSUM) ifp->if_capenable ^= IFCAP_RXCSUM; if (mask & IFCAP_RXCSUM_IPV6) ifp->if_capenable ^= IFCAP_RXCSUM_IPV6; /* * Note that we leave CSUM_TSO alone (it is always set). The * kernel takes both IFCAP_TSOx and CSUM_TSO into account before * sending a TSO request our way, so it's sufficient to toggle * IFCAP_TSOx only. 
*/ if (mask & IFCAP_TSO4) { if (!(IFCAP_TSO4 & ifp->if_capenable) && !(IFCAP_TXCSUM & ifp->if_capenable)) { if_printf(ifp, "enable txcsum first.\n"); error = EAGAIN; goto fail; } ifp->if_capenable ^= IFCAP_TSO4; } if (mask & IFCAP_TSO6) { if (!(IFCAP_TSO6 & ifp->if_capenable) && !(IFCAP_TXCSUM_IPV6 & ifp->if_capenable)) { if_printf(ifp, "enable txcsum6 first.\n"); error = EAGAIN; goto fail; } ifp->if_capenable ^= IFCAP_TSO6; } if (mask & IFCAP_LRO) { ifp->if_capenable ^= IFCAP_LRO; /* Safe to do this even if cxgb_up not called yet */ cxgb_set_lro(p, ifp->if_capenable & IFCAP_LRO); } #ifdef TCP_OFFLOAD if (mask & IFCAP_TOE4) { int enable = (ifp->if_capenable ^ mask) & IFCAP_TOE4; error = toe_capability(p, enable); if (error == 0) ifp->if_capenable ^= mask; } #endif if (mask & IFCAP_VLAN_HWTAGGING) { ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING; if (ifp->if_drv_flags & IFF_DRV_RUNNING) { PORT_LOCK(p); cxgb_update_mac_settings(p); PORT_UNLOCK(p); } } if (mask & IFCAP_VLAN_MTU) { ifp->if_capenable ^= IFCAP_VLAN_MTU; if (ifp->if_drv_flags & IFF_DRV_RUNNING) { PORT_LOCK(p); cxgb_update_mac_settings(p); PORT_UNLOCK(p); } } if (mask & IFCAP_VLAN_HWTSO) ifp->if_capenable ^= IFCAP_VLAN_HWTSO; if (mask & IFCAP_VLAN_HWCSUM) ifp->if_capenable ^= IFCAP_VLAN_HWCSUM; #ifdef VLAN_CAPABILITIES VLAN_CAPABILITIES(ifp); #endif ADAPTER_UNLOCK(sc); break; case SIOCSIFMEDIA: case SIOCGIFMEDIA: error = ifmedia_ioctl(ifp, ifr, &p->media, command); break; default: error = ether_ioctl(ifp, command, data); } return (error); } static int cxgb_media_change(struct ifnet *ifp) { return (EOPNOTSUPP); } /* * Translates phy->modtype to the correct Ethernet media subtype. */ static int cxgb_ifm_type(int mod) { switch (mod) { case phy_modtype_sr: return (IFM_10G_SR); case phy_modtype_lr: return (IFM_10G_LR); case phy_modtype_lrm: return (IFM_10G_LRM); case phy_modtype_twinax: return (IFM_10G_TWINAX); case phy_modtype_twinax_long: return (IFM_10G_TWINAX_LONG); case phy_modtype_none: return (IFM_NONE); case phy_modtype_unknown: return (IFM_UNKNOWN); } KASSERT(0, ("%s: modtype %d unknown", __func__, mod)); return (IFM_UNKNOWN); } /* * Rebuilds the ifmedia list for this port, and sets the current media. 
*/ static void cxgb_build_medialist(struct port_info *p) { struct cphy *phy = &p->phy; struct ifmedia *media = &p->media; int mod = phy->modtype; int m = IFM_ETHER | IFM_FDX; PORT_LOCK(p); ifmedia_removeall(media); if (phy->caps & SUPPORTED_TP && phy->caps & SUPPORTED_Autoneg) { /* Copper (RJ45) */ if (phy->caps & SUPPORTED_10000baseT_Full) ifmedia_add(media, m | IFM_10G_T, mod, NULL); if (phy->caps & SUPPORTED_1000baseT_Full) ifmedia_add(media, m | IFM_1000_T, mod, NULL); if (phy->caps & SUPPORTED_100baseT_Full) ifmedia_add(media, m | IFM_100_TX, mod, NULL); if (phy->caps & SUPPORTED_10baseT_Full) ifmedia_add(media, m | IFM_10_T, mod, NULL); ifmedia_add(media, IFM_ETHER | IFM_AUTO, mod, NULL); ifmedia_set(media, IFM_ETHER | IFM_AUTO); } else if (phy->caps & SUPPORTED_TP) { /* Copper (CX4) */ KASSERT(phy->caps & SUPPORTED_10000baseT_Full, ("%s: unexpected cap 0x%x", __func__, phy->caps)); ifmedia_add(media, m | IFM_10G_CX4, mod, NULL); ifmedia_set(media, m | IFM_10G_CX4); } else if (phy->caps & SUPPORTED_FIBRE && phy->caps & SUPPORTED_10000baseT_Full) { /* 10G optical (but includes SFP+ twinax) */ m |= cxgb_ifm_type(mod); if (IFM_SUBTYPE(m) == IFM_NONE) m &= ~IFM_FDX; ifmedia_add(media, m, mod, NULL); ifmedia_set(media, m); } else if (phy->caps & SUPPORTED_FIBRE && phy->caps & SUPPORTED_1000baseT_Full) { /* 1G optical */ /* XXX: Lie and claim to be SX, could actually be any 1G-X */ ifmedia_add(media, m | IFM_1000_SX, mod, NULL); ifmedia_set(media, m | IFM_1000_SX); } else { KASSERT(0, ("%s: don't know how to handle 0x%x.", __func__, phy->caps)); } PORT_UNLOCK(p); } static void cxgb_media_status(struct ifnet *ifp, struct ifmediareq *ifmr) { struct port_info *p = ifp->if_softc; struct ifmedia_entry *cur = p->media.ifm_cur; int speed = p->link_config.speed; if (cur->ifm_data != p->phy.modtype) { cxgb_build_medialist(p); cur = p->media.ifm_cur; } ifmr->ifm_status = IFM_AVALID; if (!p->link_config.link_ok) return; ifmr->ifm_status |= IFM_ACTIVE; /* * active and current will differ iff current media is autoselect. That * can happen only for copper RJ45. 
*/ if (IFM_SUBTYPE(cur->ifm_media) != IFM_AUTO) return; KASSERT(p->phy.caps & SUPPORTED_TP && p->phy.caps & SUPPORTED_Autoneg, ("%s: unexpected PHY caps 0x%x", __func__, p->phy.caps)); ifmr->ifm_active = IFM_ETHER | IFM_FDX; if (speed == SPEED_10000) ifmr->ifm_active |= IFM_10G_T; else if (speed == SPEED_1000) ifmr->ifm_active |= IFM_1000_T; else if (speed == SPEED_100) ifmr->ifm_active |= IFM_100_TX; else if (speed == SPEED_10) ifmr->ifm_active |= IFM_10_T; else KASSERT(0, ("%s: link up but speed unknown (%u)", __func__, speed)); } static uint64_t cxgb_get_counter(struct ifnet *ifp, ift_counter c) { struct port_info *pi = ifp->if_softc; struct adapter *sc = pi->adapter; struct cmac *mac = &pi->mac; struct mac_stats *mstats = &mac->stats; cxgb_refresh_stats(pi); switch (c) { case IFCOUNTER_IPACKETS: return (mstats->rx_frames); case IFCOUNTER_IERRORS: return (mstats->rx_jabber + mstats->rx_data_errs + mstats->rx_sequence_errs + mstats->rx_runt + mstats->rx_too_long + mstats->rx_mac_internal_errs + mstats->rx_short + mstats->rx_fcs_errs); case IFCOUNTER_OPACKETS: return (mstats->tx_frames); case IFCOUNTER_OERRORS: return (mstats->tx_excess_collisions + mstats->tx_underrun + mstats->tx_len_errs + mstats->tx_mac_internal_errs + mstats->tx_excess_deferral + mstats->tx_fcs_errs); case IFCOUNTER_COLLISIONS: return (mstats->tx_total_collisions); case IFCOUNTER_IBYTES: return (mstats->rx_octets); case IFCOUNTER_OBYTES: return (mstats->tx_octets); case IFCOUNTER_IMCASTS: return (mstats->rx_mcast_frames); case IFCOUNTER_OMCASTS: return (mstats->tx_mcast_frames); case IFCOUNTER_IQDROPS: return (mstats->rx_cong_drops); case IFCOUNTER_OQDROPS: { int i; uint64_t drops; drops = 0; if (sc->flags & FULL_INIT_DONE) { for (i = pi->first_qset; i < pi->first_qset + pi->nqsets; i++) drops += sc->sge.qs[i].txq[TXQ_ETH].txq_mr->br_drops; } return (drops); } default: return (if_get_counter_default(ifp, c)); } } static void cxgb_async_intr(void *data) { adapter_t *sc = data; t3_write_reg(sc, A_PL_INT_ENABLE0, 0); (void) t3_read_reg(sc, A_PL_INT_ENABLE0); taskqueue_enqueue(sc->tq, &sc->slow_intr_task); } static void link_check_callout(void *arg) { struct port_info *pi = arg; struct adapter *sc = pi->adapter; if (!isset(&sc->open_device_map, pi->port_id)) return; taskqueue_enqueue(sc->tq, &pi->link_check_task); } static void check_link_status(void *arg, int pending) { struct port_info *pi = arg; struct adapter *sc = pi->adapter; if (!isset(&sc->open_device_map, pi->port_id)) return; t3_link_changed(sc, pi->port_id); if (pi->link_fault || !(pi->phy.caps & SUPPORTED_LINK_IRQ) || pi->link_config.link_ok == 0) callout_reset(&pi->link_check_ch, hz, link_check_callout, pi); } void t3_os_link_intr(struct port_info *pi) { /* * Schedule a link check in the near future. If the link is flapping * rapidly we'll keep resetting the callout and delaying the check until * things stabilize a bit. 
*/ callout_reset(&pi->link_check_ch, hz / 4, link_check_callout, pi); } static void check_t3b2_mac(struct adapter *sc) { int i; if (sc->flags & CXGB_SHUTDOWN) return; for_each_port(sc, i) { struct port_info *p = &sc->port[i]; int status; #ifdef INVARIANTS struct ifnet *ifp = p->ifp; #endif if (!isset(&sc->open_device_map, p->port_id) || p->link_fault || !p->link_config.link_ok) continue; KASSERT(ifp->if_drv_flags & IFF_DRV_RUNNING, ("%s: state mismatch (drv_flags %x, device_map %x)", __func__, ifp->if_drv_flags, sc->open_device_map)); PORT_LOCK(p); status = t3b2_mac_watchdog_task(&p->mac); if (status == 1) p->mac.stats.num_toggled++; else if (status == 2) { struct cmac *mac = &p->mac; cxgb_update_mac_settings(p); t3_link_start(&p->phy, mac, &p->link_config); t3_mac_enable(mac, MAC_DIRECTION_RX | MAC_DIRECTION_TX); t3_port_intr_enable(sc, p->port_id); p->mac.stats.num_resets++; } PORT_UNLOCK(p); } } static void cxgb_tick(void *arg) { adapter_t *sc = (adapter_t *)arg; if (sc->flags & CXGB_SHUTDOWN) return; taskqueue_enqueue(sc->tq, &sc->tick_task); callout_reset(&sc->cxgb_tick_ch, hz, cxgb_tick, sc); } void cxgb_refresh_stats(struct port_info *pi) { struct timeval tv; const struct timeval interval = {0, 250000}; /* 250ms */ getmicrotime(&tv); timevalsub(&tv, &interval); if (timevalcmp(&tv, &pi->last_refreshed, <)) return; PORT_LOCK(pi); t3_mac_update_stats(&pi->mac); PORT_UNLOCK(pi); getmicrotime(&pi->last_refreshed); } static void cxgb_tick_handler(void *arg, int count) { adapter_t *sc = (adapter_t *)arg; const struct adapter_params *p = &sc->params; int i; uint32_t cause, reset; if (sc->flags & CXGB_SHUTDOWN || !(sc->flags & FULL_INIT_DONE)) return; if (p->rev == T3_REV_B2 && p->nports < 4 && sc->open_device_map) check_t3b2_mac(sc); cause = t3_read_reg(sc, A_SG_INT_CAUSE) & (F_RSPQSTARVE | F_FLEMPTY); if (cause) { struct sge_qset *qs = &sc->sge.qs[0]; uint32_t mask, v; v = t3_read_reg(sc, A_SG_RSPQ_FL_STATUS) & ~0xff00; mask = 1; for (i = 0; i < SGE_QSETS; i++) { if (v & mask) qs[i].rspq.starved++; mask <<= 1; } mask <<= SGE_QSETS; /* skip RSPQXDISABLED */ for (i = 0; i < SGE_QSETS * 2; i++) { if (v & mask) { qs[i / 2].fl[i % 2].empty++; } mask <<= 1; } /* clear */ t3_write_reg(sc, A_SG_RSPQ_FL_STATUS, v); t3_write_reg(sc, A_SG_INT_CAUSE, cause); } for (i = 0; i < sc->params.nports; i++) { struct port_info *pi = &sc->port[i]; struct cmac *mac = &pi->mac; if (!isset(&sc->open_device_map, pi->port_id)) continue; cxgb_refresh_stats(pi); if (mac->multiport) continue; /* Count rx fifo overflows, once per second */ cause = t3_read_reg(sc, A_XGM_INT_CAUSE + mac->offset); reset = 0; if (cause & F_RXFIFO_OVERFLOW) { mac->stats.rx_fifo_ovfl++; reset |= F_RXFIFO_OVERFLOW; } t3_write_reg(sc, A_XGM_INT_CAUSE + mac->offset, reset); } } static void touch_bars(device_t dev) { /* * Don't enable yet */ #if !defined(__LP64__) && 0 u32 v; pci_read_config_dword(pdev, PCI_BASE_ADDRESS_1, &v); pci_write_config_dword(pdev, PCI_BASE_ADDRESS_1, v); pci_read_config_dword(pdev, PCI_BASE_ADDRESS_3, &v); pci_write_config_dword(pdev, PCI_BASE_ADDRESS_3, v); pci_read_config_dword(pdev, PCI_BASE_ADDRESS_5, &v); pci_write_config_dword(pdev, PCI_BASE_ADDRESS_5, v); #endif } static int set_eeprom(struct port_info *pi, const uint8_t *data, int len, int offset) { uint8_t *buf; int err = 0; u32 aligned_offset, aligned_len, *p; struct adapter *adapter = pi->adapter; aligned_offset = offset & ~3; aligned_len = (len + (offset & 3) + 3) & ~3; if (aligned_offset != offset || aligned_len != len) { buf = malloc(aligned_len, M_DEVBUF, 
M_WAITOK|M_ZERO); if (!buf) return (ENOMEM); err = t3_seeprom_read(adapter, aligned_offset, (u32 *)buf); if (!err && aligned_len > 4) err = t3_seeprom_read(adapter, aligned_offset + aligned_len - 4, (u32 *)&buf[aligned_len - 4]); if (err) goto out; memcpy(buf + (offset & 3), data, len); } else buf = (uint8_t *)(uintptr_t)data; err = t3_seeprom_wp(adapter, 0); if (err) goto out; for (p = (u32 *)buf; !err && aligned_len; aligned_len -= 4, p++) { err = t3_seeprom_write(adapter, aligned_offset, *p); aligned_offset += 4; } if (!err) err = t3_seeprom_wp(adapter, 1); out: if (buf != data) free(buf, M_DEVBUF); return err; } static int in_range(int val, int lo, int hi) { return val < 0 || (val <= hi && val >= lo); } static int cxgb_extension_open(struct cdev *dev, int flags, int fmp, struct thread *td) { return (0); } static int cxgb_extension_close(struct cdev *dev, int flags, int fmt, struct thread *td) { return (0); } static int cxgb_extension_ioctl(struct cdev *dev, unsigned long cmd, caddr_t data, int fflag, struct thread *td) { int mmd, error = 0; struct port_info *pi = dev->si_drv1; adapter_t *sc = pi->adapter; #ifdef PRIV_SUPPORTED if (priv_check(td, PRIV_DRIVER)) { if (cxgb_debug) printf("user does not have access to privileged ioctls\n"); return (EPERM); } #else if (suser(td)) { if (cxgb_debug) printf("user does not have access to privileged ioctls\n"); return (EPERM); } #endif switch (cmd) { case CHELSIO_GET_MIIREG: { uint32_t val; struct cphy *phy = &pi->phy; struct ch_mii_data *mid = (struct ch_mii_data *)data; if (!phy->mdio_read) return (EOPNOTSUPP); if (is_10G(sc)) { mmd = mid->phy_id >> 8; if (!mmd) mmd = MDIO_DEV_PCS; else if (mmd > MDIO_DEV_VEND2) return (EINVAL); error = phy->mdio_read(sc, mid->phy_id & 0x1f, mmd, mid->reg_num, &val); } else error = phy->mdio_read(sc, mid->phy_id & 0x1f, 0, mid->reg_num & 0x1f, &val); if (error == 0) mid->val_out = val; break; } case CHELSIO_SET_MIIREG: { struct cphy *phy = &pi->phy; struct ch_mii_data *mid = (struct ch_mii_data *)data; if (!phy->mdio_write) return (EOPNOTSUPP); if (is_10G(sc)) { mmd = mid->phy_id >> 8; if (!mmd) mmd = MDIO_DEV_PCS; else if (mmd > MDIO_DEV_VEND2) return (EINVAL); error = phy->mdio_write(sc, mid->phy_id & 0x1f, mmd, mid->reg_num, mid->val_in); } else error = phy->mdio_write(sc, mid->phy_id & 0x1f, 0, mid->reg_num & 0x1f, mid->val_in); break; } case CHELSIO_SETREG: { struct ch_reg *edata = (struct ch_reg *)data; if ((edata->addr & 0x3) != 0 || edata->addr >= sc->mmio_len) return (EFAULT); t3_write_reg(sc, edata->addr, edata->val); break; } case CHELSIO_GETREG: { struct ch_reg *edata = (struct ch_reg *)data; if ((edata->addr & 0x3) != 0 || edata->addr >= sc->mmio_len) return (EFAULT); edata->val = t3_read_reg(sc, edata->addr); break; } case CHELSIO_GET_SGE_CONTEXT: { struct ch_cntxt *ecntxt = (struct ch_cntxt *)data; mtx_lock_spin(&sc->sge.reg_lock); switch (ecntxt->cntxt_type) { case CNTXT_TYPE_EGRESS: error = -t3_sge_read_ecntxt(sc, ecntxt->cntxt_id, ecntxt->data); break; case CNTXT_TYPE_FL: error = -t3_sge_read_fl(sc, ecntxt->cntxt_id, ecntxt->data); break; case CNTXT_TYPE_RSP: error = -t3_sge_read_rspq(sc, ecntxt->cntxt_id, ecntxt->data); break; case CNTXT_TYPE_CQ: error = -t3_sge_read_cq(sc, ecntxt->cntxt_id, ecntxt->data); break; default: error = EINVAL; break; } mtx_unlock_spin(&sc->sge.reg_lock); break; } case CHELSIO_GET_SGE_DESC: { struct ch_desc *edesc = (struct ch_desc *)data; int ret; if (edesc->queue_num >= SGE_QSETS * 6) return (EINVAL); ret = t3_get_desc(&sc->sge.qs[edesc->queue_num / 6], 
edesc->queue_num % 6, edesc->idx, edesc->data); if (ret < 0) return (EINVAL); edesc->size = ret; break; } case CHELSIO_GET_QSET_PARAMS: { struct qset_params *q; struct ch_qset_params *t = (struct ch_qset_params *)data; int q1 = pi->first_qset; int nqsets = pi->nqsets; int i; if (t->qset_idx >= nqsets) return EINVAL; i = q1 + t->qset_idx; q = &sc->params.sge.qset[i]; t->rspq_size = q->rspq_size; t->txq_size[0] = q->txq_size[0]; t->txq_size[1] = q->txq_size[1]; t->txq_size[2] = q->txq_size[2]; t->fl_size[0] = q->fl_size; t->fl_size[1] = q->jumbo_size; t->polling = q->polling; t->lro = q->lro; t->intr_lat = q->coalesce_usecs; t->cong_thres = q->cong_thres; t->qnum = i; if ((sc->flags & FULL_INIT_DONE) == 0) t->vector = 0; else if (sc->flags & USING_MSIX) t->vector = rman_get_start(sc->msix_irq_res[i]); else t->vector = rman_get_start(sc->irq_res); break; } case CHELSIO_GET_QSET_NUM: { struct ch_reg *edata = (struct ch_reg *)data; edata->val = pi->nqsets; break; } case CHELSIO_LOAD_FW: { uint8_t *fw_data; uint32_t vers; struct ch_mem_range *t = (struct ch_mem_range *)data; /* * You're allowed to load a firmware only before FULL_INIT_DONE * * FW_UPTODATE is also set so the rest of the initialization * will not overwrite what was loaded here. This gives you the * flexibility to load any firmware (and maybe shoot yourself in * the foot). */ ADAPTER_LOCK(sc); if (sc->open_device_map || sc->flags & FULL_INIT_DONE) { ADAPTER_UNLOCK(sc); return (EBUSY); } fw_data = malloc(t->len, M_DEVBUF, M_NOWAIT); if (!fw_data) error = ENOMEM; else error = copyin(t->buf, fw_data, t->len); if (!error) error = -t3_load_fw(sc, fw_data, t->len); if (t3_get_fw_version(sc, &vers) == 0) { snprintf(&sc->fw_version[0], sizeof(sc->fw_version), "%d.%d.%d", G_FW_VERSION_MAJOR(vers), G_FW_VERSION_MINOR(vers), G_FW_VERSION_MICRO(vers)); } if (!error) sc->flags |= FW_UPTODATE; free(fw_data, M_DEVBUF); ADAPTER_UNLOCK(sc); break; } case CHELSIO_LOAD_BOOT: { uint8_t *boot_data; struct ch_mem_range *t = (struct ch_mem_range *)data; boot_data = malloc(t->len, M_DEVBUF, M_NOWAIT); if (!boot_data) return ENOMEM; error = copyin(t->buf, boot_data, t->len); if (!error) error = -t3_load_boot(sc, boot_data, t->len); free(boot_data, M_DEVBUF); break; } case CHELSIO_GET_PM: { struct ch_pm *m = (struct ch_pm *)data; struct tp_params *p = &sc->params.tp; if (!is_offload(sc)) return (EOPNOTSUPP); m->tx_pg_sz = p->tx_pg_size; m->tx_num_pg = p->tx_num_pgs; m->rx_pg_sz = p->rx_pg_size; m->rx_num_pg = p->rx_num_pgs; m->pm_total = p->pmtx_size + p->chan_rx_size * p->nchan; break; } case CHELSIO_SET_PM: { struct ch_pm *m = (struct ch_pm *)data; struct tp_params *p = &sc->params.tp; if (!is_offload(sc)) return (EOPNOTSUPP); if (sc->flags & FULL_INIT_DONE) return (EBUSY); if (!m->rx_pg_sz || (m->rx_pg_sz & (m->rx_pg_sz - 1)) || !m->tx_pg_sz || (m->tx_pg_sz & (m->tx_pg_sz - 1))) return (EINVAL); /* not power of 2 */ if (!(m->rx_pg_sz & 0x14000)) return (EINVAL); /* not 16KB or 64KB */ if (!(m->tx_pg_sz & 0x1554000)) return (EINVAL); if (m->tx_num_pg == -1) m->tx_num_pg = p->tx_num_pgs; if (m->rx_num_pg == -1) m->rx_num_pg = p->rx_num_pgs; if (m->tx_num_pg % 24 || m->rx_num_pg % 24) return (EINVAL); if (m->rx_num_pg * m->rx_pg_sz > p->chan_rx_size || m->tx_num_pg * m->tx_pg_sz > p->chan_tx_size) return (EINVAL); p->rx_pg_size = m->rx_pg_sz; p->tx_pg_size = m->tx_pg_sz; p->rx_num_pgs = m->rx_num_pg; p->tx_num_pgs = m->tx_num_pg; break; } case CHELSIO_SETMTUTAB: { struct ch_mtus *m = (struct ch_mtus *)data; int i; if (!is_offload(sc)) return (EOPNOTSUPP); 
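/*
 * Descriptive note on the checks that follow: the MTU table may only be
 * rewritten while the offload engine is idle, must contain exactly NMTUS
 * entries in ascending order, and its smallest entry must be at least 81
 * bytes so a segment can still accommodate SACK options.  Only then is
 * the table copied into sc->params.mtus.
 */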
if (offload_running(sc)) return (EBUSY); if (m->nmtus != NMTUS) return (EINVAL); if (m->mtus[0] < 81) /* accommodate SACK */ return (EINVAL); /* * MTUs must be in ascending order */ for (i = 1; i < NMTUS; ++i) if (m->mtus[i] < m->mtus[i - 1]) return (EINVAL); memcpy(sc->params.mtus, m->mtus, sizeof(sc->params.mtus)); break; } case CHELSIO_GETMTUTAB: { struct ch_mtus *m = (struct ch_mtus *)data; if (!is_offload(sc)) return (EOPNOTSUPP); memcpy(m->mtus, sc->params.mtus, sizeof(m->mtus)); m->nmtus = NMTUS; break; } case CHELSIO_GET_MEM: { struct ch_mem_range *t = (struct ch_mem_range *)data; struct mc7 *mem; uint8_t *useraddr; u64 buf[32]; /* * Use these to avoid modifying len/addr in the return * struct */ uint32_t len = t->len, addr = t->addr; if (!is_offload(sc)) return (EOPNOTSUPP); if (!(sc->flags & FULL_INIT_DONE)) return (EIO); /* need the memory controllers */ if ((addr & 0x7) || (len & 0x7)) return (EINVAL); if (t->mem_id == MEM_CM) mem = &sc->cm; else if (t->mem_id == MEM_PMRX) mem = &sc->pmrx; else if (t->mem_id == MEM_PMTX) mem = &sc->pmtx; else return (EINVAL); /* * Version scheme: * bits 0..9: chip version * bits 10..15: chip revision */ t->version = 3 | (sc->params.rev << 10); /* * Read 256 bytes at a time as len can be large and we don't * want to use huge intermediate buffers. */ useraddr = (uint8_t *)t->buf; while (len) { unsigned int chunk = min(len, sizeof(buf)); error = t3_mc7_bd_read(mem, addr / 8, chunk / 8, buf); if (error) return (-error); if (copyout(buf, useraddr, chunk)) return (EFAULT); useraddr += chunk; addr += chunk; len -= chunk; } break; } case CHELSIO_READ_TCAM_WORD: { struct ch_tcam_word *t = (struct ch_tcam_word *)data; if (!is_offload(sc)) return (EOPNOTSUPP); if (!(sc->flags & FULL_INIT_DONE)) return (EIO); /* need MC5 */ return -t3_read_mc5_range(&sc->mc5, t->addr, 1, t->buf); break; } case CHELSIO_SET_TRACE_FILTER: { struct ch_trace *t = (struct ch_trace *)data; const struct trace_params *tp; tp = (const struct trace_params *)&t->sip; if (t->config_tx) t3_config_trace_filter(sc, tp, 0, t->invert_match, t->trace_tx); if (t->config_rx) t3_config_trace_filter(sc, tp, 1, t->invert_match, t->trace_rx); break; } case CHELSIO_SET_PKTSCHED: { struct ch_pktsched_params *p = (struct ch_pktsched_params *)data; if (sc->open_device_map == 0) return (EAGAIN); send_pktsched_cmd(sc, p->sched, p->idx, p->min, p->max, p->binding); break; } case CHELSIO_IFCONF_GETREGS: { struct ch_ifconf_regs *regs = (struct ch_ifconf_regs *)data; int reglen = cxgb_get_regs_len(); uint8_t *buf = malloc(reglen, M_DEVBUF, M_NOWAIT); if (buf == NULL) { return (ENOMEM); } if (regs->len > reglen) regs->len = reglen; else if (regs->len < reglen) error = ENOBUFS; if (!error) { cxgb_get_regs(sc, regs, buf); error = copyout(buf, regs->data, reglen); } free(buf, M_DEVBUF); break; } case CHELSIO_SET_HW_SCHED: { struct ch_hw_sched *t = (struct ch_hw_sched *)data; unsigned int ticks_per_usec = core_ticks_per_usec(sc); if ((sc->flags & FULL_INIT_DONE) == 0) return (EAGAIN); /* need TP to be initialized */ if (t->sched >= NTX_SCHED || !in_range(t->mode, 0, 1) || !in_range(t->channel, 0, 1) || !in_range(t->kbps, 0, 10000000) || !in_range(t->class_ipg, 0, 10000 * 65535 / ticks_per_usec) || !in_range(t->flow_ipg, 0, dack_ticks_to_usec(sc, 0x7ff))) return (EINVAL); if (t->kbps >= 0) { error = t3_config_sched(sc, t->kbps, t->sched); if (error < 0) return (-error); } if (t->class_ipg >= 0) t3_set_sched_ipg(sc, t->sched, t->class_ipg); if (t->flow_ipg >= 0) { t->flow_ipg *= 1000; /* us -> ns */ 
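/*
 * Each scheduler parameter in this request (kbps, class_ipg, flow_ipg,
 * mode, channel) is optional: a negative value leaves that setting
 * unchanged.  For example, a request with only kbps filled in and every
 * other field at -1 just reprograms the rate of the chosen scheduler.
 * flow_ipg was converted from microseconds to nanoseconds above before
 * being written to the pace table.
 */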
t3_set_pace_tbl(sc, &t->flow_ipg, t->sched, 1); } if (t->mode >= 0) { int bit = 1 << (S_TX_MOD_TIMER_MODE + t->sched); t3_set_reg_field(sc, A_TP_TX_MOD_QUEUE_REQ_MAP, bit, t->mode ? bit : 0); } if (t->channel >= 0) t3_set_reg_field(sc, A_TP_TX_MOD_QUEUE_REQ_MAP, 1 << t->sched, t->channel << t->sched); break; } case CHELSIO_GET_EEPROM: { int i; struct ch_eeprom *e = (struct ch_eeprom *)data; uint8_t *buf; if (e->offset & 3 || e->offset >= EEPROMSIZE || e->len > EEPROMSIZE || e->offset + e->len > EEPROMSIZE) { return (EINVAL); } buf = malloc(EEPROMSIZE, M_DEVBUF, M_NOWAIT); if (buf == NULL) { return (ENOMEM); } e->magic = EEPROM_MAGIC; for (i = e->offset & ~3; !error && i < e->offset + e->len; i += 4) error = -t3_seeprom_read(sc, i, (uint32_t *)&buf[i]); if (!error) error = copyout(buf + e->offset, e->data, e->len); free(buf, M_DEVBUF); break; } case CHELSIO_CLEAR_STATS: { if (!(sc->flags & FULL_INIT_DONE)) return EAGAIN; PORT_LOCK(pi); t3_mac_update_stats(&pi->mac); memset(&pi->mac.stats, 0, sizeof(pi->mac.stats)); PORT_UNLOCK(pi); break; } case CHELSIO_GET_UP_LA: { struct ch_up_la *la = (struct ch_up_la *)data; uint8_t *buf = malloc(LA_BUFSIZE, M_DEVBUF, M_NOWAIT); if (buf == NULL) { return (ENOMEM); } if (la->bufsize < LA_BUFSIZE) error = ENOBUFS; if (!error) error = -t3_get_up_la(sc, &la->stopped, &la->idx, &la->bufsize, buf); if (!error) error = copyout(buf, la->data, la->bufsize); free(buf, M_DEVBUF); break; } case CHELSIO_GET_UP_IOQS: { struct ch_up_ioqs *ioqs = (struct ch_up_ioqs *)data; uint8_t *buf = malloc(IOQS_BUFSIZE, M_DEVBUF, M_NOWAIT); uint32_t *v; if (buf == NULL) { return (ENOMEM); } if (ioqs->bufsize < IOQS_BUFSIZE) error = ENOBUFS; if (!error) error = -t3_get_up_ioqs(sc, &ioqs->bufsize, buf); if (!error) { v = (uint32_t *)buf; ioqs->ioq_rx_enable = *v++; ioqs->ioq_tx_enable = *v++; ioqs->ioq_rx_status = *v++; ioqs->ioq_tx_status = *v++; error = copyout(v, ioqs->data, ioqs->bufsize); } free(buf, M_DEVBUF); break; } case CHELSIO_SET_FILTER: { struct ch_filter *f = (struct ch_filter *)data; struct filter_info *p; unsigned int nfilters = sc->params.mc5.nfilters; if (!is_offload(sc)) return (EOPNOTSUPP); /* No TCAM */ if (!(sc->flags & FULL_INIT_DONE)) return (EAGAIN); /* mc5 not setup yet */ if (nfilters == 0) return (EBUSY); /* TOE will use TCAM */ /* sanity checks */ if (f->filter_id >= nfilters || (f->val.dip && f->mask.dip != 0xffffffff) || (f->val.sport && f->mask.sport != 0xffff) || (f->val.dport && f->mask.dport != 0xffff) || (f->val.vlan && f->mask.vlan != 0xfff) || (f->val.vlan_prio && f->mask.vlan_prio != FILTER_NO_VLAN_PRI) || (f->mac_addr_idx != 0xffff && f->mac_addr_idx > 15) || f->qset >= SGE_QSETS || sc->rrss_map[f->qset] >= RSS_TABLE_SIZE) return (EINVAL); /* Was allocated with M_WAITOK */ KASSERT(sc->filters, ("filter table NULL\n")); p = &sc->filters[f->filter_id]; if (p->locked) return (EPERM); bzero(p, sizeof(*p)); p->sip = f->val.sip; p->sip_mask = f->mask.sip; p->dip = f->val.dip; p->sport = f->val.sport; p->dport = f->val.dport; p->vlan = f->mask.vlan ? f->val.vlan : 0xfff; p->vlan_prio = f->mask.vlan_prio ? 
(f->val.vlan_prio & 6) : FILTER_NO_VLAN_PRI; p->mac_hit = f->mac_hit; p->mac_vld = f->mac_addr_idx != 0xffff; p->mac_idx = f->mac_addr_idx; p->pkt_type = f->proto; p->report_filter_id = f->want_filter_id; p->pass = f->pass; p->rss = f->rss; p->qset = f->qset; error = set_filter(sc, f->filter_id, p); if (error == 0) p->valid = 1; break; } case CHELSIO_DEL_FILTER: { struct ch_filter *f = (struct ch_filter *)data; struct filter_info *p; unsigned int nfilters = sc->params.mc5.nfilters; if (!is_offload(sc)) return (EOPNOTSUPP); if (!(sc->flags & FULL_INIT_DONE)) return (EAGAIN); if (nfilters == 0 || sc->filters == NULL) return (EINVAL); if (f->filter_id >= nfilters) return (EINVAL); p = &sc->filters[f->filter_id]; if (p->locked) return (EPERM); if (!p->valid) return (EFAULT); /* Read "Bad address" as "Bad index" */ bzero(p, sizeof(*p)); p->sip = p->sip_mask = 0xffffffff; p->vlan = 0xfff; p->vlan_prio = FILTER_NO_VLAN_PRI; p->pkt_type = 1; error = set_filter(sc, f->filter_id, p); break; } case CHELSIO_GET_FILTER: { struct ch_filter *f = (struct ch_filter *)data; struct filter_info *p; unsigned int i, nfilters = sc->params.mc5.nfilters; if (!is_offload(sc)) return (EOPNOTSUPP); if (!(sc->flags & FULL_INIT_DONE)) return (EAGAIN); if (nfilters == 0 || sc->filters == NULL) return (EINVAL); i = f->filter_id == 0xffffffff ? 0 : f->filter_id + 1; for (; i < nfilters; i++) { p = &sc->filters[i]; if (!p->valid) continue; bzero(f, sizeof(*f)); f->filter_id = i; f->val.sip = p->sip; f->mask.sip = p->sip_mask; f->val.dip = p->dip; f->mask.dip = p->dip ? 0xffffffff : 0; f->val.sport = p->sport; f->mask.sport = p->sport ? 0xffff : 0; f->val.dport = p->dport; f->mask.dport = p->dport ? 0xffff : 0; f->val.vlan = p->vlan == 0xfff ? 0 : p->vlan; f->mask.vlan = p->vlan == 0xfff ? 0 : 0xfff; f->val.vlan_prio = p->vlan_prio == FILTER_NO_VLAN_PRI ? 0 : p->vlan_prio; f->mask.vlan_prio = p->vlan_prio == FILTER_NO_VLAN_PRI ? 0 : FILTER_NO_VLAN_PRI; f->mac_hit = p->mac_hit; f->mac_addr_idx = p->mac_vld ? p->mac_idx : 0xffff; f->proto = p->pkt_type; f->want_filter_id = p->report_filter_id; f->pass = p->pass; f->rss = p->rss; f->qset = p->qset; break; } if (i == nfilters) f->filter_id = 0xffffffff; break; } default: return (EOPNOTSUPP); break; } return (error); } static __inline void reg_block_dump(struct adapter *ap, uint8_t *buf, unsigned int start, unsigned int end) { uint32_t *p = (uint32_t *)(buf + start); for ( ; start <= end; start += sizeof(uint32_t)) *p++ = t3_read_reg(ap, start); } #define T3_REGMAP_SIZE (3 * 1024) static int cxgb_get_regs_len(void) { return T3_REGMAP_SIZE; } static void cxgb_get_regs(adapter_t *sc, struct ch_ifconf_regs *regs, uint8_t *buf) { /* * Version scheme: * bits 0..9: chip version * bits 10..15: chip revision * bit 31: set for PCIe cards */ regs->version = 3 | (sc->params.rev << 10) | (is_pcie(sc) << 31); /* * We skip the MAC statistics registers because they are clear-on-read. * Also reading multi-register stats would need to synchronize with the * periodic mac stats accumulation. Hard to justify the complexity. 
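 * The buffer is zeroed below before the individual register blocks are
 * dumped, so the skipped statistics ranges simply read back as zeroes.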
*/ memset(buf, 0, cxgb_get_regs_len()); reg_block_dump(sc, buf, 0, A_SG_RSPQ_CREDIT_RETURN); reg_block_dump(sc, buf, A_SG_HI_DRB_HI_THRSH, A_ULPRX_PBL_ULIMIT); reg_block_dump(sc, buf, A_ULPTX_CONFIG, A_MPS_INT_CAUSE); reg_block_dump(sc, buf, A_CPL_SWITCH_CNTRL, A_CPL_MAP_TBL_DATA); reg_block_dump(sc, buf, A_SMB_GLOBAL_TIME_CFG, A_XGM_SERDES_STAT3); reg_block_dump(sc, buf, A_XGM_SERDES_STATUS0, XGM_REG(A_XGM_SERDES_STAT3, 1)); reg_block_dump(sc, buf, XGM_REG(A_XGM_SERDES_STATUS0, 1), XGM_REG(A_XGM_RX_SPI4_SOP_EOP_CNT, 1)); } static int alloc_filters(struct adapter *sc) { struct filter_info *p; unsigned int nfilters = sc->params.mc5.nfilters; if (nfilters == 0) return (0); p = malloc(sizeof(*p) * nfilters, M_DEVBUF, M_WAITOK | M_ZERO); sc->filters = p; p = &sc->filters[nfilters - 1]; p->vlan = 0xfff; p->vlan_prio = FILTER_NO_VLAN_PRI; p->pass = p->rss = p->valid = p->locked = 1; return (0); } static int setup_hw_filters(struct adapter *sc) { int i, rc; unsigned int nfilters = sc->params.mc5.nfilters; if (!sc->filters) return (0); t3_enable_filters(sc); for (i = rc = 0; i < nfilters && !rc; i++) { if (sc->filters[i].locked) rc = set_filter(sc, i, &sc->filters[i]); } return (rc); } static int set_filter(struct adapter *sc, int id, const struct filter_info *f) { int len; struct mbuf *m; struct ulp_txpkt *txpkt; struct work_request_hdr *wr; struct cpl_pass_open_req *oreq; struct cpl_set_tcb_field *sreq; len = sizeof(*wr) + sizeof(*oreq) + 2 * sizeof(*sreq); KASSERT(len <= MHLEN, ("filter request too big for an mbuf")); id += t3_mc5_size(&sc->mc5) - sc->params.mc5.nroutes - sc->params.mc5.nfilters; m = m_gethdr(M_WAITOK, MT_DATA); m->m_len = m->m_pkthdr.len = len; bzero(mtod(m, char *), len); wr = mtod(m, struct work_request_hdr *); wr->wrh_hi = htonl(V_WR_OP(FW_WROPCODE_BYPASS) | F_WR_ATOMIC); oreq = (struct cpl_pass_open_req *)(wr + 1); txpkt = (struct ulp_txpkt *)oreq; txpkt->cmd_dest = htonl(V_ULPTX_CMD(ULP_TXPKT)); txpkt->len = htonl(V_ULPTX_NFLITS(sizeof(*oreq) / 8)); OPCODE_TID(oreq) = htonl(MK_OPCODE_TID(CPL_PASS_OPEN_REQ, id)); oreq->local_port = htons(f->dport); oreq->peer_port = htons(f->sport); oreq->local_ip = htonl(f->dip); oreq->peer_ip = htonl(f->sip); oreq->peer_netmask = htonl(f->sip_mask); oreq->opt0h = 0; oreq->opt0l = htonl(F_NO_OFFLOAD); oreq->opt1 = htonl(V_MAC_MATCH_VALID(f->mac_vld) | V_CONN_POLICY(CPL_CONN_POLICY_FILTER) | V_VLAN_PRI(f->vlan_prio >> 1) | V_VLAN_PRI_VALID(f->vlan_prio != FILTER_NO_VLAN_PRI) | V_PKT_TYPE(f->pkt_type) | V_OPT1_VLAN(f->vlan) | V_MAC_MATCH(f->mac_idx | (f->mac_hit << 4))); sreq = (struct cpl_set_tcb_field *)(oreq + 1); set_tcb_field_ulp(sreq, id, 1, 0x1800808000ULL, (f->report_filter_id << 15) | (1 << 23) | ((u64)f->pass << 35) | ((u64)!f->rss << 36)); set_tcb_field_ulp(sreq + 1, id, 0, 0xffffffff, (2 << 19) | 1); t3_mgmt_tx(sc, m); if (f->pass && !f->rss) { len = sizeof(*sreq); m = m_gethdr(M_WAITOK, MT_DATA); m->m_len = m->m_pkthdr.len = len; bzero(mtod(m, char *), len); sreq = mtod(m, struct cpl_set_tcb_field *); sreq->wr.wrh_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD)); mk_set_tcb_field(sreq, id, 25, 0x3f80000, (u64)sc->rrss_map[f->qset] << 19); t3_mgmt_tx(sc, m); } return 0; } static inline void mk_set_tcb_field(struct cpl_set_tcb_field *req, unsigned int tid, unsigned int word, u64 mask, u64 val) { OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, tid)); req->reply = V_NO_REPLY(1); req->cpu_idx = 0; req->word = htons(word); req->mask = htobe64(mask); req->val = htobe64(val); } static inline void set_tcb_field_ulp(struct 
cpl_set_tcb_field *req, unsigned int tid, unsigned int word, u64 mask, u64 val) { struct ulp_txpkt *txpkt = (struct ulp_txpkt *)req; txpkt->cmd_dest = htonl(V_ULPTX_CMD(ULP_TXPKT)); txpkt->len = htonl(V_ULPTX_NFLITS(sizeof(*req) / 8)); mk_set_tcb_field(req, tid, word, mask, val); } void t3_iterate(void (*func)(struct adapter *, void *), void *arg) { struct adapter *sc; mtx_lock(&t3_list_lock); SLIST_FOREACH(sc, &t3_list, link) { /* * func should not make any assumptions about what state sc is * in - the only guarantee is that sc->sc_lock is a valid lock. */ func(sc, arg); } mtx_unlock(&t3_list_lock); } #ifdef TCP_OFFLOAD static int toe_capability(struct port_info *pi, int enable) { int rc; struct adapter *sc = pi->adapter; ADAPTER_LOCK_ASSERT_OWNED(sc); if (!is_offload(sc)) return (ENODEV); if (enable) { if (!(sc->flags & FULL_INIT_DONE)) { log(LOG_WARNING, "You must enable a cxgb interface first\n"); return (EAGAIN); } if (isset(&sc->offload_map, pi->port_id)) return (0); if (!(sc->flags & TOM_INIT_DONE)) { rc = t3_activate_uld(sc, ULD_TOM); if (rc == EAGAIN) { log(LOG_WARNING, "You must kldload t3_tom.ko before trying " "to enable TOE on a cxgb interface.\n"); } if (rc != 0) return (rc); KASSERT(sc->tom_softc != NULL, ("%s: TOM activated but softc NULL", __func__)); KASSERT(sc->flags & TOM_INIT_DONE, ("%s: TOM activated but flag not set", __func__)); } setbit(&sc->offload_map, pi->port_id); /* * XXX: Temporary code to allow iWARP to be enabled when TOE is * enabled on any port. Need to figure out how to enable, * disable, load, and unload iWARP cleanly. */ if (!isset(&sc->offload_map, MAX_NPORTS) && t3_activate_uld(sc, ULD_IWARP) == 0) setbit(&sc->offload_map, MAX_NPORTS); } else { if (!isset(&sc->offload_map, pi->port_id)) return (0); KASSERT(sc->flags & TOM_INIT_DONE, ("%s: TOM never initialized?", __func__)); clrbit(&sc->offload_map, pi->port_id); } return (0); } /* * Add an upper layer driver to the global list. */ int t3_register_uld(struct uld_info *ui) { int rc = 0; struct uld_info *u; mtx_lock(&t3_uld_list_lock); SLIST_FOREACH(u, &t3_uld_list, link) { if (u->uld_id == ui->uld_id) { rc = EEXIST; goto done; } } SLIST_INSERT_HEAD(&t3_uld_list, ui, link); ui->refcount = 0; done: mtx_unlock(&t3_uld_list_lock); return (rc); } int t3_unregister_uld(struct uld_info *ui) { int rc = EINVAL; struct uld_info *u; mtx_lock(&t3_uld_list_lock); SLIST_FOREACH(u, &t3_uld_list, link) { if (u == ui) { if (ui->refcount > 0) { rc = EBUSY; goto done; } SLIST_REMOVE(&t3_uld_list, ui, uld_info, link); rc = 0; goto done; } } done: mtx_unlock(&t3_uld_list_lock); return (rc); } int t3_activate_uld(struct adapter *sc, int id) { int rc = EAGAIN; struct uld_info *ui; mtx_lock(&t3_uld_list_lock); SLIST_FOREACH(ui, &t3_uld_list, link) { if (ui->uld_id == id) { rc = ui->activate(sc); if (rc == 0) ui->refcount++; goto done; } } done: mtx_unlock(&t3_uld_list_lock); return (rc); } int t3_deactivate_uld(struct adapter *sc, int id) { int rc = EINVAL; struct uld_info *ui; mtx_lock(&t3_uld_list_lock); SLIST_FOREACH(ui, &t3_uld_list, link) { if (ui->uld_id == id) { rc = ui->deactivate(sc); if (rc == 0) ui->refcount--; goto done; } } done: mtx_unlock(&t3_uld_list_lock); return (rc); } static int cpl_not_handled(struct sge_qset *qs __unused, struct rsp_desc *r __unused, struct mbuf *m) { m_freem(m); return (EDOOFUS); } int t3_register_cpl_handler(struct adapter *sc, int opcode, cpl_handler_t h) { uintptr_t *loc, new; if (opcode >= NUM_CPL_HANDLERS) return (EINVAL); new = h ? 
(uintptr_t)h : (uintptr_t)cpl_not_handled; loc = (uintptr_t *) &sc->cpl_handler[opcode]; atomic_store_rel_ptr(loc, new); return (0); } #endif static int cxgbc_mod_event(module_t mod, int cmd, void *arg) { int rc = 0; switch (cmd) { case MOD_LOAD: mtx_init(&t3_list_lock, "T3 adapters", 0, MTX_DEF); SLIST_INIT(&t3_list); #ifdef TCP_OFFLOAD mtx_init(&t3_uld_list_lock, "T3 ULDs", 0, MTX_DEF); SLIST_INIT(&t3_uld_list); #endif break; case MOD_UNLOAD: #ifdef TCP_OFFLOAD mtx_lock(&t3_uld_list_lock); if (!SLIST_EMPTY(&t3_uld_list)) { rc = EBUSY; mtx_unlock(&t3_uld_list_lock); break; } mtx_unlock(&t3_uld_list_lock); mtx_destroy(&t3_uld_list_lock); #endif mtx_lock(&t3_list_lock); if (!SLIST_EMPTY(&t3_list)) { rc = EBUSY; mtx_unlock(&t3_list_lock); break; } mtx_unlock(&t3_list_lock); mtx_destroy(&t3_list_lock); break; } return (rc); } #ifdef NETDUMP static void cxgb_netdump_init(struct ifnet *ifp, int *nrxr, int *ncl, int *clsize) { struct port_info *pi; adapter_t *adap; pi = if_getsoftc(ifp); adap = pi->adapter; ADAPTER_LOCK(adap); *nrxr = SGE_QSETS; *ncl = adap->sge.qs[0].fl[1].size; *clsize = adap->sge.qs[0].fl[1].buf_size; ADAPTER_UNLOCK(adap); } static void cxgb_netdump_event(struct ifnet *ifp, enum netdump_ev event) { struct port_info *pi; struct sge_qset *qs; int i; pi = if_getsoftc(ifp); if (event == NETDUMP_START) for (i = 0; i < SGE_QSETS; i++) { qs = &pi->adapter->sge.qs[i]; /* Need to reinit after netdump_mbuf_dump(). */ qs->fl[0].zone = zone_pack; qs->fl[1].zone = zone_clust; qs->lro.enabled = 0; } } static int cxgb_netdump_transmit(struct ifnet *ifp, struct mbuf *m) { struct port_info *pi; struct sge_qset *qs; pi = if_getsoftc(ifp); if ((if_getdrvflags(ifp) & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) != IFF_DRV_RUNNING) return (ENOENT); qs = &pi->adapter->sge.qs[pi->first_qset]; return (cxgb_netdump_encap(qs, &m)); } static int cxgb_netdump_poll(struct ifnet *ifp, int count) { struct port_info *pi; adapter_t *adap; int i; pi = if_getsoftc(ifp); if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) == 0) return (ENOENT); adap = pi->adapter; for (i = 0; i < SGE_QSETS; i++) (void)cxgb_netdump_poll_rx(adap, &adap->sge.qs[i]); (void)cxgb_netdump_poll_tx(&adap->sge.qs[pi->first_qset]); return (0); } #endif /* NETDUMP */ Index: head/sys/dev/ida/ida_pci.c =================================================================== --- head/sys/dev/ida/ida_pci.c (revision 338948) +++ head/sys/dev/ida/ida_pci.c (revision 338949) @@ -1,308 +1,310 @@ /*- * SPDX-License-Identifier: BSD-2-Clause-FreeBSD * * Copyright (c) 1999,2000 Jonathan Lemon * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #define IDA_PCI_MAX_DMA_ADDR 0xFFFFFFFF #define IDA_PCI_MAX_DMA_COUNT 0xFFFFFFFF #define IDA_PCI_MEMADDR PCIR_BAR(1) /* Mem I/O Address */ #define IDA_DEVICEID_SMART 0xAE100E11 #define IDA_DEVICEID_DEC_SMART 0x00461011 #define IDA_DEVICEID_NCR_53C1510 0x00101000 static int ida_v3_fifo_full(struct ida_softc *ida) { return (ida_inl(ida, R_CMD_FIFO) == 0); } static void ida_v3_submit(struct ida_softc *ida, struct ida_qcb *qcb) { ida_outl(ida, R_CMD_FIFO, qcb->hwqcb_busaddr); } static bus_addr_t ida_v3_done(struct ida_softc *ida) { bus_addr_t completed; completed = ida_inl(ida, R_DONE_FIFO); if (completed == -1) { return (0); /* fifo is empty */ } return (completed); } static int ida_v3_int_pending(struct ida_softc *ida) { return (ida_inl(ida, R_INT_PENDING)); } static void ida_v3_int_enable(struct ida_softc *ida, int enable) { if (enable) ida->flags |= IDA_INTERRUPTS; else ida->flags &= ~IDA_INTERRUPTS; ida_outl(ida, R_INT_MASK, enable ? INT_ENABLE : INT_DISABLE); } static int ida_v4_fifo_full(struct ida_softc *ida) { return (ida_inl(ida, R_42XX_REQUEST) != 0); } static void ida_v4_submit(struct ida_softc *ida, struct ida_qcb *qcb) { ida_outl(ida, R_42XX_REQUEST, qcb->hwqcb_busaddr); } static bus_addr_t ida_v4_done(struct ida_softc *ida) { bus_addr_t completed; completed = ida_inl(ida, R_42XX_REPLY); if (completed == -1) return (0); /* fifo is empty */ ida_outl(ida, R_42XX_REPLY, 0); /* confirm read */ return (completed); } static int ida_v4_int_pending(struct ida_softc *ida) { return (ida_inl(ida, R_42XX_STATUS) & STATUS_42XX_INT_PENDING); } static void ida_v4_int_enable(struct ida_softc *ida, int enable) { if (enable) ida->flags |= IDA_INTERRUPTS; else ida->flags &= ~IDA_INTERRUPTS; ida_outl(ida, R_42XX_INT_MASK, enable ? 
INT_ENABLE_42XX : INT_DISABLE_42XX); } static struct ida_access ida_v3_access = { ida_v3_fifo_full, ida_v3_submit, ida_v3_done, ida_v3_int_pending, ida_v3_int_enable, }; static struct ida_access ida_v4_access = { ida_v4_fifo_full, ida_v4_submit, ida_v4_done, ida_v4_int_pending, ida_v4_int_enable, }; static struct ida_board board_id[] = { { 0x40300E11, "Compaq SMART-2/P array controller", &ida_v3_access, 0 }, { 0x40310E11, "Compaq SMART-2SL array controller", &ida_v3_access, 0 }, { 0x40320E11, "Compaq Smart Array 3200 controller", &ida_v3_access, 0 }, { 0x40330E11, "Compaq Smart Array 3100ES controller", &ida_v3_access, 0 }, { 0x40340E11, "Compaq Smart Array 221 controller", &ida_v3_access, 0 }, { 0x40400E11, "Compaq Integrated Array controller", &ida_v4_access, IDA_FIRMWARE }, { 0x40480E11, "Compaq RAID LC2 controller", &ida_v4_access, IDA_FIRMWARE }, { 0x40500E11, "Compaq Smart Array 4200 controller", &ida_v4_access, 0 }, { 0x40510E11, "Compaq Smart Array 4250ES controller", &ida_v4_access, 0 }, { 0x40580E11, "Compaq Smart Array 431 controller", &ida_v4_access, 0 }, { 0, "", 0, 0 }, }; static int ida_pci_probe(device_t dev); static int ida_pci_attach(device_t dev); static device_method_t ida_pci_methods[] = { DEVMETHOD(device_probe, ida_pci_probe), DEVMETHOD(device_attach, ida_pci_attach), DEVMETHOD(device_detach, ida_detach), DEVMETHOD_END }; static driver_t ida_pci_driver = { "ida", ida_pci_methods, sizeof(struct ida_softc) }; static devclass_t ida_devclass; static struct ida_board * ida_pci_match(device_t dev) { int i; u_int32_t id, sub_id; id = pci_get_devid(dev); sub_id = pci_get_subdevice(dev) << 16 | pci_get_subvendor(dev); if (id == IDA_DEVICEID_SMART || id == IDA_DEVICEID_DEC_SMART || id == IDA_DEVICEID_NCR_53C1510) { for (i = 0; board_id[i].board; i++) if (board_id[i].board == sub_id) return (&board_id[i]); } return (NULL); } static int ida_pci_probe(device_t dev) { struct ida_board *board = ida_pci_match(dev); if (board != NULL) { device_set_desc(dev, board->desc); return (BUS_PROBE_DEFAULT); } return (ENXIO); } static int ida_pci_attach(device_t dev) { struct ida_board *board = ida_pci_match(dev); u_int32_t id = pci_get_devid(dev); struct ida_softc *ida; int error, rid; ida = (struct ida_softc *)device_get_softc(dev); ida->dev = dev; ida->cmd = *board->accessor; ida->flags = board->flags; mtx_init(&ida->lock, "ida", NULL, MTX_DEF); callout_init_mtx(&ida->ch, &ida->lock, 0); ida->regs_res_type = SYS_RES_MEMORY; ida->regs_res_id = IDA_PCI_MEMADDR; if (id == IDA_DEVICEID_DEC_SMART) ida->regs_res_id = PCIR_BAR(0); ida->regs = bus_alloc_resource_any(dev, ida->regs_res_type, &ida->regs_res_id, RF_ACTIVE); if (ida->regs == NULL) { device_printf(dev, "can't allocate memory resources\n"); return (ENOMEM); } error = bus_dma_tag_create( /* parent */ bus_get_dma_tag(dev), /* alignment */ 1, /* boundary */ 0, /* lowaddr */ BUS_SPACE_MAXADDR_32BIT, /* highaddr */ BUS_SPACE_MAXADDR, /* filter */ NULL, /* filterarg */ NULL, /* maxsize */ BUS_SPACE_MAXSIZE_32BIT, /* nsegments */ BUS_SPACE_UNRESTRICTED, /* maxsegsize */ BUS_SPACE_MAXSIZE_32BIT, /* flags */ BUS_DMA_ALLOCNOW, /* lockfunc */ NULL, /* lockarg */ NULL, &ida->parent_dmat); if (error != 0) { device_printf(dev, "can't allocate DMA tag\n"); ida_free(ida); return (ENOMEM); } rid = 0; ida->irq_res_type = SYS_RES_IRQ; ida->irq = bus_alloc_resource_any(dev, ida->irq_res_type, &rid, RF_ACTIVE | RF_SHAREABLE); if (ida->irq == NULL) { ida_free(ida); return (ENOMEM); } error = bus_setup_intr(dev, ida->irq, INTR_TYPE_BIO | INTR_ENTROPY | 
INTR_MPSAFE, NULL, ida_intr, ida, &ida->ih); if (error) { device_printf(dev, "can't setup interrupt\n"); ida_free(ida); return (ENOMEM); } error = ida_setup(ida); if (error) { ida_free(ida); return (error); } return (0); } DRIVER_MODULE(ida, pci, ida_pci_driver, ida_devclass, 0, 0); +MODULE_PNP_INFO("W32:vendor/device;D:#", pci, ida, board_id, + nitems(board_id) - 1); Index: head/sys/dev/iwn/if_iwn.c =================================================================== --- head/sys/dev/iwn/if_iwn.c (revision 338948) +++ head/sys/dev/iwn/if_iwn.c (revision 338949) @@ -1,9031 +1,9032 @@ /*- * Copyright (c) 2007-2009 Damien Bergamini * Copyright (c) 2008 Benjamin Close * Copyright (c) 2008 Sam Leffler, Errno Consulting * Copyright (c) 2011 Intel Corporation * Copyright (c) 2013 Cedric GROSS * Copyright (c) 2013 Adrian Chadd * * Permission to use, copy, modify, and distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ /* * Driver for Intel WiFi Link 4965 and 1000/5000/6000 Series 802.11 network * adapters. */ #include __FBSDID("$FreeBSD$"); #include "opt_wlan.h" #include "opt_iwn.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include struct iwn_ident { uint16_t vendor; uint16_t device; const char *name; }; static const struct iwn_ident iwn_ident_table[] = { { 0x8086, IWN_DID_6x05_1, "Intel Centrino Advanced-N 6205" }, { 0x8086, IWN_DID_1000_1, "Intel Centrino Wireless-N 1000" }, { 0x8086, IWN_DID_1000_2, "Intel Centrino Wireless-N 1000" }, { 0x8086, IWN_DID_6x05_2, "Intel Centrino Advanced-N 6205" }, { 0x8086, IWN_DID_6050_1, "Intel Centrino Advanced-N + WiMAX 6250" }, { 0x8086, IWN_DID_6050_2, "Intel Centrino Advanced-N + WiMAX 6250" }, { 0x8086, IWN_DID_x030_1, "Intel Centrino Wireless-N 1030" }, { 0x8086, IWN_DID_x030_2, "Intel Centrino Wireless-N 1030" }, { 0x8086, IWN_DID_x030_3, "Intel Centrino Advanced-N 6230" }, { 0x8086, IWN_DID_x030_4, "Intel Centrino Advanced-N 6230" }, { 0x8086, IWN_DID_6150_1, "Intel Centrino Wireless-N + WiMAX 6150" }, { 0x8086, IWN_DID_6150_2, "Intel Centrino Wireless-N + WiMAX 6150" }, { 0x8086, IWN_DID_2x00_1, "Intel(R) Centrino(R) Wireless-N 2200 BGN" }, { 0x8086, IWN_DID_2x00_2, "Intel(R) Centrino(R) Wireless-N 2200 BGN" }, /* XXX 2200D is IWN_SDID_2x00_4; there's no way to express this here! 
*/ { 0x8086, IWN_DID_2x30_1, "Intel Centrino Wireless-N 2230" }, { 0x8086, IWN_DID_2x30_2, "Intel Centrino Wireless-N 2230" }, { 0x8086, IWN_DID_130_1, "Intel Centrino Wireless-N 130" }, { 0x8086, IWN_DID_130_2, "Intel Centrino Wireless-N 130" }, { 0x8086, IWN_DID_100_1, "Intel Centrino Wireless-N 100" }, { 0x8086, IWN_DID_100_2, "Intel Centrino Wireless-N 100" }, { 0x8086, IWN_DID_105_1, "Intel Centrino Wireless-N 105" }, { 0x8086, IWN_DID_105_2, "Intel Centrino Wireless-N 105" }, { 0x8086, IWN_DID_135_1, "Intel Centrino Wireless-N 135" }, { 0x8086, IWN_DID_135_2, "Intel Centrino Wireless-N 135" }, { 0x8086, IWN_DID_4965_1, "Intel Wireless WiFi Link 4965" }, { 0x8086, IWN_DID_6x00_1, "Intel Centrino Ultimate-N 6300" }, { 0x8086, IWN_DID_6x00_2, "Intel Centrino Advanced-N 6200" }, { 0x8086, IWN_DID_4965_2, "Intel Wireless WiFi Link 4965" }, { 0x8086, IWN_DID_4965_3, "Intel Wireless WiFi Link 4965" }, { 0x8086, IWN_DID_5x00_1, "Intel WiFi Link 5100" }, { 0x8086, IWN_DID_4965_4, "Intel Wireless WiFi Link 4965" }, { 0x8086, IWN_DID_5x00_3, "Intel Ultimate N WiFi Link 5300" }, { 0x8086, IWN_DID_5x00_4, "Intel Ultimate N WiFi Link 5300" }, { 0x8086, IWN_DID_5x00_2, "Intel WiFi Link 5100" }, { 0x8086, IWN_DID_6x00_3, "Intel Centrino Ultimate-N 6300" }, { 0x8086, IWN_DID_6x00_4, "Intel Centrino Advanced-N 6200" }, { 0x8086, IWN_DID_5x50_1, "Intel WiMAX/WiFi Link 5350" }, { 0x8086, IWN_DID_5x50_2, "Intel WiMAX/WiFi Link 5350" }, { 0x8086, IWN_DID_5x50_3, "Intel WiMAX/WiFi Link 5150" }, { 0x8086, IWN_DID_5x50_4, "Intel WiMAX/WiFi Link 5150" }, { 0x8086, IWN_DID_6035_1, "Intel Centrino Advanced 6235" }, { 0x8086, IWN_DID_6035_2, "Intel Centrino Advanced 6235" }, { 0, 0, NULL } }; static int iwn_probe(device_t); static int iwn_attach(device_t); static int iwn4965_attach(struct iwn_softc *, uint16_t); static int iwn5000_attach(struct iwn_softc *, uint16_t); static int iwn_config_specific(struct iwn_softc *, uint16_t); static void iwn_radiotap_attach(struct iwn_softc *); static void iwn_sysctlattach(struct iwn_softc *); static struct ieee80211vap *iwn_vap_create(struct ieee80211com *, const char [IFNAMSIZ], int, enum ieee80211_opmode, int, const uint8_t [IEEE80211_ADDR_LEN], const uint8_t [IEEE80211_ADDR_LEN]); static void iwn_vap_delete(struct ieee80211vap *); static int iwn_detach(device_t); static int iwn_shutdown(device_t); static int iwn_suspend(device_t); static int iwn_resume(device_t); static int iwn_nic_lock(struct iwn_softc *); static int iwn_eeprom_lock(struct iwn_softc *); static int iwn_init_otprom(struct iwn_softc *); static int iwn_read_prom_data(struct iwn_softc *, uint32_t, void *, int); static void iwn_dma_map_addr(void *, bus_dma_segment_t *, int, int); static int iwn_dma_contig_alloc(struct iwn_softc *, struct iwn_dma_info *, void **, bus_size_t, bus_size_t); static void iwn_dma_contig_free(struct iwn_dma_info *); static int iwn_alloc_sched(struct iwn_softc *); static void iwn_free_sched(struct iwn_softc *); static int iwn_alloc_kw(struct iwn_softc *); static void iwn_free_kw(struct iwn_softc *); static int iwn_alloc_ict(struct iwn_softc *); static void iwn_free_ict(struct iwn_softc *); static int iwn_alloc_fwmem(struct iwn_softc *); static void iwn_free_fwmem(struct iwn_softc *); static int iwn_alloc_rx_ring(struct iwn_softc *, struct iwn_rx_ring *); static void iwn_reset_rx_ring(struct iwn_softc *, struct iwn_rx_ring *); static void iwn_free_rx_ring(struct iwn_softc *, struct iwn_rx_ring *); static int iwn_alloc_tx_ring(struct iwn_softc *, struct iwn_tx_ring *, int); static void 
iwn_reset_tx_ring(struct iwn_softc *, struct iwn_tx_ring *); static void iwn_free_tx_ring(struct iwn_softc *, struct iwn_tx_ring *); static void iwn5000_ict_reset(struct iwn_softc *); static int iwn_read_eeprom(struct iwn_softc *, uint8_t macaddr[IEEE80211_ADDR_LEN]); static void iwn4965_read_eeprom(struct iwn_softc *); #ifdef IWN_DEBUG static void iwn4965_print_power_group(struct iwn_softc *, int); #endif static void iwn5000_read_eeprom(struct iwn_softc *); static uint32_t iwn_eeprom_channel_flags(struct iwn_eeprom_chan *); static void iwn_read_eeprom_band(struct iwn_softc *, int, int, int *, struct ieee80211_channel[]); static void iwn_read_eeprom_ht40(struct iwn_softc *, int, int, int *, struct ieee80211_channel[]); static void iwn_read_eeprom_channels(struct iwn_softc *, int, uint32_t); static struct iwn_eeprom_chan *iwn_find_eeprom_channel(struct iwn_softc *, struct ieee80211_channel *); static void iwn_getradiocaps(struct ieee80211com *, int, int *, struct ieee80211_channel[]); static int iwn_setregdomain(struct ieee80211com *, struct ieee80211_regdomain *, int, struct ieee80211_channel[]); static void iwn_read_eeprom_enhinfo(struct iwn_softc *); static struct ieee80211_node *iwn_node_alloc(struct ieee80211vap *, const uint8_t mac[IEEE80211_ADDR_LEN]); static void iwn_newassoc(struct ieee80211_node *, int); static int iwn_media_change(struct ifnet *); static int iwn_newstate(struct ieee80211vap *, enum ieee80211_state, int); static void iwn_calib_timeout(void *); static void iwn_rx_phy(struct iwn_softc *, struct iwn_rx_desc *); static void iwn_rx_done(struct iwn_softc *, struct iwn_rx_desc *, struct iwn_rx_data *); static void iwn_rx_compressed_ba(struct iwn_softc *, struct iwn_rx_desc *); static void iwn5000_rx_calib_results(struct iwn_softc *, struct iwn_rx_desc *); static void iwn_rx_statistics(struct iwn_softc *, struct iwn_rx_desc *); static void iwn4965_tx_done(struct iwn_softc *, struct iwn_rx_desc *, struct iwn_rx_data *); static void iwn5000_tx_done(struct iwn_softc *, struct iwn_rx_desc *, struct iwn_rx_data *); static void iwn_tx_done(struct iwn_softc *, struct iwn_rx_desc *, int, int, uint8_t); static void iwn_ampdu_tx_done(struct iwn_softc *, int, int, int, int, int, void *); static void iwn_cmd_done(struct iwn_softc *, struct iwn_rx_desc *); static void iwn_notif_intr(struct iwn_softc *); static void iwn_wakeup_intr(struct iwn_softc *); static void iwn_rftoggle_task(void *, int); static void iwn_fatal_intr(struct iwn_softc *); static void iwn_intr(void *); static void iwn4965_update_sched(struct iwn_softc *, int, int, uint8_t, uint16_t); static void iwn5000_update_sched(struct iwn_softc *, int, int, uint8_t, uint16_t); #ifdef notyet static void iwn5000_reset_sched(struct iwn_softc *, int, int); #endif static int iwn_tx_data(struct iwn_softc *, struct mbuf *, struct ieee80211_node *); static int iwn_tx_data_raw(struct iwn_softc *, struct mbuf *, struct ieee80211_node *, const struct ieee80211_bpf_params *params); static int iwn_tx_cmd(struct iwn_softc *, struct mbuf *, struct ieee80211_node *, struct iwn_tx_ring *); static void iwn_xmit_task(void *arg0, int pending); static int iwn_raw_xmit(struct ieee80211_node *, struct mbuf *, const struct ieee80211_bpf_params *); static int iwn_transmit(struct ieee80211com *, struct mbuf *); static void iwn_scan_timeout(void *); static void iwn_watchdog(void *); static int iwn_ioctl(struct ieee80211com *, u_long , void *); static void iwn_parent(struct ieee80211com *); static int iwn_cmd(struct iwn_softc *, int, const void *, int, 
int); static int iwn4965_add_node(struct iwn_softc *, struct iwn_node_info *, int); static int iwn5000_add_node(struct iwn_softc *, struct iwn_node_info *, int); static int iwn_set_link_quality(struct iwn_softc *, struct ieee80211_node *); static int iwn_add_broadcast_node(struct iwn_softc *, int); static int iwn_updateedca(struct ieee80211com *); static void iwn_set_promisc(struct iwn_softc *); static void iwn_update_promisc(struct ieee80211com *); static void iwn_update_mcast(struct ieee80211com *); static void iwn_set_led(struct iwn_softc *, uint8_t, uint8_t, uint8_t); static int iwn_set_critical_temp(struct iwn_softc *); static int iwn_set_timing(struct iwn_softc *, struct ieee80211_node *); static void iwn4965_power_calibration(struct iwn_softc *, int); static int iwn4965_set_txpower(struct iwn_softc *, int); static int iwn5000_set_txpower(struct iwn_softc *, int); static int iwn4965_get_rssi(struct iwn_softc *, struct iwn_rx_stat *); static int iwn5000_get_rssi(struct iwn_softc *, struct iwn_rx_stat *); static int iwn_get_noise(const struct iwn_rx_general_stats *); static int iwn4965_get_temperature(struct iwn_softc *); static int iwn5000_get_temperature(struct iwn_softc *); static int iwn_init_sensitivity(struct iwn_softc *); static void iwn_collect_noise(struct iwn_softc *, const struct iwn_rx_general_stats *); static int iwn4965_init_gains(struct iwn_softc *); static int iwn5000_init_gains(struct iwn_softc *); static int iwn4965_set_gains(struct iwn_softc *); static int iwn5000_set_gains(struct iwn_softc *); static void iwn_tune_sensitivity(struct iwn_softc *, const struct iwn_rx_stats *); static void iwn_save_stats_counters(struct iwn_softc *, const struct iwn_stats *); static int iwn_send_sensitivity(struct iwn_softc *); static void iwn_check_rx_recovery(struct iwn_softc *, struct iwn_stats *); static int iwn_set_pslevel(struct iwn_softc *, int, int, int); static int iwn_send_btcoex(struct iwn_softc *); static int iwn_send_advanced_btcoex(struct iwn_softc *); static int iwn5000_runtime_calib(struct iwn_softc *); static int iwn_check_bss_filter(struct iwn_softc *); static int iwn4965_rxon_assoc(struct iwn_softc *, int); static int iwn5000_rxon_assoc(struct iwn_softc *, int); static int iwn_send_rxon(struct iwn_softc *, int, int); static int iwn_config(struct iwn_softc *); static int iwn_scan(struct iwn_softc *, struct ieee80211vap *, struct ieee80211_scan_state *, struct ieee80211_channel *); static int iwn_auth(struct iwn_softc *, struct ieee80211vap *vap); static int iwn_run(struct iwn_softc *, struct ieee80211vap *vap); static int iwn_ampdu_rx_start(struct ieee80211_node *, struct ieee80211_rx_ampdu *, int, int, int); static void iwn_ampdu_rx_stop(struct ieee80211_node *, struct ieee80211_rx_ampdu *); static int iwn_addba_request(struct ieee80211_node *, struct ieee80211_tx_ampdu *, int, int, int); static int iwn_addba_response(struct ieee80211_node *, struct ieee80211_tx_ampdu *, int, int, int); static int iwn_ampdu_tx_start(struct ieee80211com *, struct ieee80211_node *, uint8_t); static void iwn_ampdu_tx_stop(struct ieee80211_node *, struct ieee80211_tx_ampdu *); static void iwn4965_ampdu_tx_start(struct iwn_softc *, struct ieee80211_node *, int, uint8_t, uint16_t); static void iwn4965_ampdu_tx_stop(struct iwn_softc *, int, uint8_t, uint16_t); static void iwn5000_ampdu_tx_start(struct iwn_softc *, struct ieee80211_node *, int, uint8_t, uint16_t); static void iwn5000_ampdu_tx_stop(struct iwn_softc *, int, uint8_t, uint16_t); static int iwn5000_query_calibration(struct 
iwn_softc *); static int iwn5000_send_calibration(struct iwn_softc *); static int iwn5000_send_wimax_coex(struct iwn_softc *); static int iwn5000_crystal_calib(struct iwn_softc *); static int iwn5000_temp_offset_calib(struct iwn_softc *); static int iwn5000_temp_offset_calibv2(struct iwn_softc *); static int iwn4965_post_alive(struct iwn_softc *); static int iwn5000_post_alive(struct iwn_softc *); static int iwn4965_load_bootcode(struct iwn_softc *, const uint8_t *, int); static int iwn4965_load_firmware(struct iwn_softc *); static int iwn5000_load_firmware_section(struct iwn_softc *, uint32_t, const uint8_t *, int); static int iwn5000_load_firmware(struct iwn_softc *); static int iwn_read_firmware_leg(struct iwn_softc *, struct iwn_fw_info *); static int iwn_read_firmware_tlv(struct iwn_softc *, struct iwn_fw_info *, uint16_t); static int iwn_read_firmware(struct iwn_softc *); static void iwn_unload_firmware(struct iwn_softc *); static int iwn_clock_wait(struct iwn_softc *); static int iwn_apm_init(struct iwn_softc *); static void iwn_apm_stop_master(struct iwn_softc *); static void iwn_apm_stop(struct iwn_softc *); static int iwn4965_nic_config(struct iwn_softc *); static int iwn5000_nic_config(struct iwn_softc *); static int iwn_hw_prepare(struct iwn_softc *); static int iwn_hw_init(struct iwn_softc *); static void iwn_hw_stop(struct iwn_softc *); static void iwn_panicked(void *, int); static int iwn_init_locked(struct iwn_softc *); static int iwn_init(struct iwn_softc *); static void iwn_stop_locked(struct iwn_softc *); static void iwn_stop(struct iwn_softc *); static void iwn_scan_start(struct ieee80211com *); static void iwn_scan_end(struct ieee80211com *); static void iwn_set_channel(struct ieee80211com *); static void iwn_scan_curchan(struct ieee80211_scan_state *, unsigned long); static void iwn_scan_mindwell(struct ieee80211_scan_state *); #ifdef IWN_DEBUG static char *iwn_get_csr_string(int); static void iwn_debug_register(struct iwn_softc *); #endif static device_method_t iwn_methods[] = { /* Device interface */ DEVMETHOD(device_probe, iwn_probe), DEVMETHOD(device_attach, iwn_attach), DEVMETHOD(device_detach, iwn_detach), DEVMETHOD(device_shutdown, iwn_shutdown), DEVMETHOD(device_suspend, iwn_suspend), DEVMETHOD(device_resume, iwn_resume), DEVMETHOD_END }; static driver_t iwn_driver = { "iwn", iwn_methods, sizeof(struct iwn_softc) }; static devclass_t iwn_devclass; DRIVER_MODULE(iwn, pci, iwn_driver, iwn_devclass, NULL, NULL); - +MODULE_PNP_INFO("U16:vendor;U16:device;D:#", pci, iwn, iwn_ident_table, + nitems(iwn_ident_table) - 1); MODULE_VERSION(iwn, 1); MODULE_DEPEND(iwn, firmware, 1, 1, 1); MODULE_DEPEND(iwn, pci, 1, 1, 1); MODULE_DEPEND(iwn, wlan, 1, 1, 1); static d_ioctl_t iwn_cdev_ioctl; static d_open_t iwn_cdev_open; static d_close_t iwn_cdev_close; static struct cdevsw iwn_cdevsw = { .d_version = D_VERSION, .d_flags = 0, .d_open = iwn_cdev_open, .d_close = iwn_cdev_close, .d_ioctl = iwn_cdev_ioctl, .d_name = "iwn", }; static int iwn_probe(device_t dev) { const struct iwn_ident *ident; for (ident = iwn_ident_table; ident->name != NULL; ident++) { if (pci_get_vendor(dev) == ident->vendor && pci_get_device(dev) == ident->device) { device_set_desc(dev, ident->name); return (BUS_PROBE_DEFAULT); } } return ENXIO; } static int iwn_is_3stream_device(struct iwn_softc *sc) { /* XXX for now only 5300, until the 5350 can be tested */ if (sc->hw_type == IWN_HW_REV_TYPE_5300) return (1); return (0); } static int iwn_attach(device_t dev) { struct iwn_softc *sc = device_get_softc(dev); 
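/*
 * Attach overview: locate the PCIe capability, map BAR0, allocate the
 * (MSI or shared INTx) interrupt resource, identify the hardware
 * revision, read the EEPROM for the MAC address and channel list,
 * allocate firmware/DMA memory and the TX/RX rings, and finally hook
 * the interrupt and register with net80211.  Any failure jumps to the
 * fail label, which tears everything down via iwn_detach().
 */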
struct ieee80211com *ic; int i, error, rid; sc->sc_dev = dev; #ifdef IWN_DEBUG error = resource_int_value(device_get_name(sc->sc_dev), device_get_unit(sc->sc_dev), "debug", &(sc->sc_debug)); if (error != 0) sc->sc_debug = 0; #else sc->sc_debug = 0; #endif DPRINTF(sc, IWN_DEBUG_TRACE, "->%s: begin\n",__func__); /* * Get the offset of the PCI Express Capability Structure in PCI * Configuration Space. */ error = pci_find_cap(dev, PCIY_EXPRESS, &sc->sc_cap_off); if (error != 0) { device_printf(dev, "PCIe capability structure not found!\n"); return error; } /* Clear device-specific "PCI retry timeout" register (41h). */ pci_write_config(dev, 0x41, 0, 1); /* Enable bus-mastering. */ pci_enable_busmaster(dev); rid = PCIR_BAR(0); sc->mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid, RF_ACTIVE); if (sc->mem == NULL) { device_printf(dev, "can't map mem space\n"); error = ENOMEM; return error; } sc->sc_st = rman_get_bustag(sc->mem); sc->sc_sh = rman_get_bushandle(sc->mem); i = 1; rid = 0; if (pci_alloc_msi(dev, &i) == 0) rid = 1; /* Install interrupt handler. */ sc->irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, RF_ACTIVE | (rid != 0 ? 0 : RF_SHAREABLE)); if (sc->irq == NULL) { device_printf(dev, "can't map interrupt\n"); error = ENOMEM; goto fail; } IWN_LOCK_INIT(sc); /* Read hardware revision and attach. */ sc->hw_type = (IWN_READ(sc, IWN_HW_REV) >> IWN_HW_REV_TYPE_SHIFT) & IWN_HW_REV_TYPE_MASK; sc->subdevice_id = pci_get_subdevice(dev); /* * 4965 versus 5000 and later have different methods. * Let's set those up first. */ if (sc->hw_type == IWN_HW_REV_TYPE_4965) error = iwn4965_attach(sc, pci_get_device(dev)); else error = iwn5000_attach(sc, pci_get_device(dev)); if (error != 0) { device_printf(dev, "could not attach device, error %d\n", error); goto fail; } /* * Next, let's setup the various parameters of each NIC. */ error = iwn_config_specific(sc, pci_get_device(dev)); if (error != 0) { device_printf(dev, "could not attach device, error %d\n", error); goto fail; } if ((error = iwn_hw_prepare(sc)) != 0) { device_printf(dev, "hardware not ready, error %d\n", error); goto fail; } /* Allocate DMA memory for firmware transfers. */ if ((error = iwn_alloc_fwmem(sc)) != 0) { device_printf(dev, "could not allocate memory for firmware, error %d\n", error); goto fail; } /* Allocate "Keep Warm" page. */ if ((error = iwn_alloc_kw(sc)) != 0) { device_printf(dev, "could not allocate keep warm page, error %d\n", error); goto fail; } /* Allocate ICT table for 5000 Series. */ if (sc->hw_type != IWN_HW_REV_TYPE_4965 && (error = iwn_alloc_ict(sc)) != 0) { device_printf(dev, "could not allocate ICT table, error %d\n", error); goto fail; } /* Allocate TX scheduler "rings". */ if ((error = iwn_alloc_sched(sc)) != 0) { device_printf(dev, "could not allocate TX scheduler rings, error %d\n", error); goto fail; } /* Allocate TX rings (16 on 4965AGN, 20 on >=5000). */ for (i = 0; i < sc->ntxqs; i++) { if ((error = iwn_alloc_tx_ring(sc, &sc->txq[i], i)) != 0) { device_printf(dev, "could not allocate TX ring %d, error %d\n", i, error); goto fail; } } /* Allocate RX ring. */ if ((error = iwn_alloc_rx_ring(sc, &sc->rxq)) != 0) { device_printf(dev, "could not allocate RX ring, error %d\n", error); goto fail; } /* Clear pending interrupts. */ IWN_WRITE(sc, IWN_INT, 0xffffffff); ic = &sc->sc_ic; ic->ic_softc = sc; ic->ic_name = device_get_nameunit(dev); ic->ic_phytype = IEEE80211_T_OFDM; /* not only, but not used */ ic->ic_opmode = IEEE80211_M_STA; /* default to BSS mode */ /* Set device capabilities. 
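These capability flags advertise to net80211 what the driver supports; background scanning (IEEE80211_C_BGSCAN) and IBSS mode remain compiled out under #if 0 below.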
*/ ic->ic_caps = IEEE80211_C_STA /* station mode supported */ | IEEE80211_C_MONITOR /* monitor mode supported */ #if 0 | IEEE80211_C_BGSCAN /* background scanning */ #endif | IEEE80211_C_TXPMGT /* tx power management */ | IEEE80211_C_SHSLOT /* short slot time supported */ | IEEE80211_C_WPA | IEEE80211_C_SHPREAMBLE /* short preamble supported */ #if 0 | IEEE80211_C_IBSS /* ibss/adhoc mode */ #endif | IEEE80211_C_WME /* WME */ | IEEE80211_C_PMGT /* Station-side power mgmt */ ; /* Read MAC address, channels, etc from EEPROM. */ if ((error = iwn_read_eeprom(sc, ic->ic_macaddr)) != 0) { device_printf(dev, "could not read EEPROM, error %d\n", error); goto fail; } /* Count the number of available chains. */ sc->ntxchains = ((sc->txchainmask >> 2) & 1) + ((sc->txchainmask >> 1) & 1) + ((sc->txchainmask >> 0) & 1); sc->nrxchains = ((sc->rxchainmask >> 2) & 1) + ((sc->rxchainmask >> 1) & 1) + ((sc->rxchainmask >> 0) & 1); if (bootverbose) { device_printf(dev, "MIMO %dT%dR, %.4s, address %6D\n", sc->ntxchains, sc->nrxchains, sc->eeprom_domain, ic->ic_macaddr, ":"); } if (sc->sc_flags & IWN_FLAG_HAS_11N) { ic->ic_rxstream = sc->nrxchains; ic->ic_txstream = sc->ntxchains; /* * Some of the 3 antenna devices (ie, the 4965) only supports * 2x2 operation. So correct the number of streams if * it's not a 3-stream device. */ if (! iwn_is_3stream_device(sc)) { if (ic->ic_rxstream > 2) ic->ic_rxstream = 2; if (ic->ic_txstream > 2) ic->ic_txstream = 2; } ic->ic_htcaps = IEEE80211_HTCAP_SMPS_OFF /* SMPS mode disabled */ | IEEE80211_HTCAP_SHORTGI20 /* short GI in 20MHz */ | IEEE80211_HTCAP_CHWIDTH40 /* 40MHz channel width*/ | IEEE80211_HTCAP_SHORTGI40 /* short GI in 40MHz */ #ifdef notyet | IEEE80211_HTCAP_GREENFIELD #if IWN_RBUF_SIZE == 8192 | IEEE80211_HTCAP_MAXAMSDU_7935 /* max A-MSDU length */ #else | IEEE80211_HTCAP_MAXAMSDU_3839 /* max A-MSDU length */ #endif #endif /* s/w capabilities */ | IEEE80211_HTC_HT /* HT operation */ | IEEE80211_HTC_AMPDU /* tx A-MPDU */ #ifdef notyet | IEEE80211_HTC_AMSDU /* tx A-MSDU */ #endif ; } ieee80211_ifattach(ic); ic->ic_vap_create = iwn_vap_create; ic->ic_ioctl = iwn_ioctl; ic->ic_parent = iwn_parent; ic->ic_vap_delete = iwn_vap_delete; ic->ic_transmit = iwn_transmit; ic->ic_raw_xmit = iwn_raw_xmit; ic->ic_node_alloc = iwn_node_alloc; sc->sc_ampdu_rx_start = ic->ic_ampdu_rx_start; ic->ic_ampdu_rx_start = iwn_ampdu_rx_start; sc->sc_ampdu_rx_stop = ic->ic_ampdu_rx_stop; ic->ic_ampdu_rx_stop = iwn_ampdu_rx_stop; sc->sc_addba_request = ic->ic_addba_request; ic->ic_addba_request = iwn_addba_request; sc->sc_addba_response = ic->ic_addba_response; ic->ic_addba_response = iwn_addba_response; sc->sc_addba_stop = ic->ic_addba_stop; ic->ic_addba_stop = iwn_ampdu_tx_stop; ic->ic_newassoc = iwn_newassoc; ic->ic_wme.wme_update = iwn_updateedca; ic->ic_update_promisc = iwn_update_promisc; ic->ic_update_mcast = iwn_update_mcast; ic->ic_scan_start = iwn_scan_start; ic->ic_scan_end = iwn_scan_end; ic->ic_set_channel = iwn_set_channel; ic->ic_scan_curchan = iwn_scan_curchan; ic->ic_scan_mindwell = iwn_scan_mindwell; ic->ic_getradiocaps = iwn_getradiocaps; ic->ic_setregdomain = iwn_setregdomain; iwn_radiotap_attach(sc); callout_init_mtx(&sc->calib_to, &sc->sc_mtx, 0); callout_init_mtx(&sc->scan_timeout, &sc->sc_mtx, 0); callout_init_mtx(&sc->watchdog_to, &sc->sc_mtx, 0); TASK_INIT(&sc->sc_rftoggle_task, 0, iwn_rftoggle_task, sc); TASK_INIT(&sc->sc_panic_task, 0, iwn_panicked, sc); TASK_INIT(&sc->sc_xmit_task, 0, iwn_xmit_task, sc); mbufq_init(&sc->sc_xmit_queue, 1024); sc->sc_tq = 
taskqueue_create("iwn_taskq", M_WAITOK, taskqueue_thread_enqueue, &sc->sc_tq); error = taskqueue_start_threads(&sc->sc_tq, 1, 0, "iwn_taskq"); if (error != 0) { device_printf(dev, "can't start threads, error %d\n", error); goto fail; } iwn_sysctlattach(sc); /* * Hook our interrupt after all initialization is complete. */ error = bus_setup_intr(dev, sc->irq, INTR_TYPE_NET | INTR_MPSAFE, NULL, iwn_intr, sc, &sc->sc_ih); if (error != 0) { device_printf(dev, "can't establish interrupt, error %d\n", error); goto fail; } #if 0 device_printf(sc->sc_dev, "%s: rx_stats=%d, rx_stats_bt=%d\n", __func__, sizeof(struct iwn_stats), sizeof(struct iwn_stats_bt)); #endif if (bootverbose) ieee80211_announce(ic); DPRINTF(sc, IWN_DEBUG_TRACE, "->%s: end\n",__func__); /* Add debug ioctl right at the end */ sc->sc_cdev = make_dev(&iwn_cdevsw, device_get_unit(dev), UID_ROOT, GID_WHEEL, 0600, "%s", device_get_nameunit(dev)); if (sc->sc_cdev == NULL) { device_printf(dev, "failed to create debug character device\n"); } else { sc->sc_cdev->si_drv1 = sc; } return 0; fail: iwn_detach(dev); DPRINTF(sc, IWN_DEBUG_TRACE, "->%s: end in error\n",__func__); return error; } /* * Define specific configuration based on device id and subdevice id * pid : PCI device id */ static int iwn_config_specific(struct iwn_softc *sc, uint16_t pid) { switch (pid) { /* 4965 series */ case IWN_DID_4965_1: case IWN_DID_4965_2: case IWN_DID_4965_3: case IWN_DID_4965_4: sc->base_params = &iwn4965_base_params; sc->limits = &iwn4965_sensitivity_limits; sc->fwname = "iwn4965fw"; /* Override chains masks, ROM is known to be broken. */ sc->txchainmask = IWN_ANT_AB; sc->rxchainmask = IWN_ANT_ABC; /* Enable normal btcoex */ sc->sc_flags |= IWN_FLAG_BTCOEX; break; /* 1000 Series */ case IWN_DID_1000_1: case IWN_DID_1000_2: switch(sc->subdevice_id) { case IWN_SDID_1000_1: case IWN_SDID_1000_2: case IWN_SDID_1000_3: case IWN_SDID_1000_4: case IWN_SDID_1000_5: case IWN_SDID_1000_6: case IWN_SDID_1000_7: case IWN_SDID_1000_8: case IWN_SDID_1000_9: case IWN_SDID_1000_10: case IWN_SDID_1000_11: case IWN_SDID_1000_12: sc->limits = &iwn1000_sensitivity_limits; sc->base_params = &iwn1000_base_params; sc->fwname = "iwn1000fw"; break; default: device_printf(sc->sc_dev, "adapter type id : 0x%04x sub id :" "0x%04x rev %d not supported (subdevice)\n", pid, sc->subdevice_id,sc->hw_type); return ENOTSUP; } break; /* 6x00 Series */ case IWN_DID_6x00_2: case IWN_DID_6x00_4: case IWN_DID_6x00_1: case IWN_DID_6x00_3: sc->fwname = "iwn6000fw"; sc->limits = &iwn6000_sensitivity_limits; switch(sc->subdevice_id) { case IWN_SDID_6x00_1: case IWN_SDID_6x00_2: case IWN_SDID_6x00_8: //iwl6000_3agn_cfg sc->base_params = &iwn_6000_base_params; break; case IWN_SDID_6x00_3: case IWN_SDID_6x00_6: case IWN_SDID_6x00_9: ////iwl6000i_2agn case IWN_SDID_6x00_4: case IWN_SDID_6x00_7: case IWN_SDID_6x00_10: //iwl6000i_2abg_cfg case IWN_SDID_6x00_5: //iwl6000i_2bg_cfg sc->base_params = &iwn_6000i_base_params; sc->sc_flags |= IWN_FLAG_INTERNAL_PA; sc->txchainmask = IWN_ANT_BC; sc->rxchainmask = IWN_ANT_BC; break; default: device_printf(sc->sc_dev, "adapter type id : 0x%04x sub id :" "0x%04x rev %d not supported (subdevice)\n", pid, sc->subdevice_id,sc->hw_type); return ENOTSUP; } break; /* 6x05 Series */ case IWN_DID_6x05_1: case IWN_DID_6x05_2: switch(sc->subdevice_id) { case IWN_SDID_6x05_1: case IWN_SDID_6x05_4: case IWN_SDID_6x05_6: //iwl6005_2agn_cfg case IWN_SDID_6x05_2: case IWN_SDID_6x05_5: case IWN_SDID_6x05_7: //iwl6005_2abg_cfg case IWN_SDID_6x05_3: //iwl6005_2bg_cfg case 
IWN_SDID_6x05_8: case IWN_SDID_6x05_9: //iwl6005_2agn_sff_cfg case IWN_SDID_6x05_10: //iwl6005_2agn_d_cfg case IWN_SDID_6x05_11: //iwl6005_2agn_mow1_cfg case IWN_SDID_6x05_12: //iwl6005_2agn_mow2_cfg sc->fwname = "iwn6000g2afw"; sc->limits = &iwn6000_sensitivity_limits; sc->base_params = &iwn_6000g2_base_params; break; default: device_printf(sc->sc_dev, "adapter type id : 0x%04x sub id :" "0x%04x rev %d not supported (subdevice)\n", pid, sc->subdevice_id,sc->hw_type); return ENOTSUP; } break; /* 6x35 Series */ case IWN_DID_6035_1: case IWN_DID_6035_2: switch(sc->subdevice_id) { case IWN_SDID_6035_1: case IWN_SDID_6035_2: case IWN_SDID_6035_3: case IWN_SDID_6035_4: case IWN_SDID_6035_5: sc->fwname = "iwn6000g2bfw"; sc->limits = &iwn6235_sensitivity_limits; sc->base_params = &iwn_6235_base_params; break; default: device_printf(sc->sc_dev, "adapter type id : 0x%04x sub id :" "0x%04x rev %d not supported (subdevice)\n", pid, sc->subdevice_id,sc->hw_type); return ENOTSUP; } break; /* 6x50 WiFi/WiMax Series */ case IWN_DID_6050_1: case IWN_DID_6050_2: switch(sc->subdevice_id) { case IWN_SDID_6050_1: case IWN_SDID_6050_3: case IWN_SDID_6050_5: //iwl6050_2agn_cfg case IWN_SDID_6050_2: case IWN_SDID_6050_4: case IWN_SDID_6050_6: //iwl6050_2abg_cfg sc->fwname = "iwn6050fw"; sc->txchainmask = IWN_ANT_AB; sc->rxchainmask = IWN_ANT_AB; sc->limits = &iwn6000_sensitivity_limits; sc->base_params = &iwn_6050_base_params; break; default: device_printf(sc->sc_dev, "adapter type id : 0x%04x sub id :" "0x%04x rev %d not supported (subdevice)\n", pid, sc->subdevice_id,sc->hw_type); return ENOTSUP; } break; /* 6150 WiFi/WiMax Series */ case IWN_DID_6150_1: case IWN_DID_6150_2: switch(sc->subdevice_id) { case IWN_SDID_6150_1: case IWN_SDID_6150_3: case IWN_SDID_6150_5: // iwl6150_bgn_cfg case IWN_SDID_6150_2: case IWN_SDID_6150_4: case IWN_SDID_6150_6: //iwl6150_bg_cfg sc->fwname = "iwn6050fw"; sc->limits = &iwn6000_sensitivity_limits; sc->base_params = &iwn_6150_base_params; break; default: device_printf(sc->sc_dev, "adapter type id : 0x%04x sub id :" "0x%04x rev %d not supported (subdevice)\n", pid, sc->subdevice_id,sc->hw_type); return ENOTSUP; } break; /* 6030 Series and 1030 Series */ case IWN_DID_x030_1: case IWN_DID_x030_2: case IWN_DID_x030_3: case IWN_DID_x030_4: switch(sc->subdevice_id) { case IWN_SDID_x030_1: case IWN_SDID_x030_3: case IWN_SDID_x030_5: // iwl1030_bgn_cfg case IWN_SDID_x030_2: case IWN_SDID_x030_4: case IWN_SDID_x030_6: //iwl1030_bg_cfg case IWN_SDID_x030_7: case IWN_SDID_x030_10: case IWN_SDID_x030_14: //iwl6030_2agn_cfg case IWN_SDID_x030_8: case IWN_SDID_x030_11: case IWN_SDID_x030_15: // iwl6030_2bgn_cfg case IWN_SDID_x030_9: case IWN_SDID_x030_12: case IWN_SDID_x030_16: // iwl6030_2abg_cfg case IWN_SDID_x030_13: //iwl6030_2bg_cfg sc->fwname = "iwn6000g2bfw"; sc->limits = &iwn6000_sensitivity_limits; sc->base_params = &iwn_6000g2b_base_params; break; default: device_printf(sc->sc_dev, "adapter type id : 0x%04x sub id :" "0x%04x rev %d not supported (subdevice)\n", pid, sc->subdevice_id,sc->hw_type); return ENOTSUP; } break; /* 130 Series WiFi */ /* XXX: This series will need adjustment for rate. 
* see rx_with_siso_diversity in linux kernel */ case IWN_DID_130_1: case IWN_DID_130_2: switch(sc->subdevice_id) { case IWN_SDID_130_1: case IWN_SDID_130_3: case IWN_SDID_130_5: //iwl130_bgn_cfg case IWN_SDID_130_2: case IWN_SDID_130_4: case IWN_SDID_130_6: //iwl130_bg_cfg sc->fwname = "iwn6000g2bfw"; sc->limits = &iwn6000_sensitivity_limits; sc->base_params = &iwn_6000g2b_base_params; break; default: device_printf(sc->sc_dev, "adapter type id : 0x%04x sub id :" "0x%04x rev %d not supported (subdevice)\n", pid, sc->subdevice_id,sc->hw_type); return ENOTSUP; } break; /* 100 Series WiFi */ case IWN_DID_100_1: case IWN_DID_100_2: switch(sc->subdevice_id) { case IWN_SDID_100_1: case IWN_SDID_100_2: case IWN_SDID_100_3: case IWN_SDID_100_4: case IWN_SDID_100_5: case IWN_SDID_100_6: sc->limits = &iwn1000_sensitivity_limits; sc->base_params = &iwn1000_base_params; sc->fwname = "iwn100fw"; break; default: device_printf(sc->sc_dev, "adapter type id : 0x%04x sub id :" "0x%04x rev %d not supported (subdevice)\n", pid, sc->subdevice_id,sc->hw_type); return ENOTSUP; } break; /* 105 Series */ /* XXX: This series will need adjustment for rate. * see rx_with_siso_diversity in linux kernel */ case IWN_DID_105_1: case IWN_DID_105_2: switch(sc->subdevice_id) { case IWN_SDID_105_1: case IWN_SDID_105_2: case IWN_SDID_105_3: //iwl105_bgn_cfg case IWN_SDID_105_4: //iwl105_bgn_d_cfg sc->limits = &iwn2030_sensitivity_limits; sc->base_params = &iwn2000_base_params; sc->fwname = "iwn105fw"; break; default: device_printf(sc->sc_dev, "adapter type id : 0x%04x sub id :" "0x%04x rev %d not supported (subdevice)\n", pid, sc->subdevice_id,sc->hw_type); return ENOTSUP; } break; /* 135 Series */ /* XXX: This series will need adjustment for rate. * see rx_with_siso_diversity in linux kernel */ case IWN_DID_135_1: case IWN_DID_135_2: switch(sc->subdevice_id) { case IWN_SDID_135_1: case IWN_SDID_135_2: case IWN_SDID_135_3: sc->limits = &iwn2030_sensitivity_limits; sc->base_params = &iwn2030_base_params; sc->fwname = "iwn135fw"; break; default: device_printf(sc->sc_dev, "adapter type id : 0x%04x sub id :" "0x%04x rev %d not supported (subdevice)\n", pid, sc->subdevice_id,sc->hw_type); return ENOTSUP; } break; /* 2x00 Series */ case IWN_DID_2x00_1: case IWN_DID_2x00_2: switch(sc->subdevice_id) { case IWN_SDID_2x00_1: case IWN_SDID_2x00_2: case IWN_SDID_2x00_3: //iwl2000_2bgn_cfg case IWN_SDID_2x00_4: //iwl2000_2bgn_d_cfg sc->limits = &iwn2030_sensitivity_limits; sc->base_params = &iwn2000_base_params; sc->fwname = "iwn2000fw"; break; default: device_printf(sc->sc_dev, "adapter type id : 0x%04x sub id :" "0x%04x rev %d not supported (subdevice) \n", pid, sc->subdevice_id, sc->hw_type); return ENOTSUP; } break; /* 2x30 Series */ case IWN_DID_2x30_1: case IWN_DID_2x30_2: switch(sc->subdevice_id) { case IWN_SDID_2x30_1: case IWN_SDID_2x30_3: case IWN_SDID_2x30_5: //iwl100_bgn_cfg case IWN_SDID_2x30_2: case IWN_SDID_2x30_4: case IWN_SDID_2x30_6: //iwl100_bg_cfg sc->limits = &iwn2030_sensitivity_limits; sc->base_params = &iwn2030_base_params; sc->fwname = "iwn2030fw"; break; default: device_printf(sc->sc_dev, "adapter type id : 0x%04x sub id :" "0x%04x rev %d not supported (subdevice)\n", pid, sc->subdevice_id,sc->hw_type); return ENOTSUP; } break; /* 5x00 Series */ case IWN_DID_5x00_1: case IWN_DID_5x00_2: case IWN_DID_5x00_3: case IWN_DID_5x00_4: sc->limits = &iwn5000_sensitivity_limits; sc->base_params = &iwn5000_base_params; sc->fwname = "iwn5000fw"; switch(sc->subdevice_id) { case IWN_SDID_5x00_1: case IWN_SDID_5x00_2: case 
IWN_SDID_5x00_3: case IWN_SDID_5x00_4: case IWN_SDID_5x00_9: case IWN_SDID_5x00_10: case IWN_SDID_5x00_11: case IWN_SDID_5x00_12: case IWN_SDID_5x00_17: case IWN_SDID_5x00_18: case IWN_SDID_5x00_19: case IWN_SDID_5x00_20: //iwl5100_agn_cfg sc->txchainmask = IWN_ANT_B; sc->rxchainmask = IWN_ANT_AB; break; case IWN_SDID_5x00_5: case IWN_SDID_5x00_6: case IWN_SDID_5x00_13: case IWN_SDID_5x00_14: case IWN_SDID_5x00_21: case IWN_SDID_5x00_22: //iwl5100_bgn_cfg sc->txchainmask = IWN_ANT_B; sc->rxchainmask = IWN_ANT_AB; break; case IWN_SDID_5x00_7: case IWN_SDID_5x00_8: case IWN_SDID_5x00_15: case IWN_SDID_5x00_16: case IWN_SDID_5x00_23: case IWN_SDID_5x00_24: //iwl5100_abg_cfg sc->txchainmask = IWN_ANT_B; sc->rxchainmask = IWN_ANT_AB; break; case IWN_SDID_5x00_25: case IWN_SDID_5x00_26: case IWN_SDID_5x00_27: case IWN_SDID_5x00_28: case IWN_SDID_5x00_29: case IWN_SDID_5x00_30: case IWN_SDID_5x00_31: case IWN_SDID_5x00_32: case IWN_SDID_5x00_33: case IWN_SDID_5x00_34: case IWN_SDID_5x00_35: case IWN_SDID_5x00_36: //iwl5300_agn_cfg sc->txchainmask = IWN_ANT_ABC; sc->rxchainmask = IWN_ANT_ABC; break; default: device_printf(sc->sc_dev, "adapter type id : 0x%04x sub id :" "0x%04x rev %d not supported (subdevice)\n", pid, sc->subdevice_id,sc->hw_type); return ENOTSUP; } break; /* 5x50 Series */ case IWN_DID_5x50_1: case IWN_DID_5x50_2: case IWN_DID_5x50_3: case IWN_DID_5x50_4: sc->limits = &iwn5000_sensitivity_limits; sc->base_params = &iwn5000_base_params; sc->fwname = "iwn5000fw"; switch(sc->subdevice_id) { case IWN_SDID_5x50_1: case IWN_SDID_5x50_2: case IWN_SDID_5x50_3: //iwl5350_agn_cfg sc->limits = &iwn5000_sensitivity_limits; sc->base_params = &iwn5000_base_params; sc->fwname = "iwn5000fw"; break; case IWN_SDID_5x50_4: case IWN_SDID_5x50_5: case IWN_SDID_5x50_8: case IWN_SDID_5x50_9: case IWN_SDID_5x50_10: case IWN_SDID_5x50_11: //iwl5150_agn_cfg case IWN_SDID_5x50_6: case IWN_SDID_5x50_7: case IWN_SDID_5x50_12: case IWN_SDID_5x50_13: //iwl5150_abg_cfg sc->limits = &iwn5000_sensitivity_limits; sc->fwname = "iwn5150fw"; sc->base_params = &iwn_5x50_base_params; break; default: device_printf(sc->sc_dev, "adapter type id : 0x%04x sub id :" "0x%04x rev %d not supported (subdevice)\n", pid, sc->subdevice_id,sc->hw_type); return ENOTSUP; } break; default: device_printf(sc->sc_dev, "adapter type id : 0x%04x sub id : 0x%04x" "rev 0x%08x not supported (device)\n", pid, sc->subdevice_id, sc->hw_type); return ENOTSUP; } return 0; } static int iwn4965_attach(struct iwn_softc *sc, uint16_t pid) { struct iwn_ops *ops = &sc->ops; DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__); ops->load_firmware = iwn4965_load_firmware; ops->read_eeprom = iwn4965_read_eeprom; ops->post_alive = iwn4965_post_alive; ops->nic_config = iwn4965_nic_config; ops->update_sched = iwn4965_update_sched; ops->get_temperature = iwn4965_get_temperature; ops->get_rssi = iwn4965_get_rssi; ops->set_txpower = iwn4965_set_txpower; ops->init_gains = iwn4965_init_gains; ops->set_gains = iwn4965_set_gains; ops->rxon_assoc = iwn4965_rxon_assoc; ops->add_node = iwn4965_add_node; ops->tx_done = iwn4965_tx_done; ops->ampdu_tx_start = iwn4965_ampdu_tx_start; ops->ampdu_tx_stop = iwn4965_ampdu_tx_stop; sc->ntxqs = IWN4965_NTXQUEUES; sc->firstaggqueue = IWN4965_FIRSTAGGQUEUE; sc->ndmachnls = IWN4965_NDMACHNLS; sc->broadcast_id = IWN4965_ID_BROADCAST; sc->rxonsz = IWN4965_RXONSZ; sc->schedsz = IWN4965_SCHEDSZ; sc->fw_text_maxsz = IWN4965_FW_TEXT_MAXSZ; sc->fw_data_maxsz = IWN4965_FW_DATA_MAXSZ; sc->fwsz = IWN4965_FWSZ; sc->sched_txfact_addr = 
IWN4965_SCHED_TXFACT; sc->limits = &iwn4965_sensitivity_limits; sc->fwname = "iwn4965fw"; /* Override chains masks, ROM is known to be broken. */ sc->txchainmask = IWN_ANT_AB; sc->rxchainmask = IWN_ANT_ABC; /* Enable normal btcoex */ sc->sc_flags |= IWN_FLAG_BTCOEX; DPRINTF(sc, IWN_DEBUG_TRACE, "%s: end\n",__func__); return 0; } static int iwn5000_attach(struct iwn_softc *sc, uint16_t pid) { struct iwn_ops *ops = &sc->ops; DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__); ops->load_firmware = iwn5000_load_firmware; ops->read_eeprom = iwn5000_read_eeprom; ops->post_alive = iwn5000_post_alive; ops->nic_config = iwn5000_nic_config; ops->update_sched = iwn5000_update_sched; ops->get_temperature = iwn5000_get_temperature; ops->get_rssi = iwn5000_get_rssi; ops->set_txpower = iwn5000_set_txpower; ops->init_gains = iwn5000_init_gains; ops->set_gains = iwn5000_set_gains; ops->rxon_assoc = iwn5000_rxon_assoc; ops->add_node = iwn5000_add_node; ops->tx_done = iwn5000_tx_done; ops->ampdu_tx_start = iwn5000_ampdu_tx_start; ops->ampdu_tx_stop = iwn5000_ampdu_tx_stop; sc->ntxqs = IWN5000_NTXQUEUES; sc->firstaggqueue = IWN5000_FIRSTAGGQUEUE; sc->ndmachnls = IWN5000_NDMACHNLS; sc->broadcast_id = IWN5000_ID_BROADCAST; sc->rxonsz = IWN5000_RXONSZ; sc->schedsz = IWN5000_SCHEDSZ; sc->fw_text_maxsz = IWN5000_FW_TEXT_MAXSZ; sc->fw_data_maxsz = IWN5000_FW_DATA_MAXSZ; sc->fwsz = IWN5000_FWSZ; sc->sched_txfact_addr = IWN5000_SCHED_TXFACT; sc->reset_noise_gain = IWN5000_PHY_CALIB_RESET_NOISE_GAIN; sc->noise_gain = IWN5000_PHY_CALIB_NOISE_GAIN; return 0; } /* * Attach the interface to 802.11 radiotap. */ static void iwn_radiotap_attach(struct iwn_softc *sc) { DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__); ieee80211_radiotap_attach(&sc->sc_ic, &sc->sc_txtap.wt_ihdr, sizeof(sc->sc_txtap), IWN_TX_RADIOTAP_PRESENT, &sc->sc_rxtap.wr_ihdr, sizeof(sc->sc_rxtap), IWN_RX_RADIOTAP_PRESENT); DPRINTF(sc, IWN_DEBUG_TRACE, "->%s end\n", __func__); } static void iwn_sysctlattach(struct iwn_softc *sc) { #ifdef IWN_DEBUG struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(sc->sc_dev); struct sysctl_oid *tree = device_get_sysctl_tree(sc->sc_dev); SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, "debug", CTLFLAG_RW, &sc->sc_debug, sc->sc_debug, "control debugging printfs"); #endif } static struct ieee80211vap * iwn_vap_create(struct ieee80211com *ic, const char name[IFNAMSIZ], int unit, enum ieee80211_opmode opmode, int flags, const uint8_t bssid[IEEE80211_ADDR_LEN], const uint8_t mac[IEEE80211_ADDR_LEN]) { struct iwn_softc *sc = ic->ic_softc; struct iwn_vap *ivp; struct ieee80211vap *vap; if (!TAILQ_EMPTY(&ic->ic_vaps)) /* only one at a time */ return NULL; ivp = malloc(sizeof(struct iwn_vap), M_80211_VAP, M_WAITOK | M_ZERO); vap = &ivp->iv_vap; ieee80211_vap_setup(ic, vap, name, unit, opmode, flags, bssid); ivp->ctx = IWN_RXON_BSS_CTX; vap->iv_bmissthreshold = 10; /* override default */ /* Override with driver methods. */ ivp->iv_newstate = vap->iv_newstate; vap->iv_newstate = iwn_newstate; sc->ivap[IWN_RXON_BSS_CTX] = vap; ieee80211_ratectl_init(vap); /* Complete setup. 
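 * ieee80211_vap_attach() below finishes the net80211 side of the vap;
 * the iv_newstate handler saved a few lines up is invoked from the tail
 * of iwn_newstate() once the driver's own state-change work is done.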
*/ ieee80211_vap_attach(vap, iwn_media_change, ieee80211_media_status, mac); ic->ic_opmode = opmode; return vap; } static void iwn_vap_delete(struct ieee80211vap *vap) { struct iwn_vap *ivp = IWN_VAP(vap); ieee80211_ratectl_deinit(vap); ieee80211_vap_detach(vap); free(ivp, M_80211_VAP); } static void iwn_xmit_queue_drain(struct iwn_softc *sc) { struct mbuf *m; struct ieee80211_node *ni; IWN_LOCK_ASSERT(sc); while ((m = mbufq_dequeue(&sc->sc_xmit_queue)) != NULL) { ni = (struct ieee80211_node *)m->m_pkthdr.rcvif; ieee80211_free_node(ni); m_freem(m); } } static int iwn_xmit_queue_enqueue(struct iwn_softc *sc, struct mbuf *m) { IWN_LOCK_ASSERT(sc); return (mbufq_enqueue(&sc->sc_xmit_queue, m)); } static int iwn_detach(device_t dev) { struct iwn_softc *sc = device_get_softc(dev); int qid; DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__); if (sc->sc_ic.ic_softc != NULL) { /* Free the mbuf queue and node references */ IWN_LOCK(sc); iwn_xmit_queue_drain(sc); IWN_UNLOCK(sc); iwn_stop(sc); taskqueue_drain_all(sc->sc_tq); taskqueue_free(sc->sc_tq); callout_drain(&sc->watchdog_to); callout_drain(&sc->scan_timeout); callout_drain(&sc->calib_to); ieee80211_ifdetach(&sc->sc_ic); } /* Uninstall interrupt handler. */ if (sc->irq != NULL) { bus_teardown_intr(dev, sc->irq, sc->sc_ih); bus_release_resource(dev, SYS_RES_IRQ, rman_get_rid(sc->irq), sc->irq); pci_release_msi(dev); } /* Free DMA resources. */ iwn_free_rx_ring(sc, &sc->rxq); for (qid = 0; qid < sc->ntxqs; qid++) iwn_free_tx_ring(sc, &sc->txq[qid]); iwn_free_sched(sc); iwn_free_kw(sc); if (sc->ict != NULL) iwn_free_ict(sc); iwn_free_fwmem(sc); if (sc->mem != NULL) bus_release_resource(dev, SYS_RES_MEMORY, rman_get_rid(sc->mem), sc->mem); if (sc->sc_cdev) { destroy_dev(sc->sc_cdev); sc->sc_cdev = NULL; } DPRINTF(sc, IWN_DEBUG_TRACE, "->%s: end\n", __func__); IWN_LOCK_DESTROY(sc); return 0; } static int iwn_shutdown(device_t dev) { struct iwn_softc *sc = device_get_softc(dev); iwn_stop(sc); return 0; } static int iwn_suspend(device_t dev) { struct iwn_softc *sc = device_get_softc(dev); ieee80211_suspend_all(&sc->sc_ic); return 0; } static int iwn_resume(device_t dev) { struct iwn_softc *sc = device_get_softc(dev); /* Clear device-specific "PCI retry timeout" register (41h). */ pci_write_config(dev, 0x41, 0, 1); ieee80211_resume_all(&sc->sc_ic); return 0; } static int iwn_nic_lock(struct iwn_softc *sc) { int ntries; /* Request exclusive access to NIC. */ IWN_SETBITS(sc, IWN_GP_CNTRL, IWN_GP_CNTRL_MAC_ACCESS_REQ); /* Spin until we actually get the lock. 
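 *
 * The loop below polls IWN_GP_CNTRL until MAC_ACCESS_ENA is set while
 * SLEEP is clear, giving up after 1000 * 10us.  Callers normally check
 * the return value and pair a successful lock with iwn_nic_unlock(),
 * e.g. (compare iwn_reset_rx_ring() further down):
 *
 *	if (iwn_nic_lock(sc) == 0) {
 *		IWN_WRITE(sc, IWN_FH_RX_CONFIG, 0);
 *		iwn_nic_unlock(sc);
 *	}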
*/ for (ntries = 0; ntries < 1000; ntries++) { if ((IWN_READ(sc, IWN_GP_CNTRL) & (IWN_GP_CNTRL_MAC_ACCESS_ENA | IWN_GP_CNTRL_SLEEP)) == IWN_GP_CNTRL_MAC_ACCESS_ENA) return 0; DELAY(10); } return ETIMEDOUT; } static __inline void iwn_nic_unlock(struct iwn_softc *sc) { IWN_CLRBITS(sc, IWN_GP_CNTRL, IWN_GP_CNTRL_MAC_ACCESS_REQ); } static __inline uint32_t iwn_prph_read(struct iwn_softc *sc, uint32_t addr) { IWN_WRITE(sc, IWN_PRPH_RADDR, IWN_PRPH_DWORD | addr); IWN_BARRIER_READ_WRITE(sc); return IWN_READ(sc, IWN_PRPH_RDATA); } static __inline void iwn_prph_write(struct iwn_softc *sc, uint32_t addr, uint32_t data) { IWN_WRITE(sc, IWN_PRPH_WADDR, IWN_PRPH_DWORD | addr); IWN_BARRIER_WRITE(sc); IWN_WRITE(sc, IWN_PRPH_WDATA, data); } static __inline void iwn_prph_setbits(struct iwn_softc *sc, uint32_t addr, uint32_t mask) { iwn_prph_write(sc, addr, iwn_prph_read(sc, addr) | mask); } static __inline void iwn_prph_clrbits(struct iwn_softc *sc, uint32_t addr, uint32_t mask) { iwn_prph_write(sc, addr, iwn_prph_read(sc, addr) & ~mask); } static __inline void iwn_prph_write_region_4(struct iwn_softc *sc, uint32_t addr, const uint32_t *data, int count) { for (; count > 0; count--, data++, addr += 4) iwn_prph_write(sc, addr, *data); } static __inline uint32_t iwn_mem_read(struct iwn_softc *sc, uint32_t addr) { IWN_WRITE(sc, IWN_MEM_RADDR, addr); IWN_BARRIER_READ_WRITE(sc); return IWN_READ(sc, IWN_MEM_RDATA); } static __inline void iwn_mem_write(struct iwn_softc *sc, uint32_t addr, uint32_t data) { IWN_WRITE(sc, IWN_MEM_WADDR, addr); IWN_BARRIER_WRITE(sc); IWN_WRITE(sc, IWN_MEM_WDATA, data); } static __inline void iwn_mem_write_2(struct iwn_softc *sc, uint32_t addr, uint16_t data) { uint32_t tmp; tmp = iwn_mem_read(sc, addr & ~3); if (addr & 3) tmp = (tmp & 0x0000ffff) | data << 16; else tmp = (tmp & 0xffff0000) | data; iwn_mem_write(sc, addr & ~3, tmp); } static __inline void iwn_mem_read_region_4(struct iwn_softc *sc, uint32_t addr, uint32_t *data, int count) { for (; count > 0; count--, addr += 4) *data++ = iwn_mem_read(sc, addr); } static __inline void iwn_mem_set_region_4(struct iwn_softc *sc, uint32_t addr, uint32_t val, int count) { for (; count > 0; count--, addr += 4) iwn_mem_write(sc, addr, val); } static int iwn_eeprom_lock(struct iwn_softc *sc) { int i, ntries; for (i = 0; i < 100; i++) { /* Request exclusive access to EEPROM. */ IWN_SETBITS(sc, IWN_HW_IF_CONFIG, IWN_HW_IF_CONFIG_EEPROM_LOCKED); /* Spin until we actually get the lock. */ for (ntries = 0; ntries < 100; ntries++) { if (IWN_READ(sc, IWN_HW_IF_CONFIG) & IWN_HW_IF_CONFIG_EEPROM_LOCKED) return 0; DELAY(10); } } DPRINTF(sc, IWN_DEBUG_TRACE, "->%s end timeout\n", __func__); return ETIMEDOUT; } static __inline void iwn_eeprom_unlock(struct iwn_softc *sc) { IWN_CLRBITS(sc, IWN_HW_IF_CONFIG, IWN_HW_IF_CONFIG_EEPROM_LOCKED); } /* * Initialize access by host to One Time Programmable ROM. * NB: This kind of ROM can be found on 1000 or 6000 Series only. */ static int iwn_init_otprom(struct iwn_softc *sc) { uint16_t prev, base, next; int count, error; DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__); /* Wait for clock stabilization before accessing prph. */ if ((error = iwn_clock_wait(sc)) != 0) return error; if ((error = iwn_nic_lock(sc)) != 0) return error; iwn_prph_setbits(sc, IWN_APMG_PS, IWN_APMG_PS_RESET_REQ); DELAY(5); iwn_prph_clrbits(sc, IWN_APMG_PS, IWN_APMG_PS_RESET_REQ); iwn_nic_unlock(sc); /* Set auto clock gate disable bit for HW with OTP shadow RAM. 
*/ if (sc->base_params->shadow_ram_support) { IWN_SETBITS(sc, IWN_DBG_LINK_PWR_MGMT, IWN_RESET_LINK_PWR_MGMT_DIS); } IWN_CLRBITS(sc, IWN_EEPROM_GP, IWN_EEPROM_GP_IF_OWNER); /* Clear ECC status. */ IWN_SETBITS(sc, IWN_OTP_GP, IWN_OTP_GP_ECC_CORR_STTS | IWN_OTP_GP_ECC_UNCORR_STTS); /* * Find the block before last block (contains the EEPROM image) * for HW without OTP shadow RAM. */ if (! sc->base_params->shadow_ram_support) { /* Switch to absolute addressing mode. */ IWN_CLRBITS(sc, IWN_OTP_GP, IWN_OTP_GP_RELATIVE_ACCESS); base = prev = 0; for (count = 0; count < sc->base_params->max_ll_items; count++) { error = iwn_read_prom_data(sc, base, &next, 2); if (error != 0) return error; if (next == 0) /* End of linked-list. */ break; prev = base; base = le16toh(next); } if (count == 0 || count == sc->base_params->max_ll_items) return EIO; /* Skip "next" word. */ sc->prom_base = prev + 1; } DPRINTF(sc, IWN_DEBUG_TRACE, "->%s end\n", __func__); return 0; } static int iwn_read_prom_data(struct iwn_softc *sc, uint32_t addr, void *data, int count) { uint8_t *out = data; uint32_t val, tmp; int ntries; DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__); addr += sc->prom_base; for (; count > 0; count -= 2, addr++) { IWN_WRITE(sc, IWN_EEPROM, addr << 2); for (ntries = 0; ntries < 10; ntries++) { val = IWN_READ(sc, IWN_EEPROM); if (val & IWN_EEPROM_READ_VALID) break; DELAY(5); } if (ntries == 10) { device_printf(sc->sc_dev, "timeout reading ROM at 0x%x\n", addr); return ETIMEDOUT; } if (sc->sc_flags & IWN_FLAG_HAS_OTPROM) { /* OTPROM, check for ECC errors. */ tmp = IWN_READ(sc, IWN_OTP_GP); if (tmp & IWN_OTP_GP_ECC_UNCORR_STTS) { device_printf(sc->sc_dev, "OTPROM ECC error at 0x%x\n", addr); return EIO; } if (tmp & IWN_OTP_GP_ECC_CORR_STTS) { /* Correctable ECC error, clear bit. */ IWN_SETBITS(sc, IWN_OTP_GP, IWN_OTP_GP_ECC_CORR_STTS); } } *out++ = val >> 16; if (count > 1) *out++ = val >> 24; } DPRINTF(sc, IWN_DEBUG_TRACE, "->%s end\n", __func__); return 0; } static void iwn_dma_map_addr(void *arg, bus_dma_segment_t *segs, int nsegs, int error) { if (error != 0) return; KASSERT(nsegs == 1, ("too many DMA segments, %d should be 1", nsegs)); *(bus_addr_t *)arg = segs[0].ds_addr; } static int iwn_dma_contig_alloc(struct iwn_softc *sc, struct iwn_dma_info *dma, void **kvap, bus_size_t size, bus_size_t alignment) { int error; dma->tag = NULL; dma->size = size; error = bus_dma_tag_create(bus_get_dma_tag(sc->sc_dev), alignment, 0, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, size, 1, size, 0, NULL, NULL, &dma->tag); if (error != 0) goto fail; error = bus_dmamem_alloc(dma->tag, (void **)&dma->vaddr, BUS_DMA_NOWAIT | BUS_DMA_ZERO | BUS_DMA_COHERENT, &dma->map); if (error != 0) goto fail; error = bus_dmamap_load(dma->tag, dma->map, dma->vaddr, size, iwn_dma_map_addr, &dma->paddr, BUS_DMA_NOWAIT); if (error != 0) goto fail; bus_dmamap_sync(dma->tag, dma->map, BUS_DMASYNC_PREWRITE); if (kvap != NULL) *kvap = dma->vaddr; return 0; fail: iwn_dma_contig_free(dma); return error; } static void iwn_dma_contig_free(struct iwn_dma_info *dma) { if (dma->vaddr != NULL) { bus_dmamap_sync(dma->tag, dma->map, BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); bus_dmamap_unload(dma->tag, dma->map); bus_dmamem_free(dma->tag, dma->vaddr, dma->map); dma->vaddr = NULL; } if (dma->tag != NULL) { bus_dma_tag_destroy(dma->tag); dma->tag = NULL; } } static int iwn_alloc_sched(struct iwn_softc *sc) { /* TX scheduler rings must be aligned on a 1KB boundary. 
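 *
 * The requirement is met by passing 1024 as the alignment argument of
 * iwn_dma_contig_alloc() below, which hands it straight to
 * bus_dma_tag_create().  The other fixed allocations follow the same
 * pattern: 4096 for the "keep warm" page and the ICT table, 16 for the
 * firmware image, 256 for the RX/TX descriptor arrays.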
*/ return iwn_dma_contig_alloc(sc, &sc->sched_dma, (void **)&sc->sched, sc->schedsz, 1024); } static void iwn_free_sched(struct iwn_softc *sc) { iwn_dma_contig_free(&sc->sched_dma); } static int iwn_alloc_kw(struct iwn_softc *sc) { /* "Keep Warm" page must be aligned on a 4KB boundary. */ return iwn_dma_contig_alloc(sc, &sc->kw_dma, NULL, 4096, 4096); } static void iwn_free_kw(struct iwn_softc *sc) { iwn_dma_contig_free(&sc->kw_dma); } static int iwn_alloc_ict(struct iwn_softc *sc) { /* ICT table must be aligned on a 4KB boundary. */ return iwn_dma_contig_alloc(sc, &sc->ict_dma, (void **)&sc->ict, IWN_ICT_SIZE, 4096); } static void iwn_free_ict(struct iwn_softc *sc) { iwn_dma_contig_free(&sc->ict_dma); } static int iwn_alloc_fwmem(struct iwn_softc *sc) { /* Must be aligned on a 16-byte boundary. */ return iwn_dma_contig_alloc(sc, &sc->fw_dma, NULL, sc->fwsz, 16); } static void iwn_free_fwmem(struct iwn_softc *sc) { iwn_dma_contig_free(&sc->fw_dma); } static int iwn_alloc_rx_ring(struct iwn_softc *sc, struct iwn_rx_ring *ring) { bus_size_t size; int i, error; ring->cur = 0; DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__); /* Allocate RX descriptors (256-byte aligned). */ size = IWN_RX_RING_COUNT * sizeof (uint32_t); error = iwn_dma_contig_alloc(sc, &ring->desc_dma, (void **)&ring->desc, size, 256); if (error != 0) { device_printf(sc->sc_dev, "%s: could not allocate RX ring DMA memory, error %d\n", __func__, error); goto fail; } /* Allocate RX status area (16-byte aligned). */ error = iwn_dma_contig_alloc(sc, &ring->stat_dma, (void **)&ring->stat, sizeof (struct iwn_rx_status), 16); if (error != 0) { device_printf(sc->sc_dev, "%s: could not allocate RX status DMA memory, error %d\n", __func__, error); goto fail; } /* Create RX buffer DMA tag. */ error = bus_dma_tag_create(bus_get_dma_tag(sc->sc_dev), 1, 0, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, IWN_RBUF_SIZE, 1, IWN_RBUF_SIZE, 0, NULL, NULL, &ring->data_dmat); if (error != 0) { device_printf(sc->sc_dev, "%s: could not create RX buf DMA tag, error %d\n", __func__, error); goto fail; } /* * Allocate and map RX buffers. */ for (i = 0; i < IWN_RX_RING_COUNT; i++) { struct iwn_rx_data *data = &ring->data[i]; bus_addr_t paddr; error = bus_dmamap_create(ring->data_dmat, 0, &data->map); if (error != 0) { device_printf(sc->sc_dev, "%s: could not create RX buf DMA map, error %d\n", __func__, error); goto fail; } data->m = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, IWN_RBUF_SIZE); if (data->m == NULL) { device_printf(sc->sc_dev, "%s: could not allocate RX mbuf\n", __func__); error = ENOBUFS; goto fail; } error = bus_dmamap_load(ring->data_dmat, data->map, mtod(data->m, void *), IWN_RBUF_SIZE, iwn_dma_map_addr, &paddr, BUS_DMA_NOWAIT); if (error != 0 && error != EFBIG) { device_printf(sc->sc_dev, "%s: can't map mbuf, error %d\n", __func__, error); goto fail; } bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_PREREAD); /* Set physical address of RX buffer (256-byte aligned). 
*/ ring->desc[i] = htole32(paddr >> 8); } bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map, BUS_DMASYNC_PREWRITE); DPRINTF(sc, IWN_DEBUG_TRACE, "->%s: end\n",__func__); return 0; fail: iwn_free_rx_ring(sc, ring); DPRINTF(sc, IWN_DEBUG_TRACE, "->%s: end in error\n",__func__); return error; } static void iwn_reset_rx_ring(struct iwn_softc *sc, struct iwn_rx_ring *ring) { int ntries; DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__); if (iwn_nic_lock(sc) == 0) { IWN_WRITE(sc, IWN_FH_RX_CONFIG, 0); for (ntries = 0; ntries < 1000; ntries++) { if (IWN_READ(sc, IWN_FH_RX_STATUS) & IWN_FH_RX_STATUS_IDLE) break; DELAY(10); } iwn_nic_unlock(sc); } ring->cur = 0; sc->last_rx_valid = 0; } static void iwn_free_rx_ring(struct iwn_softc *sc, struct iwn_rx_ring *ring) { int i; DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s \n", __func__); iwn_dma_contig_free(&ring->desc_dma); iwn_dma_contig_free(&ring->stat_dma); for (i = 0; i < IWN_RX_RING_COUNT; i++) { struct iwn_rx_data *data = &ring->data[i]; if (data->m != NULL) { bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_POSTREAD); bus_dmamap_unload(ring->data_dmat, data->map); m_freem(data->m); data->m = NULL; } if (data->map != NULL) bus_dmamap_destroy(ring->data_dmat, data->map); } if (ring->data_dmat != NULL) { bus_dma_tag_destroy(ring->data_dmat); ring->data_dmat = NULL; } } static int iwn_alloc_tx_ring(struct iwn_softc *sc, struct iwn_tx_ring *ring, int qid) { bus_addr_t paddr; bus_size_t size; int i, error; ring->qid = qid; ring->queued = 0; ring->cur = 0; DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__); /* Allocate TX descriptors (256-byte aligned). */ size = IWN_TX_RING_COUNT * sizeof (struct iwn_tx_desc); error = iwn_dma_contig_alloc(sc, &ring->desc_dma, (void **)&ring->desc, size, 256); if (error != 0) { device_printf(sc->sc_dev, "%s: could not allocate TX ring DMA memory, error %d\n", __func__, error); goto fail; } size = IWN_TX_RING_COUNT * sizeof (struct iwn_tx_cmd); error = iwn_dma_contig_alloc(sc, &ring->cmd_dma, (void **)&ring->cmd, size, 4); if (error != 0) { device_printf(sc->sc_dev, "%s: could not allocate TX cmd DMA memory, error %d\n", __func__, error); goto fail; } error = bus_dma_tag_create(bus_get_dma_tag(sc->sc_dev), 1, 0, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, MCLBYTES, IWN_MAX_SCATTER - 1, MCLBYTES, 0, NULL, NULL, &ring->data_dmat); if (error != 0) { device_printf(sc->sc_dev, "%s: could not create TX buf DMA tag, error %d\n", __func__, error); goto fail; } paddr = ring->cmd_dma.paddr; for (i = 0; i < IWN_TX_RING_COUNT; i++) { struct iwn_tx_data *data = &ring->data[i]; data->cmd_paddr = paddr; data->scratch_paddr = paddr + 12; paddr += sizeof (struct iwn_tx_cmd); error = bus_dmamap_create(ring->data_dmat, 0, &data->map); if (error != 0) { device_printf(sc->sc_dev, "%s: could not create TX buf DMA map, error %d\n", __func__, error); goto fail; } } DPRINTF(sc, IWN_DEBUG_TRACE, "->%s end\n", __func__); return 0; fail: iwn_free_tx_ring(sc, ring); DPRINTF(sc, IWN_DEBUG_TRACE, "->%s end in error\n", __func__); return error; } static void iwn_reset_tx_ring(struct iwn_softc *sc, struct iwn_tx_ring *ring) { int i; DPRINTF(sc, IWN_DEBUG_TRACE, "->doing %s \n", __func__); for (i = 0; i < IWN_TX_RING_COUNT; i++) { struct iwn_tx_data *data = &ring->data[i]; if (data->m != NULL) { bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_POSTWRITE); bus_dmamap_unload(ring->data_dmat, data->map); m_freem(data->m); data->m = NULL; } if (data->ni != NULL) { ieee80211_free_node(data->ni); data->ni = NULL; } } /* 
Clear TX descriptors. */ memset(ring->desc, 0, ring->desc_dma.size); bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map, BUS_DMASYNC_PREWRITE); sc->qfullmsk &= ~(1 << ring->qid); ring->queued = 0; ring->cur = 0; } static void iwn_free_tx_ring(struct iwn_softc *sc, struct iwn_tx_ring *ring) { int i; DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s \n", __func__); iwn_dma_contig_free(&ring->desc_dma); iwn_dma_contig_free(&ring->cmd_dma); for (i = 0; i < IWN_TX_RING_COUNT; i++) { struct iwn_tx_data *data = &ring->data[i]; if (data->m != NULL) { bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_POSTWRITE); bus_dmamap_unload(ring->data_dmat, data->map); m_freem(data->m); } if (data->map != NULL) bus_dmamap_destroy(ring->data_dmat, data->map); } if (ring->data_dmat != NULL) { bus_dma_tag_destroy(ring->data_dmat); ring->data_dmat = NULL; } } static void iwn5000_ict_reset(struct iwn_softc *sc) { /* Disable interrupts. */ IWN_WRITE(sc, IWN_INT_MASK, 0); /* Reset ICT table. */ memset(sc->ict, 0, IWN_ICT_SIZE); sc->ict_cur = 0; bus_dmamap_sync(sc->ict_dma.tag, sc->ict_dma.map, BUS_DMASYNC_PREWRITE); /* Set physical address of ICT table (4KB aligned). */ DPRINTF(sc, IWN_DEBUG_RESET, "%s: enabling ICT\n", __func__); IWN_WRITE(sc, IWN_DRAM_INT_TBL, IWN_DRAM_INT_TBL_ENABLE | IWN_DRAM_INT_TBL_WRAP_CHECK | sc->ict_dma.paddr >> 12); /* Enable periodic RX interrupt. */ sc->int_mask |= IWN_INT_RX_PERIODIC; /* Switch to ICT interrupt mode in driver. */ sc->sc_flags |= IWN_FLAG_USE_ICT; /* Re-enable interrupts. */ IWN_WRITE(sc, IWN_INT, 0xffffffff); IWN_WRITE(sc, IWN_INT_MASK, sc->int_mask); } static int iwn_read_eeprom(struct iwn_softc *sc, uint8_t macaddr[IEEE80211_ADDR_LEN]) { struct iwn_ops *ops = &sc->ops; uint16_t val; int error; DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__); /* Check whether adapter has an EEPROM or an OTPROM. */ if (sc->hw_type >= IWN_HW_REV_TYPE_1000 && (IWN_READ(sc, IWN_OTP_GP) & IWN_OTP_GP_DEV_SEL_OTP)) sc->sc_flags |= IWN_FLAG_HAS_OTPROM; DPRINTF(sc, IWN_DEBUG_RESET, "%s found\n", (sc->sc_flags & IWN_FLAG_HAS_OTPROM) ? "OTPROM" : "EEPROM"); /* Adapter has to be powered on for EEPROM access to work. */ if ((error = iwn_apm_init(sc)) != 0) { device_printf(sc->sc_dev, "%s: could not power ON adapter, error %d\n", __func__, error); return error; } if ((IWN_READ(sc, IWN_EEPROM_GP) & 0x7) == 0) { device_printf(sc->sc_dev, "%s: bad ROM signature\n", __func__); return EIO; } if ((error = iwn_eeprom_lock(sc)) != 0) { device_printf(sc->sc_dev, "%s: could not lock ROM, error %d\n", __func__, error); return error; } if (sc->sc_flags & IWN_FLAG_HAS_OTPROM) { if ((error = iwn_init_otprom(sc)) != 0) { device_printf(sc->sc_dev, "%s: could not initialize OTPROM, error %d\n", __func__, error); return error; } } iwn_read_prom_data(sc, IWN_EEPROM_SKU_CAP, &val, 2); DPRINTF(sc, IWN_DEBUG_RESET, "SKU capabilities=0x%04x\n", le16toh(val)); /* Check if HT support is bonded out. */ if (val & htole16(IWN_EEPROM_SKU_CAP_11N)) sc->sc_flags |= IWN_FLAG_HAS_11N; iwn_read_prom_data(sc, IWN_EEPROM_RFCFG, &val, 2); sc->rfcfg = le16toh(val); DPRINTF(sc, IWN_DEBUG_RESET, "radio config=0x%04x\n", sc->rfcfg); /* Read Tx/Rx chains from ROM unless it's known to be broken. */ if (sc->txchainmask == 0) sc->txchainmask = IWN_RFCFG_TXANTMSK(sc->rfcfg); if (sc->rxchainmask == 0) sc->rxchainmask = IWN_RFCFG_RXANTMSK(sc->rfcfg); /* Read MAC address. */ iwn_read_prom_data(sc, IWN_EEPROM_MAC, macaddr, 6); /* Read adapter-specific information from EEPROM. */ ops->read_eeprom(sc); iwn_apm_stop(sc); /* Power OFF adapter. 
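 *
 * At this point the full ROM access sequence used by this function is
 * complete: iwn_apm_init() (power on) -> iwn_eeprom_lock() ->
 * iwn_init_otprom() for OTP parts -> iwn_read_prom_data() and
 * ops->read_eeprom() -> iwn_apm_stop() -> iwn_eeprom_unlock() below.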
*/ iwn_eeprom_unlock(sc); DPRINTF(sc, IWN_DEBUG_TRACE, "->%s end\n", __func__); return 0; } static void iwn4965_read_eeprom(struct iwn_softc *sc) { uint32_t addr; uint16_t val; int i; DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__); /* Read regulatory domain (4 ASCII characters). */ iwn_read_prom_data(sc, IWN4965_EEPROM_DOMAIN, sc->eeprom_domain, 4); /* Read the list of authorized channels (20MHz & 40MHz). */ for (i = 0; i < IWN_NBANDS - 1; i++) { addr = iwn4965_regulatory_bands[i]; iwn_read_eeprom_channels(sc, i, addr); } /* Read maximum allowed TX power for 2GHz and 5GHz bands. */ iwn_read_prom_data(sc, IWN4965_EEPROM_MAXPOW, &val, 2); sc->maxpwr2GHz = val & 0xff; sc->maxpwr5GHz = val >> 8; /* Check that EEPROM values are within valid range. */ if (sc->maxpwr5GHz < 20 || sc->maxpwr5GHz > 50) sc->maxpwr5GHz = 38; if (sc->maxpwr2GHz < 20 || sc->maxpwr2GHz > 50) sc->maxpwr2GHz = 38; DPRINTF(sc, IWN_DEBUG_RESET, "maxpwr 2GHz=%d 5GHz=%d\n", sc->maxpwr2GHz, sc->maxpwr5GHz); /* Read samples for each TX power group. */ iwn_read_prom_data(sc, IWN4965_EEPROM_BANDS, sc->bands, sizeof sc->bands); /* Read voltage at which samples were taken. */ iwn_read_prom_data(sc, IWN4965_EEPROM_VOLTAGE, &val, 2); sc->eeprom_voltage = (int16_t)le16toh(val); DPRINTF(sc, IWN_DEBUG_RESET, "voltage=%d (in 0.3V)\n", sc->eeprom_voltage); #ifdef IWN_DEBUG /* Print samples. */ if (sc->sc_debug & IWN_DEBUG_ANY) { for (i = 0; i < IWN_NBANDS - 1; i++) iwn4965_print_power_group(sc, i); } #endif DPRINTF(sc, IWN_DEBUG_TRACE, "->%s end\n", __func__); } #ifdef IWN_DEBUG static void iwn4965_print_power_group(struct iwn_softc *sc, int i) { struct iwn4965_eeprom_band *band = &sc->bands[i]; struct iwn4965_eeprom_chan_samples *chans = band->chans; int j, c; printf("===band %d===\n", i); printf("chan lo=%d, chan hi=%d\n", band->lo, band->hi); printf("chan1 num=%d\n", chans[0].num); for (c = 0; c < 2; c++) { for (j = 0; j < IWN_NSAMPLES; j++) { printf("chain %d, sample %d: temp=%d gain=%d " "power=%d pa_det=%d\n", c, j, chans[0].samples[c][j].temp, chans[0].samples[c][j].gain, chans[0].samples[c][j].power, chans[0].samples[c][j].pa_det); } } printf("chan2 num=%d\n", chans[1].num); for (c = 0; c < 2; c++) { for (j = 0; j < IWN_NSAMPLES; j++) { printf("chain %d, sample %d: temp=%d gain=%d " "power=%d pa_det=%d\n", c, j, chans[1].samples[c][j].temp, chans[1].samples[c][j].gain, chans[1].samples[c][j].power, chans[1].samples[c][j].pa_det); } } } #endif static void iwn5000_read_eeprom(struct iwn_softc *sc) { struct iwn5000_eeprom_calib_hdr hdr; int32_t volt; uint32_t base, addr; uint16_t val; int i; DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__); /* Read regulatory domain (4 ASCII characters). */ iwn_read_prom_data(sc, IWN5000_EEPROM_REG, &val, 2); base = le16toh(val); iwn_read_prom_data(sc, base + IWN5000_EEPROM_DOMAIN, sc->eeprom_domain, 4); /* Read the list of authorized channels (20MHz & 40MHz). */ for (i = 0; i < IWN_NBANDS - 1; i++) { addr = base + sc->base_params->regulatory_bands[i]; iwn_read_eeprom_channels(sc, i, addr); } /* Read enhanced TX power information for 6000 Series. 
*/ if (sc->base_params->enhanced_TX_power) iwn_read_eeprom_enhinfo(sc); iwn_read_prom_data(sc, IWN5000_EEPROM_CAL, &val, 2); base = le16toh(val); iwn_read_prom_data(sc, base, &hdr, sizeof hdr); DPRINTF(sc, IWN_DEBUG_CALIBRATE, "%s: calib version=%u pa type=%u voltage=%u\n", __func__, hdr.version, hdr.pa_type, le16toh(hdr.volt)); sc->calib_ver = hdr.version; if (sc->base_params->calib_need & IWN_FLG_NEED_PHY_CALIB_TEMP_OFFSETv2) { sc->eeprom_voltage = le16toh(hdr.volt); iwn_read_prom_data(sc, base + IWN5000_EEPROM_TEMP, &val, 2); sc->eeprom_temp_high=le16toh(val); iwn_read_prom_data(sc, base + IWN5000_EEPROM_VOLT, &val, 2); sc->eeprom_temp = le16toh(val); } if (sc->hw_type == IWN_HW_REV_TYPE_5150) { /* Compute temperature offset. */ iwn_read_prom_data(sc, base + IWN5000_EEPROM_TEMP, &val, 2); sc->eeprom_temp = le16toh(val); iwn_read_prom_data(sc, base + IWN5000_EEPROM_VOLT, &val, 2); volt = le16toh(val); sc->temp_off = sc->eeprom_temp - (volt / -5); DPRINTF(sc, IWN_DEBUG_CALIBRATE, "temp=%d volt=%d offset=%dK\n", sc->eeprom_temp, volt, sc->temp_off); } else { /* Read crystal calibration. */ iwn_read_prom_data(sc, base + IWN5000_EEPROM_CRYSTAL, &sc->eeprom_crystal, sizeof (uint32_t)); DPRINTF(sc, IWN_DEBUG_CALIBRATE, "crystal calibration 0x%08x\n", le32toh(sc->eeprom_crystal)); } DPRINTF(sc, IWN_DEBUG_TRACE, "->%s end\n", __func__); } /* * Translate EEPROM flags to net80211. */ static uint32_t iwn_eeprom_channel_flags(struct iwn_eeprom_chan *channel) { uint32_t nflags; nflags = 0; if ((channel->flags & IWN_EEPROM_CHAN_ACTIVE) == 0) nflags |= IEEE80211_CHAN_PASSIVE; if ((channel->flags & IWN_EEPROM_CHAN_IBSS) == 0) nflags |= IEEE80211_CHAN_NOADHOC; if (channel->flags & IWN_EEPROM_CHAN_RADAR) { nflags |= IEEE80211_CHAN_DFS; /* XXX apparently IBSS may still be marked */ nflags |= IEEE80211_CHAN_NOADHOC; } return nflags; } static void iwn_read_eeprom_band(struct iwn_softc *sc, int n, int maxchans, int *nchans, struct ieee80211_channel chans[]) { struct iwn_eeprom_chan *channels = sc->eeprom_channels[n]; const struct iwn_chan_band *band = &iwn_bands[n]; uint8_t bands[IEEE80211_MODE_BYTES]; uint8_t chan; int i, error, nflags; DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__); memset(bands, 0, sizeof(bands)); if (n == 0) { setbit(bands, IEEE80211_MODE_11B); setbit(bands, IEEE80211_MODE_11G); if (sc->sc_flags & IWN_FLAG_HAS_11N) setbit(bands, IEEE80211_MODE_11NG); } else { setbit(bands, IEEE80211_MODE_11A); if (sc->sc_flags & IWN_FLAG_HAS_11N) setbit(bands, IEEE80211_MODE_11NA); } for (i = 0; i < band->nchan; i++) { if (!(channels[i].flags & IWN_EEPROM_CHAN_VALID)) { DPRINTF(sc, IWN_DEBUG_RESET, "skip chan %d flags 0x%x maxpwr %d\n", band->chan[i], channels[i].flags, channels[i].maxpwr); continue; } chan = band->chan[i]; nflags = iwn_eeprom_channel_flags(&channels[i]); error = ieee80211_add_channel(chans, maxchans, nchans, chan, 0, channels[i].maxpwr, nflags, bands); if (error != 0) break; /* Save maximum allowed TX power for this channel. 
*/ /* XXX wrong */ sc->maxpwr[chan] = channels[i].maxpwr; DPRINTF(sc, IWN_DEBUG_RESET, "add chan %d flags 0x%x maxpwr %d\n", chan, channels[i].flags, channels[i].maxpwr); } DPRINTF(sc, IWN_DEBUG_TRACE, "->%s end\n", __func__); } static void iwn_read_eeprom_ht40(struct iwn_softc *sc, int n, int maxchans, int *nchans, struct ieee80211_channel chans[]) { struct iwn_eeprom_chan *channels = sc->eeprom_channels[n]; const struct iwn_chan_band *band = &iwn_bands[n]; uint8_t chan; int i, error, nflags; DPRINTF(sc, IWN_DEBUG_TRACE, "->%s start\n", __func__); if (!(sc->sc_flags & IWN_FLAG_HAS_11N)) { DPRINTF(sc, IWN_DEBUG_TRACE, "->%s end no 11n\n", __func__); return; } for (i = 0; i < band->nchan; i++) { if (!(channels[i].flags & IWN_EEPROM_CHAN_VALID)) { DPRINTF(sc, IWN_DEBUG_RESET, "skip chan %d flags 0x%x maxpwr %d\n", band->chan[i], channels[i].flags, channels[i].maxpwr); continue; } chan = band->chan[i]; nflags = iwn_eeprom_channel_flags(&channels[i]); nflags |= (n == 5 ? IEEE80211_CHAN_G : IEEE80211_CHAN_A); error = ieee80211_add_channel_ht40(chans, maxchans, nchans, chan, channels[i].maxpwr, nflags); switch (error) { case EINVAL: device_printf(sc->sc_dev, "%s: no entry for channel %d\n", __func__, chan); continue; case ENOENT: DPRINTF(sc, IWN_DEBUG_RESET, "%s: skip chan %d, extension channel not found\n", __func__, chan); continue; case ENOBUFS: device_printf(sc->sc_dev, "%s: channel table is full!\n", __func__); break; case 0: DPRINTF(sc, IWN_DEBUG_RESET, "add ht40 chan %d flags 0x%x maxpwr %d\n", chan, channels[i].flags, channels[i].maxpwr); /* FALLTHROUGH */ default: break; } } DPRINTF(sc, IWN_DEBUG_TRACE, "->%s end\n", __func__); } static void iwn_read_eeprom_channels(struct iwn_softc *sc, int n, uint32_t addr) { struct ieee80211com *ic = &sc->sc_ic; iwn_read_prom_data(sc, addr, &sc->eeprom_channels[n], iwn_bands[n].nchan * sizeof (struct iwn_eeprom_chan)); if (n < 5) { iwn_read_eeprom_band(sc, n, IEEE80211_CHAN_MAX, &ic->ic_nchans, ic->ic_channels); } else { iwn_read_eeprom_ht40(sc, n, IEEE80211_CHAN_MAX, &ic->ic_nchans, ic->ic_channels); } ieee80211_sort_channels(ic->ic_channels, ic->ic_nchans); } static struct iwn_eeprom_chan * iwn_find_eeprom_channel(struct iwn_softc *sc, struct ieee80211_channel *c) { int band, chan, i, j; if (IEEE80211_IS_CHAN_HT40(c)) { band = IEEE80211_IS_CHAN_5GHZ(c) ? 6 : 5; if (IEEE80211_IS_CHAN_HT40D(c)) chan = c->ic_extieee; else chan = c->ic_ieee; for (i = 0; i < iwn_bands[band].nchan; i++) { if (iwn_bands[band].chan[i] == chan) return &sc->eeprom_channels[band][i]; } } else { for (j = 0; j < 5; j++) { for (i = 0; i < iwn_bands[j].nchan; i++) { if (iwn_bands[j].chan[i] == c->ic_ieee && ((j == 0) ^ IEEE80211_IS_CHAN_A(c)) == 1) return &sc->eeprom_channels[j][i]; } } } return NULL; } static void iwn_getradiocaps(struct ieee80211com *ic, int maxchans, int *nchans, struct ieee80211_channel chans[]) { struct iwn_softc *sc = ic->ic_softc; int i; /* Parse the list of authorized channels. */ for (i = 0; i < 5 && *nchans < maxchans; i++) iwn_read_eeprom_band(sc, i, maxchans, nchans, chans); for (i = 5; i < IWN_NBANDS - 1 && *nchans < maxchans; i++) iwn_read_eeprom_ht40(sc, i, maxchans, nchans, chans); } /* * Enforce flags read from EEPROM. 
*/ static int iwn_setregdomain(struct ieee80211com *ic, struct ieee80211_regdomain *rd, int nchan, struct ieee80211_channel chans[]) { struct iwn_softc *sc = ic->ic_softc; int i; for (i = 0; i < nchan; i++) { struct ieee80211_channel *c = &chans[i]; struct iwn_eeprom_chan *channel; channel = iwn_find_eeprom_channel(sc, c); if (channel == NULL) { ic_printf(ic, "%s: invalid channel %u freq %u/0x%x\n", __func__, c->ic_ieee, c->ic_freq, c->ic_flags); return EINVAL; } c->ic_flags |= iwn_eeprom_channel_flags(channel); } return 0; } static void iwn_read_eeprom_enhinfo(struct iwn_softc *sc) { struct iwn_eeprom_enhinfo enhinfo[35]; struct ieee80211com *ic = &sc->sc_ic; struct ieee80211_channel *c; uint16_t val, base; int8_t maxpwr; uint8_t flags; int i, j; DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__); iwn_read_prom_data(sc, IWN5000_EEPROM_REG, &val, 2); base = le16toh(val); iwn_read_prom_data(sc, base + IWN6000_EEPROM_ENHINFO, enhinfo, sizeof enhinfo); for (i = 0; i < nitems(enhinfo); i++) { flags = enhinfo[i].flags; if (!(flags & IWN_ENHINFO_VALID)) continue; /* Skip invalid entries. */ maxpwr = 0; if (sc->txchainmask & IWN_ANT_A) maxpwr = MAX(maxpwr, enhinfo[i].chain[0]); if (sc->txchainmask & IWN_ANT_B) maxpwr = MAX(maxpwr, enhinfo[i].chain[1]); if (sc->txchainmask & IWN_ANT_C) maxpwr = MAX(maxpwr, enhinfo[i].chain[2]); if (sc->ntxchains == 2) maxpwr = MAX(maxpwr, enhinfo[i].mimo2); else if (sc->ntxchains == 3) maxpwr = MAX(maxpwr, enhinfo[i].mimo3); for (j = 0; j < ic->ic_nchans; j++) { c = &ic->ic_channels[j]; if ((flags & IWN_ENHINFO_5GHZ)) { if (!IEEE80211_IS_CHAN_A(c)) continue; } else if ((flags & IWN_ENHINFO_OFDM)) { if (!IEEE80211_IS_CHAN_G(c)) continue; } else if (!IEEE80211_IS_CHAN_B(c)) continue; if ((flags & IWN_ENHINFO_HT40)) { if (!IEEE80211_IS_CHAN_HT40(c)) continue; } else { if (IEEE80211_IS_CHAN_HT40(c)) continue; } if (enhinfo[i].chan != 0 && enhinfo[i].chan != c->ic_ieee) continue; DPRINTF(sc, IWN_DEBUG_RESET, "channel %d(%x), maxpwr %d\n", c->ic_ieee, c->ic_flags, maxpwr / 2); c->ic_maxregpower = maxpwr / 2; c->ic_maxpower = maxpwr; } } DPRINTF(sc, IWN_DEBUG_TRACE, "->%s end\n", __func__); } static struct ieee80211_node * iwn_node_alloc(struct ieee80211vap *vap, const uint8_t mac[IEEE80211_ADDR_LEN]) { struct iwn_node *wn; wn = malloc(sizeof (struct iwn_node), M_80211_NODE, M_NOWAIT | M_ZERO); if (wn == NULL) return (NULL); wn->id = IWN_ID_UNDEFINED; return (&wn->ni); } static __inline int rate2plcp(int rate) { switch (rate & 0xff) { case 12: return 0xd; case 18: return 0xf; case 24: return 0x5; case 36: return 0x7; case 48: return 0x9; case 72: return 0xb; case 96: return 0x1; case 108: return 0x3; case 2: return 10; case 4: return 20; case 11: return 55; case 22: return 110; } return 0; } static __inline uint8_t plcp2rate(const uint8_t rate_plcp) { switch (rate_plcp) { case 0xd: return 12; case 0xf: return 18; case 0x5: return 24; case 0x7: return 36; case 0x9: return 48; case 0xb: return 72; case 0x1: return 96; case 0x3: return 108; case 10: return 2; case 20: return 4; case 55: return 11; case 110: return 22; default: return 0; } } static int iwn_get_1stream_tx_antmask(struct iwn_softc *sc) { return IWN_LSB(sc->txchainmask); } static int iwn_get_2stream_tx_antmask(struct iwn_softc *sc) { int tx; /* * The '2 stream' setup is a bit .. odd. * * For NICs that support only 1 antenna, default to IWN_ANT_AB or * the firmware panics (eg Intel 5100.) * * For NICs that support two antennas, we use ANT_AB. 
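 * (Worked example: a single-antenna part such as the 5100 computes a
 * zero mask once IWN_LSB() strips its only chain, so IWN_ANT_AB is
 * forced to keep the firmware happy; assuming IWN_ANT_A is the lowest
 * bit, a three-antenna part with txchainmask == IWN_ANT_ABC ends up
 * transmitting on IWN_ANT_BC.)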
* * For NICs that support three antennas, we use the two that * wasn't the default one. * * XXX TODO: if bluetooth (full concurrent) is enabled, restrict * this to only one antenna. */ /* Default - transmit on the other antennas */ tx = (sc->txchainmask & ~IWN_LSB(sc->txchainmask)); /* Now, if it's zero, set it to IWN_ANT_AB, so to not panic firmware */ if (tx == 0) tx = IWN_ANT_AB; /* * If the NIC is a two-stream TX NIC, configure the TX mask to * the default chainmask */ else if (sc->ntxchains == 2) tx = sc->txchainmask; return (tx); } /* * Calculate the required PLCP value from the given rate, * to the given node. * * This will take the node configuration (eg 11n, rate table * setup, etc) into consideration. */ static uint32_t iwn_rate_to_plcp(struct iwn_softc *sc, struct ieee80211_node *ni, uint8_t rate) { struct ieee80211com *ic = ni->ni_ic; uint32_t plcp = 0; int ridx; /* * If it's an MCS rate, let's set the plcp correctly * and set the relevant flags based on the node config. */ if (rate & IEEE80211_RATE_MCS) { /* * Set the initial PLCP value to be between 0->31 for * MCS 0 -> MCS 31, then set the "I'm an MCS rate!" * flag. */ plcp = IEEE80211_RV(rate) | IWN_RFLAG_MCS; /* * XXX the following should only occur if both * the local configuration _and_ the remote node * advertise these capabilities. Thus this code * may need fixing! */ /* * Set the channel width and guard interval. */ if (IEEE80211_IS_CHAN_HT40(ni->ni_chan)) { plcp |= IWN_RFLAG_HT40; if (ni->ni_htcap & IEEE80211_HTCAP_SHORTGI40) plcp |= IWN_RFLAG_SGI; } else if (ni->ni_htcap & IEEE80211_HTCAP_SHORTGI20) { plcp |= IWN_RFLAG_SGI; } /* * Ensure the selected rate matches the link quality * table entries being used. */ if (rate > 0x8f) plcp |= IWN_RFLAG_ANT(sc->txchainmask); else if (rate > 0x87) plcp |= IWN_RFLAG_ANT(iwn_get_2stream_tx_antmask(sc)); else plcp |= IWN_RFLAG_ANT(iwn_get_1stream_tx_antmask(sc)); } else { /* * Set the initial PLCP - fine for both * OFDM and CCK rates. */ plcp = rate2plcp(rate); /* Set CCK flag if it's CCK */ /* XXX It would be nice to have a method * to map the ridx -> phy table entry * so we could just query that, rather than * this hack to check against IWN_RIDX_OFDM6. */ ridx = ieee80211_legacy_rate_lookup(ic->ic_rt, rate & IEEE80211_RATE_VAL); if (ridx < IWN_RIDX_OFDM6 && IEEE80211_IS_CHAN_2GHZ(ni->ni_chan)) plcp |= IWN_RFLAG_CCK; /* Set antenna configuration */ /* XXX TODO: is this the right antenna to use for legacy? */ plcp |= IWN_RFLAG_ANT(iwn_get_1stream_tx_antmask(sc)); } DPRINTF(sc, IWN_DEBUG_TXRATE, "%s: rate=0x%02x, plcp=0x%08x\n", __func__, rate, plcp); return (htole32(plcp)); } static void iwn_newassoc(struct ieee80211_node *ni, int isnew) { /* Doesn't do anything at the moment */ } static int iwn_media_change(struct ifnet *ifp) { int error; error = ieee80211_media_change(ifp); /* NB: only the fixed rate can change and that doesn't need a reset */ return (error == ENETRESET ? 
0 : error); } static int iwn_newstate(struct ieee80211vap *vap, enum ieee80211_state nstate, int arg) { struct iwn_vap *ivp = IWN_VAP(vap); struct ieee80211com *ic = vap->iv_ic; struct iwn_softc *sc = ic->ic_softc; int error = 0; DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__); DPRINTF(sc, IWN_DEBUG_STATE, "%s: %s -> %s\n", __func__, ieee80211_state_name[vap->iv_state], ieee80211_state_name[nstate]); IEEE80211_UNLOCK(ic); IWN_LOCK(sc); callout_stop(&sc->calib_to); sc->rxon = &sc->rx_on[IWN_RXON_BSS_CTX]; switch (nstate) { case IEEE80211_S_ASSOC: if (vap->iv_state != IEEE80211_S_RUN) break; /* FALLTHROUGH */ case IEEE80211_S_AUTH: if (vap->iv_state == IEEE80211_S_AUTH) break; /* * !AUTH -> AUTH transition requires state reset to handle * reassociations correctly. */ sc->rxon->associd = 0; sc->rxon->filter &= ~htole32(IWN_FILTER_BSS); sc->calib.state = IWN_CALIB_STATE_INIT; /* Wait until we hear a beacon before we transmit */ if (IEEE80211_IS_CHAN_PASSIVE(ic->ic_curchan)) sc->sc_beacon_wait = 1; if ((error = iwn_auth(sc, vap)) != 0) { device_printf(sc->sc_dev, "%s: could not move to auth state\n", __func__); } break; case IEEE80211_S_RUN: /* * RUN -> RUN transition; Just restart the timers. */ if (vap->iv_state == IEEE80211_S_RUN) { sc->calib_cnt = 0; break; } /* Wait until we hear a beacon before we transmit */ if (IEEE80211_IS_CHAN_PASSIVE(ic->ic_curchan)) sc->sc_beacon_wait = 1; /* * !RUN -> RUN requires setting the association id * which is done with a firmware cmd. We also defer * starting the timers until that work is done. */ if ((error = iwn_run(sc, vap)) != 0) { device_printf(sc->sc_dev, "%s: could not move to run state\n", __func__); } break; case IEEE80211_S_INIT: sc->calib.state = IWN_CALIB_STATE_INIT; /* * Purge the xmit queue so we don't have old frames * during a new association attempt. */ sc->sc_beacon_wait = 0; iwn_xmit_queue_drain(sc); break; default: break; } IWN_UNLOCK(sc); IEEE80211_LOCK(ic); if (error != 0){ DPRINTF(sc, IWN_DEBUG_TRACE, "->%s end in error\n", __func__); return error; } DPRINTF(sc, IWN_DEBUG_TRACE, "->%s: end\n",__func__); return ivp->iv_newstate(vap, nstate, arg); } static void iwn_calib_timeout(void *arg) { struct iwn_softc *sc = arg; IWN_LOCK_ASSERT(sc); /* Force automatic TX power calibration every 60 secs. */ if (++sc->calib_cnt >= 120) { uint32_t flags = 0; DPRINTF(sc, IWN_DEBUG_CALIBRATE, "%s\n", "sending request for statistics"); (void)iwn_cmd(sc, IWN_CMD_GET_STATISTICS, &flags, sizeof flags, 1); sc->calib_cnt = 0; } callout_reset(&sc->calib_to, msecs_to_ticks(500), iwn_calib_timeout, sc); } /* * Process an RX_PHY firmware notification. This is usually immediately * followed by an MPDU_RX_DONE notification. */ static void iwn_rx_phy(struct iwn_softc *sc, struct iwn_rx_desc *desc) { struct iwn_rx_stat *stat = (struct iwn_rx_stat *)(desc + 1); DPRINTF(sc, IWN_DEBUG_CALIBRATE, "%s: received PHY stats\n", __func__); /* Save RX statistics, they will be used on MPDU_RX_DONE. */ memcpy(&sc->last_rx_stat, stat, sizeof (*stat)); sc->last_rx_valid = 1; } /* * Process an RX_DONE (4965AGN only) or MPDU_RX_DONE firmware notification. * Each MPDU_RX_DONE notification must be preceded by an RX_PHY one. 
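 *
 * The pairing works through sc->last_rx_stat: iwn_rx_phy() above copies
 * the PHY statistics there and sets sc->last_rx_valid, and iwn_rx_done()
 * consumes them for the matching MPDU, ignoring the frame if no RX_PHY
 * notification was seen first.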
*/ static void iwn_rx_done(struct iwn_softc *sc, struct iwn_rx_desc *desc, struct iwn_rx_data *data) { struct iwn_ops *ops = &sc->ops; struct ieee80211com *ic = &sc->sc_ic; struct iwn_rx_ring *ring = &sc->rxq; struct ieee80211_frame_min *wh; struct ieee80211_node *ni; struct mbuf *m, *m1; struct iwn_rx_stat *stat; caddr_t head; bus_addr_t paddr; uint32_t flags; int error, len, rssi, nf; DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__); if (desc->type == IWN_MPDU_RX_DONE) { /* Check for prior RX_PHY notification. */ if (!sc->last_rx_valid) { DPRINTF(sc, IWN_DEBUG_ANY, "%s: missing RX_PHY\n", __func__); return; } stat = &sc->last_rx_stat; } else stat = (struct iwn_rx_stat *)(desc + 1); if (stat->cfg_phy_len > IWN_STAT_MAXLEN) { device_printf(sc->sc_dev, "%s: invalid RX statistic header, len %d\n", __func__, stat->cfg_phy_len); return; } if (desc->type == IWN_MPDU_RX_DONE) { struct iwn_rx_mpdu *mpdu = (struct iwn_rx_mpdu *)(desc + 1); head = (caddr_t)(mpdu + 1); len = le16toh(mpdu->len); } else { head = (caddr_t)(stat + 1) + stat->cfg_phy_len; len = le16toh(stat->len); } flags = le32toh(*(uint32_t *)(head + len)); /* Discard frames with a bad FCS early. */ if ((flags & IWN_RX_NOERROR) != IWN_RX_NOERROR) { DPRINTF(sc, IWN_DEBUG_RECV, "%s: RX flags error %x\n", __func__, flags); counter_u64_add(ic->ic_ierrors, 1); return; } /* Discard frames that are too short. */ if (len < sizeof (struct ieee80211_frame_ack)) { DPRINTF(sc, IWN_DEBUG_RECV, "%s: frame too short: %d\n", __func__, len); counter_u64_add(ic->ic_ierrors, 1); return; } m1 = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, IWN_RBUF_SIZE); if (m1 == NULL) { DPRINTF(sc, IWN_DEBUG_ANY, "%s: no mbuf to restock ring\n", __func__); counter_u64_add(ic->ic_ierrors, 1); return; } bus_dmamap_unload(ring->data_dmat, data->map); error = bus_dmamap_load(ring->data_dmat, data->map, mtod(m1, void *), IWN_RBUF_SIZE, iwn_dma_map_addr, &paddr, BUS_DMA_NOWAIT); if (error != 0 && error != EFBIG) { device_printf(sc->sc_dev, "%s: bus_dmamap_load failed, error %d\n", __func__, error); m_freem(m1); /* Try to reload the old mbuf. */ error = bus_dmamap_load(ring->data_dmat, data->map, mtod(data->m, void *), IWN_RBUF_SIZE, iwn_dma_map_addr, &paddr, BUS_DMA_NOWAIT); if (error != 0 && error != EFBIG) { panic("%s: could not load old RX mbuf", __func__); } bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_PREREAD); /* Physical address may have changed. */ ring->desc[ring->cur] = htole32(paddr >> 8); bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map, BUS_DMASYNC_PREWRITE); counter_u64_add(ic->ic_ierrors, 1); return; } bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_PREREAD); m = data->m; data->m = m1; /* Update RX descriptor. */ ring->desc[ring->cur] = htole32(paddr >> 8); bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map, BUS_DMASYNC_PREWRITE); /* Finalize mbuf. */ m->m_data = head; m->m_pkthdr.len = m->m_len = len; /* Grab a reference to the source node. */ wh = mtod(m, struct ieee80211_frame_min *); if (len >= sizeof(struct ieee80211_frame_min)) ni = ieee80211_find_rxnode(ic, wh); else ni = NULL; nf = (ni != NULL && ni->ni_vap->iv_state == IEEE80211_S_RUN && (ic->ic_flags & IEEE80211_F_SCAN) == 0) ? 
sc->noise : -95; rssi = ops->get_rssi(sc, stat); if (ieee80211_radiotap_active(ic)) { struct iwn_rx_radiotap_header *tap = &sc->sc_rxtap; uint32_t rate = le32toh(stat->rate); tap->wr_flags = 0; if (stat->flags & htole16(IWN_STAT_FLAG_SHPREAMBLE)) tap->wr_flags |= IEEE80211_RADIOTAP_F_SHORTPRE; tap->wr_dbm_antsignal = (int8_t)rssi; tap->wr_dbm_antnoise = (int8_t)nf; tap->wr_tsft = stat->tstamp; if (rate & IWN_RFLAG_MCS) { tap->wr_rate = rate & IWN_RFLAG_RATE_MCS; tap->wr_rate |= IEEE80211_RATE_MCS; } else tap->wr_rate = plcp2rate(rate & IWN_RFLAG_RATE); } /* * If it's a beacon and we're waiting, then do the * wakeup. This should unblock raw_xmit/start. */ if (sc->sc_beacon_wait) { uint8_t type, subtype; /* NB: Re-assign wh */ wh = mtod(m, struct ieee80211_frame_min *); type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK; subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK; /* * This assumes at this point we've received our own * beacon. */ DPRINTF(sc, IWN_DEBUG_TRACE, "%s: beacon_wait, type=%d, subtype=%d\n", __func__, type, subtype); if (type == IEEE80211_FC0_TYPE_MGT && subtype == IEEE80211_FC0_SUBTYPE_BEACON) { DPRINTF(sc, IWN_DEBUG_TRACE | IWN_DEBUG_XMIT, "%s: waking things up\n", __func__); /* queue taskqueue to transmit! */ taskqueue_enqueue(sc->sc_tq, &sc->sc_xmit_task); } } IWN_UNLOCK(sc); /* Send the frame to the 802.11 layer. */ if (ni != NULL) { if (ni->ni_flags & IEEE80211_NODE_HT) m->m_flags |= M_AMPDU; (void)ieee80211_input(ni, m, rssi - nf, nf); /* Node is no longer needed. */ ieee80211_free_node(ni); } else (void)ieee80211_input_all(ic, m, rssi - nf, nf); IWN_LOCK(sc); DPRINTF(sc, IWN_DEBUG_TRACE, "->%s: end\n",__func__); } /* Process an incoming Compressed BlockAck. */ static void iwn_rx_compressed_ba(struct iwn_softc *sc, struct iwn_rx_desc *desc) { struct ieee80211_ratectl_tx_status *txs = &sc->sc_txs; struct iwn_ops *ops = &sc->ops; struct iwn_node *wn; struct ieee80211_node *ni; struct iwn_compressed_ba *ba = (struct iwn_compressed_ba *)(desc + 1); struct iwn_tx_ring *txq; struct iwn_tx_data *txdata; struct ieee80211_tx_ampdu *tap; struct mbuf *m; uint64_t bitmap; uint16_t ssn; uint8_t tid; int i, lastidx, qid, *res, shift; int tx_ok = 0, tx_err = 0; DPRINTF(sc, IWN_DEBUG_TRACE | IWN_DEBUG_XMIT, "->%s begin\n", __func__); qid = le16toh(ba->qid); txq = &sc->txq[ba->qid]; tap = sc->qid2tap[ba->qid]; tid = tap->txa_tid; wn = (void *)tap->txa_ni; res = NULL; ssn = 0; if (!IEEE80211_AMPDU_RUNNING(tap)) { res = tap->txa_private; ssn = tap->txa_start & 0xfff; } for (lastidx = le16toh(ba->ssn) & 0xff; txq->read != lastidx;) { txdata = &txq->data[txq->read]; /* Unmap and free mbuf. */ bus_dmamap_sync(txq->data_dmat, txdata->map, BUS_DMASYNC_POSTWRITE); bus_dmamap_unload(txq->data_dmat, txdata->map); m = txdata->m, txdata->m = NULL; ni = txdata->ni, txdata->ni = NULL; KASSERT(ni != NULL, ("no node")); KASSERT(m != NULL, ("no mbuf")); DPRINTF(sc, IWN_DEBUG_XMIT, "%s: freeing m=%p\n", __func__, m); ieee80211_tx_complete(ni, m, 1); txq->queued--; txq->read = (txq->read + 1) % IWN_TX_RING_COUNT; } if (txq->queued == 0 && res != NULL) { iwn_nic_lock(sc); ops->ampdu_tx_stop(sc, qid, tid, ssn); iwn_nic_unlock(sc); sc->qid2tap[qid] = NULL; free(res, M_DEVBUF); return; } if (wn->agg[tid].bitmap == 0) return; shift = wn->agg[tid].startidx - ((le16toh(ba->seq) >> 4) & 0xff); if (shift < 0) shift += 0x100; if (wn->agg[tid].nframes > (64 - shift)) return; /* * Walk the bitmap and calculate how many successful and failed * attempts are made. 
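 * Each bit of the (shifted) BA bitmap covers one A-MPDU subframe: a set
 * bit means the receiver acknowledged that subframe, a clear bit means
 * it did not.  For example, a bitmap of 0x5 covering three subframes
 * reports subframes 0 and 2 as successes and subframe 1 as a failure.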
* * Yes, the rate control code doesn't know these are A-MPDU * subframes and that it's okay to fail some of these. */ ni = tap->txa_ni; bitmap = (le64toh(ba->bitmap) >> shift) & wn->agg[tid].bitmap; for (i = 0; bitmap; i++) { txs->flags = 0; /* XXX TODO */ if ((bitmap & 1) == 0) { tx_err ++; txs->status = IEEE80211_RATECTL_TX_FAIL_UNSPECIFIED; } else { tx_ok ++; txs->status = IEEE80211_RATECTL_TX_SUCCESS; } ieee80211_ratectl_tx_complete(ni, txs); bitmap >>= 1; } DPRINTF(sc, IWN_DEBUG_TRACE | IWN_DEBUG_XMIT, "->%s: end; %d ok; %d err\n",__func__, tx_ok, tx_err); } /* * Process a CALIBRATION_RESULT notification sent by the initialization * firmware on response to a CMD_CALIB_CONFIG command (5000 only). */ static void iwn5000_rx_calib_results(struct iwn_softc *sc, struct iwn_rx_desc *desc) { struct iwn_phy_calib *calib = (struct iwn_phy_calib *)(desc + 1); int len, idx = -1; DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__); /* Runtime firmware should not send such a notification. */ if (sc->sc_flags & IWN_FLAG_CALIB_DONE){ DPRINTF(sc, IWN_DEBUG_TRACE, "->%s received after calib done\n", __func__); return; } len = (le32toh(desc->len) & 0x3fff) - 4; switch (calib->code) { case IWN5000_PHY_CALIB_DC: if (sc->base_params->calib_need & IWN_FLG_NEED_PHY_CALIB_DC) idx = 0; break; case IWN5000_PHY_CALIB_LO: if (sc->base_params->calib_need & IWN_FLG_NEED_PHY_CALIB_LO) idx = 1; break; case IWN5000_PHY_CALIB_TX_IQ: if (sc->base_params->calib_need & IWN_FLG_NEED_PHY_CALIB_TX_IQ) idx = 2; break; case IWN5000_PHY_CALIB_TX_IQ_PERIODIC: if (sc->base_params->calib_need & IWN_FLG_NEED_PHY_CALIB_TX_IQ_PERIODIC) idx = 3; break; case IWN5000_PHY_CALIB_BASE_BAND: if (sc->base_params->calib_need & IWN_FLG_NEED_PHY_CALIB_BASE_BAND) idx = 4; break; } if (idx == -1) /* Ignore other results. */ return; /* Save calibration result. */ if (sc->calibcmd[idx].buf != NULL) free(sc->calibcmd[idx].buf, M_DEVBUF); sc->calibcmd[idx].buf = malloc(len, M_DEVBUF, M_NOWAIT); if (sc->calibcmd[idx].buf == NULL) { DPRINTF(sc, IWN_DEBUG_CALIBRATE, "not enough memory for calibration result %d\n", calib->code); return; } DPRINTF(sc, IWN_DEBUG_CALIBRATE, "saving calibration result idx=%d, code=%d len=%d\n", idx, calib->code, len); sc->calibcmd[idx].len = len; memcpy(sc->calibcmd[idx].buf, calib, len); } static void iwn_stats_update(struct iwn_softc *sc, struct iwn_calib_state *calib, struct iwn_stats *stats, int len) { struct iwn_stats_bt *stats_bt; struct iwn_stats *lstats; /* * First - check whether the length is the bluetooth or normal. * * If it's normal - just copy it and bump out. * Otherwise we have to convert things. */ if (len == sizeof(struct iwn_stats) + 4) { memcpy(&sc->last_stat, stats, sizeof(struct iwn_stats)); sc->last_stat_valid = 1; return; } /* * If it's not the bluetooth size - log, then just copy. */ if (len != sizeof(struct iwn_stats_bt) + 4) { DPRINTF(sc, IWN_DEBUG_STATS, "%s: size of rx statistics (%d) not an expected size!\n", __func__, len); memcpy(&sc->last_stat, stats, sizeof(struct iwn_stats)); sc->last_stat_valid = 1; return; } /* * Ok. Time to copy. 
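 * The bluetooth-flavoured report carries the same sub-blocks as the
 * legacy layout (plus extra bluetooth state), so the OFDM/CCK/HT PHY,
 * general RX, TX and general blocks are copied one by one into
 * sc->last_stat below.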
*/ stats_bt = (struct iwn_stats_bt *) stats; lstats = &sc->last_stat; /* flags */ lstats->flags = stats_bt->flags; /* rx_bt */ memcpy(&lstats->rx.ofdm, &stats_bt->rx_bt.ofdm, sizeof(struct iwn_rx_phy_stats)); memcpy(&lstats->rx.cck, &stats_bt->rx_bt.cck, sizeof(struct iwn_rx_phy_stats)); memcpy(&lstats->rx.general, &stats_bt->rx_bt.general_bt.common, sizeof(struct iwn_rx_general_stats)); memcpy(&lstats->rx.ht, &stats_bt->rx_bt.ht, sizeof(struct iwn_rx_ht_phy_stats)); /* tx */ memcpy(&lstats->tx, &stats_bt->tx, sizeof(struct iwn_tx_stats)); /* general */ memcpy(&lstats->general, &stats_bt->general, sizeof(struct iwn_general_stats)); /* XXX TODO: Squirrel away the extra bluetooth stats somewhere */ sc->last_stat_valid = 1; } /* * Process an RX_STATISTICS or BEACON_STATISTICS firmware notification. * The latter is sent by the firmware after each received beacon. */ static void iwn_rx_statistics(struct iwn_softc *sc, struct iwn_rx_desc *desc) { struct iwn_ops *ops = &sc->ops; struct ieee80211com *ic = &sc->sc_ic; struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps); struct iwn_calib_state *calib = &sc->calib; struct iwn_stats *stats = (struct iwn_stats *)(desc + 1); struct iwn_stats *lstats; int temp; DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__); /* Ignore statistics received during a scan. */ if (vap->iv_state != IEEE80211_S_RUN || (ic->ic_flags & IEEE80211_F_SCAN)){ DPRINTF(sc, IWN_DEBUG_TRACE, "->%s received during calib\n", __func__); return; } DPRINTF(sc, IWN_DEBUG_CALIBRATE | IWN_DEBUG_STATS, "%s: received statistics, cmd %d, len %d\n", __func__, desc->type, le16toh(desc->len)); sc->calib_cnt = 0; /* Reset TX power calibration timeout. */ /* * Collect/track general statistics for reporting. * * This takes care of ensuring that the bluetooth sized message * will be correctly converted to the legacy sized message. */ iwn_stats_update(sc, calib, stats, le16toh(desc->len)); /* * And now, let's take a reference of it to use! */ lstats = &sc->last_stat; /* Test if temperature has changed. */ if (lstats->general.temp != sc->rawtemp) { /* Convert "raw" temperature to degC. */ sc->rawtemp = stats->general.temp; temp = ops->get_temperature(sc); DPRINTF(sc, IWN_DEBUG_CALIBRATE, "%s: temperature %d\n", __func__, temp); /* Update TX power if need be (4965AGN only). */ if (sc->hw_type == IWN_HW_REV_TYPE_4965) iwn4965_power_calibration(sc, temp); } if (desc->type != IWN_BEACON_STATISTICS) return; /* Reply to a statistics request. */ sc->noise = iwn_get_noise(&lstats->rx.general); DPRINTF(sc, IWN_DEBUG_CALIBRATE, "%s: noise %d\n", __func__, sc->noise); /* Test that RSSI and noise are present in stats report. */ if (le32toh(lstats->rx.general.flags) != 1) { DPRINTF(sc, IWN_DEBUG_ANY, "%s\n", "received statistics without RSSI"); return; } if (calib->state == IWN_CALIB_STATE_ASSOC) iwn_collect_noise(sc, &lstats->rx.general); else if (calib->state == IWN_CALIB_STATE_RUN) { iwn_tune_sensitivity(sc, &lstats->rx); /* * XXX TODO: Only run the RX recovery if we're associated! */ iwn_check_rx_recovery(sc, lstats); iwn_save_stats_counters(sc, lstats); } DPRINTF(sc, IWN_DEBUG_TRACE, "->%s: end\n",__func__); } /* * Save the relevant statistic counters for the next calibration * pass. */ static void iwn_save_stats_counters(struct iwn_softc *sc, const struct iwn_stats *rs) { struct iwn_calib_state *calib = &sc->calib; /* Save counters values for next call. 
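 * Keeping a snapshot of the false-alarm and bad-PLCP counters here lets
 * the next calibration pass reason about how much they have changed
 * since the previous statistics notification.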
*/ calib->bad_plcp_cck = le32toh(rs->rx.cck.bad_plcp); calib->fa_cck = le32toh(rs->rx.cck.fa); calib->bad_plcp_ht = le32toh(rs->rx.ht.bad_plcp); calib->bad_plcp_ofdm = le32toh(rs->rx.ofdm.bad_plcp); calib->fa_ofdm = le32toh(rs->rx.ofdm.fa); /* Last time we received these tick values */ sc->last_calib_ticks = ticks; } /* * Process a TX_DONE firmware notification. Unfortunately, the 4965AGN * and 5000 adapters have different incompatible TX status formats. */ static void iwn4965_tx_done(struct iwn_softc *sc, struct iwn_rx_desc *desc, struct iwn_rx_data *data) { struct iwn4965_tx_stat *stat = (struct iwn4965_tx_stat *)(desc + 1); int qid = desc->qid & IWN_RX_DESC_QID_MSK; DPRINTF(sc, IWN_DEBUG_XMIT, "%s: " "qid %d idx %d RTS retries %d ACK retries %d nkill %d rate %x duration %d status %x\n", __func__, desc->qid, desc->idx, stat->rtsfailcnt, stat->ackfailcnt, stat->btkillcnt, stat->rate, le16toh(stat->duration), le32toh(stat->status)); if (qid >= sc->firstaggqueue) { iwn_ampdu_tx_done(sc, qid, desc->idx, stat->nframes, stat->rtsfailcnt, stat->ackfailcnt, &stat->status); } else { iwn_tx_done(sc, desc, stat->rtsfailcnt, stat->ackfailcnt, le32toh(stat->status) & 0xff); } } static void iwn5000_tx_done(struct iwn_softc *sc, struct iwn_rx_desc *desc, struct iwn_rx_data *data) { struct iwn5000_tx_stat *stat = (struct iwn5000_tx_stat *)(desc + 1); int qid = desc->qid & IWN_RX_DESC_QID_MSK; DPRINTF(sc, IWN_DEBUG_XMIT, "%s: " "qid %d idx %d RTS retries %d ACK retries %d nkill %d rate %x duration %d status %x\n", __func__, desc->qid, desc->idx, stat->rtsfailcnt, stat->ackfailcnt, stat->btkillcnt, stat->rate, le16toh(stat->duration), le32toh(stat->status)); #ifdef notyet /* Reset TX scheduler slot. */ iwn5000_reset_sched(sc, qid, desc->idx); #endif if (qid >= sc->firstaggqueue) { iwn_ampdu_tx_done(sc, qid, desc->idx, stat->nframes, stat->rtsfailcnt, stat->ackfailcnt, &stat->status); } else { iwn_tx_done(sc, desc, stat->rtsfailcnt, stat->ackfailcnt, le16toh(stat->status) & 0xff); } } /* * Adapter-independent backend for TX_DONE firmware notifications. */ static void iwn_tx_done(struct iwn_softc *sc, struct iwn_rx_desc *desc, int rtsfailcnt, int ackfailcnt, uint8_t status) { struct ieee80211_ratectl_tx_status *txs = &sc->sc_txs; struct iwn_tx_ring *ring = &sc->txq[desc->qid & IWN_RX_DESC_QID_MSK]; struct iwn_tx_data *data = &ring->data[desc->idx]; struct mbuf *m; struct ieee80211_node *ni; KASSERT(data->ni != NULL, ("no node")); DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__); /* Unmap and free mbuf. */ bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_POSTWRITE); bus_dmamap_unload(ring->data_dmat, data->map); m = data->m, data->m = NULL; ni = data->ni, data->ni = NULL; /* * Update rate control statistics for the node. */ txs->flags = IEEE80211_RATECTL_STATUS_SHORT_RETRY | IEEE80211_RATECTL_STATUS_LONG_RETRY; txs->short_retries = rtsfailcnt; txs->long_retries = ackfailcnt; if (!(status & IWN_TX_FAIL)) txs->status = IEEE80211_RATECTL_TX_SUCCESS; else { switch (status) { case IWN_TX_FAIL_SHORT_LIMIT: txs->status = IEEE80211_RATECTL_TX_FAIL_SHORT; break; case IWN_TX_FAIL_LONG_LIMIT: txs->status = IEEE80211_RATECTL_TX_FAIL_LONG; break; case IWN_TX_STATUS_FAIL_LIFE_EXPIRE: txs->status = IEEE80211_RATECTL_TX_FAIL_EXPIRED; break; default: txs->status = IEEE80211_RATECTL_TX_FAIL_UNSPECIFIED; break; } } ieee80211_ratectl_tx_complete(ni, txs); /* * Channels marked for "radar" require traffic to be received * to unlock before we can transmit. 
Until traffic is seen * any attempt to transmit is returned immediately with status * set to IWN_TX_FAIL_TX_LOCKED. Unfortunately this can easily * happen on first authenticate after scanning. To workaround * this we ignore a failure of this sort in AUTH state so the * 802.11 layer will fall back to using a timeout to wait for * the AUTH reply. This allows the firmware time to see * traffic so a subsequent retry of AUTH succeeds. It's * unclear why the firmware does not maintain state for * channels recently visited as this would allow immediate * use of the channel after a scan (where we see traffic). */ if (status == IWN_TX_FAIL_TX_LOCKED && ni->ni_vap->iv_state == IEEE80211_S_AUTH) ieee80211_tx_complete(ni, m, 0); else ieee80211_tx_complete(ni, m, (status & IWN_TX_FAIL) != 0); sc->sc_tx_timer = 0; if (--ring->queued < IWN_TX_RING_LOMARK) sc->qfullmsk &= ~(1 << ring->qid); DPRINTF(sc, IWN_DEBUG_TRACE, "->%s: end\n",__func__); } /* * Process a "command done" firmware notification. This is where we wakeup * processes waiting for a synchronous command completion. */ static void iwn_cmd_done(struct iwn_softc *sc, struct iwn_rx_desc *desc) { struct iwn_tx_ring *ring; struct iwn_tx_data *data; int cmd_queue_num; if (sc->sc_flags & IWN_FLAG_PAN_SUPPORT) cmd_queue_num = IWN_PAN_CMD_QUEUE; else cmd_queue_num = IWN_CMD_QUEUE_NUM; if ((desc->qid & IWN_RX_DESC_QID_MSK) != cmd_queue_num) return; /* Not a command ack. */ ring = &sc->txq[cmd_queue_num]; data = &ring->data[desc->idx]; /* If the command was mapped in an mbuf, free it. */ if (data->m != NULL) { bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_POSTWRITE); bus_dmamap_unload(ring->data_dmat, data->map); m_freem(data->m); data->m = NULL; } wakeup(&ring->desc[desc->idx]); } static void iwn_ampdu_tx_done(struct iwn_softc *sc, int qid, int idx, int nframes, int rtsfailcnt, int ackfailcnt, void *stat) { struct iwn_ops *ops = &sc->ops; struct iwn_tx_ring *ring = &sc->txq[qid]; struct ieee80211_ratectl_tx_status *txs = &sc->sc_txs; struct iwn_tx_data *data; struct mbuf *m; struct iwn_node *wn; struct ieee80211_node *ni; struct ieee80211_tx_ampdu *tap; uint64_t bitmap; uint32_t *status = stat; uint16_t *aggstatus = stat; uint16_t ssn; uint8_t tid; int bit, i, lastidx, *res, seqno, shift, start; /* XXX TODO: status is le16 field! Grr */ DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__); DPRINTF(sc, IWN_DEBUG_XMIT, "%s: nframes=%d, status=0x%08x\n", __func__, nframes, *status); tap = sc->qid2tap[qid]; tid = tap->txa_tid; wn = (void *)tap->txa_ni; ni = tap->txa_ni; /* * XXX TODO: ACK and RTS failures would be nice here! */ /* * A-MPDU single frame status - if we failed to transmit it * in A-MPDU, then it may be a permanent failure. * * XXX TODO: check what the Linux iwlwifi driver does here; * there's some permanent and temporary failures that may be * handled differently. */ if (nframes == 1) { txs->flags = IEEE80211_RATECTL_STATUS_SHORT_RETRY | IEEE80211_RATECTL_STATUS_LONG_RETRY; txs->short_retries = rtsfailcnt; txs->long_retries = ackfailcnt; if ((*status & 0xff) != 1 && (*status & 0xff) != 2) { #ifdef NOT_YET printf("ieee80211_send_bar()\n"); #endif /* * If we completely fail a transmit, make sure a * notification is pushed up to the rate control * layer. */ /* XXX */ txs->status = IEEE80211_RATECTL_TX_FAIL_UNSPECIFIED; } else { /* * If nframes=1, then we won't be getting a BA for * this frame. Ensure that we correctly update the * rate control code with how many retries were * needed to send it. 
*/ txs->status = IEEE80211_RATECTL_TX_SUCCESS; } ieee80211_ratectl_tx_complete(ni, txs); } bitmap = 0; start = idx; for (i = 0; i < nframes; i++) { if (le16toh(aggstatus[i * 2]) & 0xc) continue; idx = le16toh(aggstatus[2*i + 1]) & 0xff; bit = idx - start; shift = 0; if (bit >= 64) { shift = 0x100 - idx + start; bit = 0; start = idx; } else if (bit <= -64) bit = 0x100 - start + idx; else if (bit < 0) { shift = start - idx; start = idx; bit = 0; } bitmap = bitmap << shift; bitmap |= 1ULL << bit; } tap = sc->qid2tap[qid]; tid = tap->txa_tid; wn = (void *)tap->txa_ni; wn->agg[tid].bitmap = bitmap; wn->agg[tid].startidx = start; wn->agg[tid].nframes = nframes; res = NULL; ssn = 0; if (!IEEE80211_AMPDU_RUNNING(tap)) { res = tap->txa_private; ssn = tap->txa_start & 0xfff; } /* This is going nframes DWORDS into the descriptor? */ seqno = le32toh(*(status + nframes)) & 0xfff; for (lastidx = (seqno & 0xff); ring->read != lastidx;) { data = &ring->data[ring->read]; /* Unmap and free mbuf. */ bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_POSTWRITE); bus_dmamap_unload(ring->data_dmat, data->map); m = data->m, data->m = NULL; ni = data->ni, data->ni = NULL; KASSERT(ni != NULL, ("no node")); KASSERT(m != NULL, ("no mbuf")); DPRINTF(sc, IWN_DEBUG_XMIT, "%s: freeing m=%p\n", __func__, m); ieee80211_tx_complete(ni, m, 1); ring->queued--; ring->read = (ring->read + 1) % IWN_TX_RING_COUNT; } if (ring->queued == 0 && res != NULL) { iwn_nic_lock(sc); ops->ampdu_tx_stop(sc, qid, tid, ssn); iwn_nic_unlock(sc); sc->qid2tap[qid] = NULL; free(res, M_DEVBUF); return; } sc->sc_tx_timer = 0; if (ring->queued < IWN_TX_RING_LOMARK) sc->qfullmsk &= ~(1 << ring->qid); DPRINTF(sc, IWN_DEBUG_TRACE, "->%s: end\n",__func__); } /* * Process an INT_FH_RX or INT_SW_RX interrupt. */ static void iwn_notif_intr(struct iwn_softc *sc) { struct iwn_ops *ops = &sc->ops; struct ieee80211com *ic = &sc->sc_ic; struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps); uint16_t hw; bus_dmamap_sync(sc->rxq.stat_dma.tag, sc->rxq.stat_dma.map, BUS_DMASYNC_POSTREAD); hw = le16toh(sc->rxq.stat->closed_count) & 0xfff; while (sc->rxq.cur != hw) { struct iwn_rx_data *data = &sc->rxq.data[sc->rxq.cur]; struct iwn_rx_desc *desc; bus_dmamap_sync(sc->rxq.data_dmat, data->map, BUS_DMASYNC_POSTREAD); desc = mtod(data->m, struct iwn_rx_desc *); DPRINTF(sc, IWN_DEBUG_RECV, "%s: cur=%d; qid %x idx %d flags %x type %d(%s) len %d\n", __func__, sc->rxq.cur, desc->qid & IWN_RX_DESC_QID_MSK, desc->idx, desc->flags, desc->type, iwn_intr_str(desc->type), le16toh(desc->len)); if (!(desc->qid & IWN_UNSOLICITED_RX_NOTIF)) /* Reply to a command. */ iwn_cmd_done(sc, desc); switch (desc->type) { case IWN_RX_PHY: iwn_rx_phy(sc, desc); break; case IWN_RX_DONE: /* 4965AGN only. */ case IWN_MPDU_RX_DONE: /* An 802.11 frame has been received. */ iwn_rx_done(sc, desc, data); break; case IWN_RX_COMPRESSED_BA: /* A Compressed BlockAck has been received. */ iwn_rx_compressed_ba(sc, desc); break; case IWN_TX_DONE: /* An 802.11 frame has been transmitted. */ ops->tx_done(sc, desc, data); break; case IWN_RX_STATISTICS: case IWN_BEACON_STATISTICS: iwn_rx_statistics(sc, desc); break; case IWN_BEACON_MISSED: { struct iwn_beacon_missed *miss = (struct iwn_beacon_missed *)(desc + 1); int misses; misses = le32toh(miss->consecutive); DPRINTF(sc, IWN_DEBUG_STATE, "%s: beacons missed %d/%d\n", __func__, misses, le32toh(miss->total)); /* * If more than 5 consecutive beacons are missed, * reinitialize the sensitivity state machine. 
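 * Independently of that, once the count reaches the VAP's
 * iv_bmissthreshold the event is handed up to net80211 via
 * ieee80211_beacon_miss(), with the driver lock dropped around the call.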
*/ if (vap->iv_state == IEEE80211_S_RUN && (ic->ic_flags & IEEE80211_F_SCAN) == 0) { if (misses > 5) (void)iwn_init_sensitivity(sc); if (misses >= vap->iv_bmissthreshold) { IWN_UNLOCK(sc); ieee80211_beacon_miss(ic); IWN_LOCK(sc); } } break; } case IWN_UC_READY: { struct iwn_ucode_info *uc = (struct iwn_ucode_info *)(desc + 1); /* The microcontroller is ready. */ DPRINTF(sc, IWN_DEBUG_RESET, "microcode alive notification version=%d.%d " "subtype=%x alive=%x\n", uc->major, uc->minor, uc->subtype, le32toh(uc->valid)); if (le32toh(uc->valid) != 1) { device_printf(sc->sc_dev, "microcontroller initialization failed"); break; } if (uc->subtype == IWN_UCODE_INIT) { /* Save microcontroller report. */ memcpy(&sc->ucode_info, uc, sizeof (*uc)); } /* Save the address of the error log in SRAM. */ sc->errptr = le32toh(uc->errptr); break; } #ifdef IWN_DEBUG case IWN_STATE_CHANGED: { /* * State change allows hardware switch change to be * noted. However, we handle this in iwn_intr as we * get both the enable/disble intr. */ uint32_t *status = (uint32_t *)(desc + 1); DPRINTF(sc, IWN_DEBUG_INTR | IWN_DEBUG_STATE, "state changed to %x\n", le32toh(*status)); break; } case IWN_START_SCAN: { struct iwn_start_scan *scan = (struct iwn_start_scan *)(desc + 1); DPRINTF(sc, IWN_DEBUG_ANY, "%s: scanning channel %d status %x\n", __func__, scan->chan, le32toh(scan->status)); break; } #endif case IWN_STOP_SCAN: { #ifdef IWN_DEBUG struct iwn_stop_scan *scan = (struct iwn_stop_scan *)(desc + 1); DPRINTF(sc, IWN_DEBUG_STATE | IWN_DEBUG_SCAN, "scan finished nchan=%d status=%d chan=%d\n", scan->nchan, scan->status, scan->chan); #endif sc->sc_is_scanning = 0; callout_stop(&sc->scan_timeout); IWN_UNLOCK(sc); ieee80211_scan_next(vap); IWN_LOCK(sc); break; } case IWN5000_CALIBRATION_RESULT: iwn5000_rx_calib_results(sc, desc); break; case IWN5000_CALIBRATION_DONE: sc->sc_flags |= IWN_FLAG_CALIB_DONE; wakeup(sc); break; } sc->rxq.cur = (sc->rxq.cur + 1) % IWN_RX_RING_COUNT; } /* Tell the firmware what we have processed. */ hw = (hw == 0) ? IWN_RX_RING_COUNT - 1 : hw - 1; IWN_WRITE(sc, IWN_FH_RX_WPTR, hw & ~7); } /* * Process an INT_WAKEUP interrupt raised when the microcontroller wakes up * from power-down sleep mode. */ static void iwn_wakeup_intr(struct iwn_softc *sc) { int qid; DPRINTF(sc, IWN_DEBUG_RESET, "%s: ucode wakeup from power-down sleep\n", __func__); /* Wakeup RX and TX rings. */ IWN_WRITE(sc, IWN_FH_RX_WPTR, sc->rxq.cur & ~7); for (qid = 0; qid < sc->ntxqs; qid++) { struct iwn_tx_ring *ring = &sc->txq[qid]; IWN_WRITE(sc, IWN_HBUS_TARG_WRPTR, qid << 8 | ring->cur); } } static void iwn_rftoggle_task(void *arg, int npending) { struct iwn_softc *sc = arg; struct ieee80211com *ic = &sc->sc_ic; uint32_t tmp; IWN_LOCK(sc); tmp = IWN_READ(sc, IWN_GP_CNTRL); IWN_UNLOCK(sc); device_printf(sc->sc_dev, "RF switch: radio %s\n", (tmp & IWN_GP_CNTRL_RFKILL) ? "enabled" : "disabled"); if (!(tmp & IWN_GP_CNTRL_RFKILL)) { ieee80211_suspend_all(ic); /* Enable interrupts to get RF toggle notification. */ IWN_LOCK(sc); IWN_WRITE(sc, IWN_INT, 0xffffffff); IWN_WRITE(sc, IWN_INT_MASK, sc->int_mask); IWN_UNLOCK(sc); } else ieee80211_resume_all(ic); } /* * Dump the error log of the firmware when a firmware panic occurs. Although * we can't debug the firmware because it is neither open source nor free, it * can help us to identify certain classes of problems. */ static void iwn_fatal_intr(struct iwn_softc *sc) { struct iwn_fw_dump dump; int i; IWN_LOCK_ASSERT(sc); /* Force a complete recalibration on next init. 
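 * Clearing IWN_FLAG_CALIB_DONE below means the saved calibration
 * results will be collected again the next time the initialization
 * firmware runs.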
*/ sc->sc_flags &= ~IWN_FLAG_CALIB_DONE; /* Check that the error log address is valid. */ if (sc->errptr < IWN_FW_DATA_BASE || sc->errptr + sizeof (dump) > IWN_FW_DATA_BASE + sc->fw_data_maxsz) { printf("%s: bad firmware error log address 0x%08x\n", __func__, sc->errptr); return; } if (iwn_nic_lock(sc) != 0) { printf("%s: could not read firmware error log\n", __func__); return; } /* Read firmware error log from SRAM. */ iwn_mem_read_region_4(sc, sc->errptr, (uint32_t *)&dump, sizeof (dump) / sizeof (uint32_t)); iwn_nic_unlock(sc); if (dump.valid == 0) { printf("%s: firmware error log is empty\n", __func__); return; } printf("firmware error log:\n"); printf(" error type = \"%s\" (0x%08X)\n", (dump.id < nitems(iwn_fw_errmsg)) ? iwn_fw_errmsg[dump.id] : "UNKNOWN", dump.id); printf(" program counter = 0x%08X\n", dump.pc); printf(" source line = 0x%08X\n", dump.src_line); printf(" error data = 0x%08X%08X\n", dump.error_data[0], dump.error_data[1]); printf(" branch link = 0x%08X%08X\n", dump.branch_link[0], dump.branch_link[1]); printf(" interrupt link = 0x%08X%08X\n", dump.interrupt_link[0], dump.interrupt_link[1]); printf(" time = %u\n", dump.time[0]); /* Dump driver status (TX and RX rings) while we're here. */ printf("driver status:\n"); for (i = 0; i < sc->ntxqs; i++) { struct iwn_tx_ring *ring = &sc->txq[i]; printf(" tx ring %2d: qid=%-2d cur=%-3d queued=%-3d\n", i, ring->qid, ring->cur, ring->queued); } printf(" rx ring: cur=%d\n", sc->rxq.cur); } static void iwn_intr(void *arg) { struct iwn_softc *sc = arg; uint32_t r1, r2, tmp; IWN_LOCK(sc); /* Disable interrupts. */ IWN_WRITE(sc, IWN_INT_MASK, 0); /* Read interrupts from ICT (fast) or from registers (slow). */ if (sc->sc_flags & IWN_FLAG_USE_ICT) { bus_dmamap_sync(sc->ict_dma.tag, sc->ict_dma.map, BUS_DMASYNC_POSTREAD); tmp = 0; while (sc->ict[sc->ict_cur] != 0) { tmp |= sc->ict[sc->ict_cur]; sc->ict[sc->ict_cur] = 0; /* Acknowledge. */ sc->ict_cur = (sc->ict_cur + 1) % IWN_ICT_COUNT; } tmp = le32toh(tmp); if (tmp == 0xffffffff) /* Shouldn't happen. */ tmp = 0; else if (tmp & 0xc0000) /* Workaround a HW bug. */ tmp |= 0x8000; r1 = (tmp & 0xff00) << 16 | (tmp & 0xff); r2 = 0; /* Unused. */ } else { r1 = IWN_READ(sc, IWN_INT); if (r1 == 0xffffffff || (r1 & 0xfffffff0) == 0xa5a5a5a0) { IWN_UNLOCK(sc); return; /* Hardware gone! */ } r2 = IWN_READ(sc, IWN_FH_INT); } DPRINTF(sc, IWN_DEBUG_INTR, "interrupt reg1=0x%08x reg2=0x%08x\n" , r1, r2); if (r1 == 0 && r2 == 0) goto done; /* Interrupt not for us. */ /* Acknowledge interrupts. */ IWN_WRITE(sc, IWN_INT, r1); if (!(sc->sc_flags & IWN_FLAG_USE_ICT)) IWN_WRITE(sc, IWN_FH_INT, r2); if (r1 & IWN_INT_RF_TOGGLED) { taskqueue_enqueue(sc->sc_tq, &sc->sc_rftoggle_task); goto done; } if (r1 & IWN_INT_CT_REACHED) { device_printf(sc->sc_dev, "%s: critical temperature reached!\n", __func__); } if (r1 & (IWN_INT_SW_ERR | IWN_INT_HW_ERR)) { device_printf(sc->sc_dev, "%s: fatal firmware error\n", __func__); #ifdef IWN_DEBUG iwn_debug_register(sc); #endif /* Dump firmware error log and stop. 
*/ iwn_fatal_intr(sc); taskqueue_enqueue(sc->sc_tq, &sc->sc_panic_task); goto done; } if ((r1 & (IWN_INT_FH_RX | IWN_INT_SW_RX | IWN_INT_RX_PERIODIC)) || (r2 & IWN_FH_INT_RX)) { if (sc->sc_flags & IWN_FLAG_USE_ICT) { if (r1 & (IWN_INT_FH_RX | IWN_INT_SW_RX)) IWN_WRITE(sc, IWN_FH_INT, IWN_FH_INT_RX); IWN_WRITE_1(sc, IWN_INT_PERIODIC, IWN_INT_PERIODIC_DIS); iwn_notif_intr(sc); if (r1 & (IWN_INT_FH_RX | IWN_INT_SW_RX)) { IWN_WRITE_1(sc, IWN_INT_PERIODIC, IWN_INT_PERIODIC_ENA); } } else iwn_notif_intr(sc); } if ((r1 & IWN_INT_FH_TX) || (r2 & IWN_FH_INT_TX)) { if (sc->sc_flags & IWN_FLAG_USE_ICT) IWN_WRITE(sc, IWN_FH_INT, IWN_FH_INT_TX); wakeup(sc); /* FH DMA transfer completed. */ } if (r1 & IWN_INT_ALIVE) wakeup(sc); /* Firmware is alive. */ if (r1 & IWN_INT_WAKEUP) iwn_wakeup_intr(sc); done: /* Re-enable interrupts. */ if (sc->sc_flags & IWN_FLAG_RUNNING) IWN_WRITE(sc, IWN_INT_MASK, sc->int_mask); IWN_UNLOCK(sc); } /* * Update TX scheduler ring when transmitting an 802.11 frame (4965AGN and * 5000 adapters use a slightly different format). */ static void iwn4965_update_sched(struct iwn_softc *sc, int qid, int idx, uint8_t id, uint16_t len) { uint16_t *w = &sc->sched[qid * IWN4965_SCHED_COUNT + idx]; DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__); *w = htole16(len + 8); bus_dmamap_sync(sc->sched_dma.tag, sc->sched_dma.map, BUS_DMASYNC_PREWRITE); if (idx < IWN_SCHED_WINSZ) { *(w + IWN_TX_RING_COUNT) = *w; bus_dmamap_sync(sc->sched_dma.tag, sc->sched_dma.map, BUS_DMASYNC_PREWRITE); } } static void iwn5000_update_sched(struct iwn_softc *sc, int qid, int idx, uint8_t id, uint16_t len) { uint16_t *w = &sc->sched[qid * IWN5000_SCHED_COUNT + idx]; DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__); *w = htole16(id << 12 | (len + 8)); bus_dmamap_sync(sc->sched_dma.tag, sc->sched_dma.map, BUS_DMASYNC_PREWRITE); if (idx < IWN_SCHED_WINSZ) { *(w + IWN_TX_RING_COUNT) = *w; bus_dmamap_sync(sc->sched_dma.tag, sc->sched_dma.map, BUS_DMASYNC_PREWRITE); } } #ifdef notyet static void iwn5000_reset_sched(struct iwn_softc *sc, int qid, int idx) { uint16_t *w = &sc->sched[qid * IWN5000_SCHED_COUNT + idx]; DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__); *w = (*w & htole16(0xf000)) | htole16(1); bus_dmamap_sync(sc->sched_dma.tag, sc->sched_dma.map, BUS_DMASYNC_PREWRITE); if (idx < IWN_SCHED_WINSZ) { *(w + IWN_TX_RING_COUNT) = *w; bus_dmamap_sync(sc->sched_dma.tag, sc->sched_dma.map, BUS_DMASYNC_PREWRITE); } } #endif /* * Check whether OFDM 11g protection will be enabled for the given rate. * * The original driver code only enabled protection for OFDM rates. * It didn't check to see whether it was operating in 11a or 11bg mode. */ static int iwn_check_rate_needs_protection(struct iwn_softc *sc, struct ieee80211vap *vap, uint8_t rate) { struct ieee80211com *ic = vap->iv_ic; /* * Not in 2GHz mode? Then there's no need to enable OFDM * 11bg protection. */ if (! IEEE80211_IS_CHAN_2GHZ(ic->ic_curchan)) { return (0); } /* * 11bg protection not enabled? Then don't use it. */ if ((ic->ic_flags & IEEE80211_F_USEPROT) == 0) return (0); /* * If it's an 11n rate - no protection. * We'll do it via a specific 11n check. */ if (rate & IEEE80211_RATE_MCS) { return (0); } /* * Do a rate table lookup. If the PHY is CCK, * don't do protection. */ if (ieee80211_rate2phytype(ic->ic_rt, rate) == IEEE80211_T_CCK) return (0); /* * Yup, enable protection. */ return (1); } /* * return a value between 0 and IWN_MAX_TX_RETRIES-1 as an index into * the link quality table that reflects this particular entry. 
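 * Index 0 corresponds to the highest negotiated rate.  With a typical
 * 11g rate set, for instance, 54 Mb/s would map to offset 0 and 1 Mb/s
 * to the last populated offset; a rate with no match falls back to
 * IWN_MAX_TX_RETRIES - 1.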
*/ static int iwn_tx_rate_to_linkq_offset(struct iwn_softc *sc, struct ieee80211_node *ni, uint8_t rate) { struct ieee80211_rateset *rs; int is_11n; int nr; int i; uint8_t cmp_rate; /* * Figure out if we're using 11n or not here. */ if (IEEE80211_IS_CHAN_HT(ni->ni_chan) && ni->ni_htrates.rs_nrates > 0) is_11n = 1; else is_11n = 0; /* * Use the correct rate table. */ if (is_11n) { rs = (struct ieee80211_rateset *) &ni->ni_htrates; nr = ni->ni_htrates.rs_nrates; } else { rs = &ni->ni_rates; nr = rs->rs_nrates; } /* * Find the relevant link quality entry in the table. */ for (i = 0; i < nr && i < IWN_MAX_TX_RETRIES - 1 ; i++) { /* * The link quality table index starts at 0 == highest * rate, so we walk the rate table backwards. */ cmp_rate = rs->rs_rates[(nr - 1) - i]; if (rate & IEEE80211_RATE_MCS) cmp_rate |= IEEE80211_RATE_MCS; #if 0 DPRINTF(sc, IWN_DEBUG_XMIT, "%s: idx %d: nr=%d, rate=0x%02x, rateentry=0x%02x\n", __func__, i, nr, rate, cmp_rate); #endif if (cmp_rate == rate) return (i); } /* Failed? Start at the end */ return (IWN_MAX_TX_RETRIES - 1); } static int iwn_tx_data(struct iwn_softc *sc, struct mbuf *m, struct ieee80211_node *ni) { const struct ieee80211_txparam *tp = ni->ni_txparms; struct ieee80211vap *vap = ni->ni_vap; struct ieee80211com *ic = ni->ni_ic; struct iwn_node *wn = (void *)ni; struct iwn_tx_ring *ring; struct iwn_tx_cmd *cmd; struct iwn_cmd_data *tx; struct ieee80211_frame *wh; struct ieee80211_key *k = NULL; uint32_t flags; uint16_t seqno, qos; uint8_t tid, type; int ac, totlen, rate; DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__); IWN_LOCK_ASSERT(sc); wh = mtod(m, struct ieee80211_frame *); type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK; /* Select EDCA Access Category and TX ring for this frame. */ if (IEEE80211_QOS_HAS_SEQ(wh)) { qos = ((const struct ieee80211_qosframe *)wh)->i_qos[0]; tid = qos & IEEE80211_QOS_TID; } else { qos = 0; tid = 0; } /* Choose a TX rate index. */ if (type == IEEE80211_FC0_TYPE_MGT || type == IEEE80211_FC0_TYPE_CTL || (m->m_flags & M_EAPOL) != 0) rate = tp->mgmtrate; else if (IEEE80211_IS_MULTICAST(wh->i_addr1)) rate = tp->mcastrate; else if (tp->ucastrate != IEEE80211_FIXED_RATE_NONE) rate = tp->ucastrate; else { /* XXX pass pktlen */ (void) ieee80211_ratectl_rate(ni, NULL, 0); rate = ni->ni_txrate; } /* * XXX TODO: Group addressed frames aren't aggregated and must * go to the normal non-aggregation queue, and have a NONQOS TID * assigned from net80211. */ ac = M_WME_GETAC(m); seqno = ni->ni_txseqs[tid]; if (m->m_flags & M_AMPDU_MPDU) { struct ieee80211_tx_ampdu *tap = &ni->ni_tx_ampdu[ac]; if (!IEEE80211_AMPDU_RUNNING(tap)) { return (EINVAL); } /* * Queue this frame to the hardware ring that we've * negotiated AMPDU TX on. * * Note that the sequence number must match the TX slot * being used! */ ac = *(int *)tap->txa_private; *(uint16_t *)wh->i_seq = htole16(seqno << IEEE80211_SEQ_SEQ_SHIFT); ni->ni_txseqs[tid]++; } /* Encrypt the frame if need be. */ if (wh->i_fc[1] & IEEE80211_FC1_PROTECTED) { /* Retrieve key for TX. */ k = ieee80211_crypto_encap(ni, m); if (k == NULL) { return ENOBUFS; } /* 802.11 header may have moved. */ wh = mtod(m, struct ieee80211_frame *); } totlen = m->m_pkthdr.len; if (ieee80211_radiotap_active_vap(vap)) { struct iwn_tx_radiotap_header *tap = &sc->sc_txtap; tap->wt_flags = 0; tap->wt_rate = rate; if (k != NULL) tap->wt_flags |= IEEE80211_RADIOTAP_F_WEP; ieee80211_radiotap_tx(vap, m); } flags = 0; if (!IEEE80211_IS_MULTICAST(wh->i_addr1)) { /* Unicast frame, check if an ACK is expected. 
*/ if (!qos || (qos & IEEE80211_QOS_ACKPOLICY) != IEEE80211_QOS_ACKPOLICY_NOACK) flags |= IWN_TX_NEED_ACK; } if ((wh->i_fc[0] & (IEEE80211_FC0_TYPE_MASK | IEEE80211_FC0_SUBTYPE_MASK)) == (IEEE80211_FC0_TYPE_CTL | IEEE80211_FC0_SUBTYPE_BAR)) flags |= IWN_TX_IMM_BA; /* Cannot happen yet. */ if (wh->i_fc[1] & IEEE80211_FC1_MORE_FRAG) flags |= IWN_TX_MORE_FRAG; /* Cannot happen yet. */ /* Check if frame must be protected using RTS/CTS or CTS-to-self. */ if (!IEEE80211_IS_MULTICAST(wh->i_addr1)) { /* NB: Group frames are sent using CCK in 802.11b/g. */ if (totlen + IEEE80211_CRC_LEN > vap->iv_rtsthreshold) { flags |= IWN_TX_NEED_RTS; } else if (iwn_check_rate_needs_protection(sc, vap, rate)) { if (ic->ic_protmode == IEEE80211_PROT_CTSONLY) flags |= IWN_TX_NEED_CTS; else if (ic->ic_protmode == IEEE80211_PROT_RTSCTS) flags |= IWN_TX_NEED_RTS; } else if ((rate & IEEE80211_RATE_MCS) && (ic->ic_htprotmode == IEEE80211_PROT_RTSCTS)) { flags |= IWN_TX_NEED_RTS; } /* XXX HT protection? */ if (flags & (IWN_TX_NEED_RTS | IWN_TX_NEED_CTS)) { if (sc->hw_type != IWN_HW_REV_TYPE_4965) { /* 5000 autoselects RTS/CTS or CTS-to-self. */ flags &= ~(IWN_TX_NEED_RTS | IWN_TX_NEED_CTS); flags |= IWN_TX_NEED_PROTECTION; } else flags |= IWN_TX_FULL_TXOP; } } ring = &sc->txq[ac]; if ((m->m_flags & M_AMPDU_MPDU) != 0 && (seqno % 256) != ring->cur) { device_printf(sc->sc_dev, "%s: m=%p: seqno (%d) (%d) != ring index (%d) !\n", __func__, m, seqno, seqno % 256, ring->cur); } /* Prepare TX firmware command. */ cmd = &ring->cmd[ring->cur]; tx = (struct iwn_cmd_data *)cmd->data; /* NB: No need to clear tx, all fields are reinitialized here. */ tx->scratch = 0; /* clear "scratch" area */ if (IEEE80211_IS_MULTICAST(wh->i_addr1) || type != IEEE80211_FC0_TYPE_DATA) tx->id = sc->broadcast_id; else tx->id = wn->id; if (type == IEEE80211_FC0_TYPE_MGT) { uint8_t subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK; /* Tell HW to set timestamp in probe responses. */ if (subtype == IEEE80211_FC0_SUBTYPE_PROBE_RESP) flags |= IWN_TX_INSERT_TSTAMP; if (subtype == IEEE80211_FC0_SUBTYPE_ASSOC_REQ || subtype == IEEE80211_FC0_SUBTYPE_REASSOC_REQ) tx->timeout = htole16(3); else tx->timeout = htole16(2); } else tx->timeout = htole16(0); if (tx->id == sc->broadcast_id) { /* Group or management frame. */ tx->linkq = 0; } else { tx->linkq = iwn_tx_rate_to_linkq_offset(sc, ni, rate); flags |= IWN_TX_LINKQ; /* enable MRR */ } tx->tid = tid; tx->rts_ntries = 60; tx->data_ntries = 15; tx->lifetime = htole32(IWN_LIFETIME_INFINITE); tx->rate = iwn_rate_to_plcp(sc, ni, rate); tx->security = 0; tx->flags = htole32(flags); return (iwn_tx_cmd(sc, m, ni, ring)); } static int iwn_tx_data_raw(struct iwn_softc *sc, struct mbuf *m, struct ieee80211_node *ni, const struct ieee80211_bpf_params *params) { struct ieee80211vap *vap = ni->ni_vap; struct iwn_tx_cmd *cmd; struct iwn_cmd_data *tx; struct ieee80211_frame *wh; struct iwn_tx_ring *ring; uint32_t flags; int ac, rate; uint8_t type; DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__); IWN_LOCK_ASSERT(sc); wh = mtod(m, struct ieee80211_frame *); type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK; ac = params->ibp_pri & 3; /* Choose a TX rate. */ rate = params->ibp_rate0; flags = 0; if ((params->ibp_flags & IEEE80211_BPF_NOACK) == 0) flags |= IWN_TX_NEED_ACK; if (params->ibp_flags & IEEE80211_BPF_RTS) { if (sc->hw_type != IWN_HW_REV_TYPE_4965) { /* 5000 autoselects RTS/CTS or CTS-to-self. 
*/ flags &= ~IWN_TX_NEED_RTS; flags |= IWN_TX_NEED_PROTECTION; } else flags |= IWN_TX_NEED_RTS | IWN_TX_FULL_TXOP; } if (params->ibp_flags & IEEE80211_BPF_CTS) { if (sc->hw_type != IWN_HW_REV_TYPE_4965) { /* 5000 autoselects RTS/CTS or CTS-to-self. */ flags &= ~IWN_TX_NEED_CTS; flags |= IWN_TX_NEED_PROTECTION; } else flags |= IWN_TX_NEED_CTS | IWN_TX_FULL_TXOP; } if (ieee80211_radiotap_active_vap(vap)) { struct iwn_tx_radiotap_header *tap = &sc->sc_txtap; tap->wt_flags = 0; tap->wt_rate = rate; ieee80211_radiotap_tx(vap, m); } ring = &sc->txq[ac]; cmd = &ring->cmd[ring->cur]; tx = (struct iwn_cmd_data *)cmd->data; /* NB: No need to clear tx, all fields are reinitialized here. */ tx->scratch = 0; /* clear "scratch" area */ if (type == IEEE80211_FC0_TYPE_MGT) { uint8_t subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK; /* Tell HW to set timestamp in probe responses. */ if (subtype == IEEE80211_FC0_SUBTYPE_PROBE_RESP) flags |= IWN_TX_INSERT_TSTAMP; if (subtype == IEEE80211_FC0_SUBTYPE_ASSOC_REQ || subtype == IEEE80211_FC0_SUBTYPE_REASSOC_REQ) tx->timeout = htole16(3); else tx->timeout = htole16(2); } else tx->timeout = htole16(0); tx->tid = 0; tx->id = sc->broadcast_id; tx->rts_ntries = params->ibp_try1; tx->data_ntries = params->ibp_try0; tx->lifetime = htole32(IWN_LIFETIME_INFINITE); tx->rate = iwn_rate_to_plcp(sc, ni, rate); tx->security = 0; tx->flags = htole32(flags); /* Group or management frame. */ tx->linkq = 0; return (iwn_tx_cmd(sc, m, ni, ring)); } static int iwn_tx_cmd(struct iwn_softc *sc, struct mbuf *m, struct ieee80211_node *ni, struct iwn_tx_ring *ring) { struct iwn_ops *ops = &sc->ops; struct iwn_tx_cmd *cmd; struct iwn_cmd_data *tx; struct ieee80211_frame *wh; struct iwn_tx_desc *desc; struct iwn_tx_data *data; bus_dma_segment_t *seg, segs[IWN_MAX_SCATTER]; struct mbuf *m1; u_int hdrlen; int totlen, error, pad, nsegs = 0, i; wh = mtod(m, struct ieee80211_frame *); hdrlen = ieee80211_anyhdrsize(wh); totlen = m->m_pkthdr.len; desc = &ring->desc[ring->cur]; data = &ring->data[ring->cur]; /* Prepare TX firmware command. */ cmd = &ring->cmd[ring->cur]; cmd->code = IWN_CMD_TX_DATA; cmd->flags = 0; cmd->qid = ring->qid; cmd->idx = ring->cur; tx = (struct iwn_cmd_data *)cmd->data; tx->len = htole16(totlen); /* Set physical address of "scratch area". */ tx->loaddr = htole32(IWN_LOADDR(data->scratch_paddr)); tx->hiaddr = IWN_HIADDR(data->scratch_paddr); if (hdrlen & 3) { /* First segment length must be a multiple of 4. */ tx->flags |= htole32(IWN_TX_NEED_PADDING); pad = 4 - (hdrlen & 3); } else pad = 0; /* Copy 802.11 header in TX command. */ memcpy((uint8_t *)(tx + 1), wh, hdrlen); /* Trim 802.11 header. */ m_adj(m, hdrlen); error = bus_dmamap_load_mbuf_sg(ring->data_dmat, data->map, m, segs, &nsegs, BUS_DMA_NOWAIT); if (error != 0) { if (error != EFBIG) { device_printf(sc->sc_dev, "%s: can't map mbuf (error %d)\n", __func__, error); return error; } /* Too many DMA segments, linearize mbuf. */ m1 = m_collapse(m, M_NOWAIT, IWN_MAX_SCATTER - 1); if (m1 == NULL) { device_printf(sc->sc_dev, "%s: could not defrag mbuf\n", __func__); return ENOBUFS; } m = m1; error = bus_dmamap_load_mbuf_sg(ring->data_dmat, data->map, m, segs, &nsegs, BUS_DMA_NOWAIT); if (error != 0) { /* XXX fix this */ /* * NB: Do not return error; * original mbuf does not exist anymore. 
*/ device_printf(sc->sc_dev, "%s: can't map mbuf (error %d)\n", __func__, error); if_inc_counter(ni->ni_vap->iv_ifp, IFCOUNTER_OERRORS, 1); ieee80211_free_node(ni); m_freem(m); return 0; } } data->m = m; data->ni = ni; DPRINTF(sc, IWN_DEBUG_XMIT, "%s: qid %d idx %d len %d nsegs %d " "plcp %d\n", __func__, ring->qid, ring->cur, totlen, nsegs, tx->rate); /* Fill TX descriptor. */ desc->nsegs = 1; if (m->m_len != 0) desc->nsegs += nsegs; /* First DMA segment is used by the TX command. */ desc->segs[0].addr = htole32(IWN_LOADDR(data->cmd_paddr)); desc->segs[0].len = htole16(IWN_HIADDR(data->cmd_paddr) | (4 + sizeof (*tx) + hdrlen + pad) << 4); /* Other DMA segments are for data payload. */ seg = &segs[0]; for (i = 1; i <= nsegs; i++) { desc->segs[i].addr = htole32(IWN_LOADDR(seg->ds_addr)); desc->segs[i].len = htole16(IWN_HIADDR(seg->ds_addr) | seg->ds_len << 4); seg++; } bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_PREWRITE); bus_dmamap_sync(ring->cmd_dma.tag, ring->cmd_dma.map, BUS_DMASYNC_PREWRITE); bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map, BUS_DMASYNC_PREWRITE); /* Update TX scheduler. */ if (ring->qid >= sc->firstaggqueue) ops->update_sched(sc, ring->qid, ring->cur, tx->id, totlen); /* Kick TX ring. */ ring->cur = (ring->cur + 1) % IWN_TX_RING_COUNT; IWN_WRITE(sc, IWN_HBUS_TARG_WRPTR, ring->qid << 8 | ring->cur); /* Mark TX ring as full if we reach a certain threshold. */ if (++ring->queued > IWN_TX_RING_HIMARK) sc->qfullmsk |= 1 << ring->qid; DPRINTF(sc, IWN_DEBUG_TRACE, "->%s: end\n",__func__); return 0; } static void iwn_xmit_task(void *arg0, int pending) { struct iwn_softc *sc = arg0; struct ieee80211_node *ni; struct mbuf *m; int error; struct ieee80211_bpf_params p; int have_p; DPRINTF(sc, IWN_DEBUG_XMIT, "%s: called\n", __func__); IWN_LOCK(sc); /* * Dequeue frames, attempt to transmit, * then disable beaconwait when we're done. */ while ((m = mbufq_dequeue(&sc->sc_xmit_queue)) != NULL) { have_p = 0; ni = (struct ieee80211_node *)m->m_pkthdr.rcvif; /* Get xmit params if appropriate */ if (ieee80211_get_xmit_params(m, &p) == 0) have_p = 1; DPRINTF(sc, IWN_DEBUG_XMIT, "%s: m=%p, have_p=%d\n", __func__, m, have_p); /* If we have xmit params, use them */ if (have_p) error = iwn_tx_data_raw(sc, m, ni, &p); else error = iwn_tx_data(sc, m, ni); if (error != 0) { if_inc_counter(ni->ni_vap->iv_ifp, IFCOUNTER_OERRORS, 1); ieee80211_free_node(ni); m_freem(m); } } sc->sc_beacon_wait = 0; IWN_UNLOCK(sc); } /* * raw frame xmit - free node/reference if failed. */ static int iwn_raw_xmit(struct ieee80211_node *ni, struct mbuf *m, const struct ieee80211_bpf_params *params) { struct ieee80211com *ic = ni->ni_ic; struct iwn_softc *sc = ic->ic_softc; int error = 0; DPRINTF(sc, IWN_DEBUG_XMIT | IWN_DEBUG_TRACE, "->%s begin\n", __func__); IWN_LOCK(sc); if ((sc->sc_flags & IWN_FLAG_RUNNING) == 0) { m_freem(m); IWN_UNLOCK(sc); return (ENETDOWN); } /* queue frame if we have to */ if (sc->sc_beacon_wait) { if (iwn_xmit_queue_enqueue(sc, m) != 0) { m_freem(m); IWN_UNLOCK(sc); return (ENOBUFS); } /* Queued, so just return OK */ IWN_UNLOCK(sc); return (0); } if (params == NULL) { /* * Legacy path; interpret frame contents to decide * precisely how to send the frame. */ error = iwn_tx_data(sc, m, ni); } else { /* * Caller supplied explicit parameters to use in * sending the frame. 
*/ error = iwn_tx_data_raw(sc, m, ni, params); } if (error == 0) sc->sc_tx_timer = 5; else m_freem(m); IWN_UNLOCK(sc); DPRINTF(sc, IWN_DEBUG_TRACE | IWN_DEBUG_XMIT, "->%s: end\n",__func__); return (error); } /* * transmit - don't free mbuf if failed; don't free node ref if failed. */ static int iwn_transmit(struct ieee80211com *ic, struct mbuf *m) { struct iwn_softc *sc = ic->ic_softc; struct ieee80211_node *ni; int error; ni = (struct ieee80211_node *)m->m_pkthdr.rcvif; IWN_LOCK(sc); if ((sc->sc_flags & IWN_FLAG_RUNNING) == 0 || sc->sc_beacon_wait) { IWN_UNLOCK(sc); return (ENXIO); } if (sc->qfullmsk) { IWN_UNLOCK(sc); return (ENOBUFS); } error = iwn_tx_data(sc, m, ni); if (!error) sc->sc_tx_timer = 5; IWN_UNLOCK(sc); return (error); } static void iwn_scan_timeout(void *arg) { struct iwn_softc *sc = arg; struct ieee80211com *ic = &sc->sc_ic; ic_printf(ic, "scan timeout\n"); ieee80211_restart_all(ic); } static void iwn_watchdog(void *arg) { struct iwn_softc *sc = arg; struct ieee80211com *ic = &sc->sc_ic; IWN_LOCK_ASSERT(sc); KASSERT(sc->sc_flags & IWN_FLAG_RUNNING, ("not running")); DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__); if (sc->sc_tx_timer > 0) { if (--sc->sc_tx_timer == 0) { ic_printf(ic, "device timeout\n"); ieee80211_restart_all(ic); return; } } callout_reset(&sc->watchdog_to, hz, iwn_watchdog, sc); } static int iwn_cdev_open(struct cdev *dev, int flags, int type, struct thread *td) { return (0); } static int iwn_cdev_close(struct cdev *dev, int flags, int type, struct thread *td) { return (0); } static int iwn_cdev_ioctl(struct cdev *dev, unsigned long cmd, caddr_t data, int fflag, struct thread *td) { int rc; struct iwn_softc *sc = dev->si_drv1; struct iwn_ioctl_data *d; rc = priv_check(td, PRIV_DRIVER); if (rc != 0) return (0); switch (cmd) { case SIOCGIWNSTATS: d = (struct iwn_ioctl_data *) data; IWN_LOCK(sc); /* XXX validate permissions/memory/etc? */ rc = copyout(&sc->last_stat, d->dst_addr, sizeof(struct iwn_stats)); IWN_UNLOCK(sc); break; case SIOCZIWNSTATS: IWN_LOCK(sc); memset(&sc->last_stat, 0, sizeof(struct iwn_stats)); IWN_UNLOCK(sc); break; default: rc = EINVAL; break; } return (rc); } static int iwn_ioctl(struct ieee80211com *ic, u_long cmd, void *data) { return (ENOTTY); } static void iwn_parent(struct ieee80211com *ic) { struct iwn_softc *sc = ic->ic_softc; struct ieee80211vap *vap; int error; if (ic->ic_nrunning > 0) { error = iwn_init(sc); switch (error) { case 0: ieee80211_start_all(ic); break; case 1: /* radio is disabled via RFkill switch */ taskqueue_enqueue(sc->sc_tq, &sc->sc_rftoggle_task); break; default: vap = TAILQ_FIRST(&ic->ic_vaps); if (vap != NULL) ieee80211_stop(vap); break; } } else iwn_stop(sc); } /* * Send a command to the firmware. */ static int iwn_cmd(struct iwn_softc *sc, int code, const void *buf, int size, int async) { struct iwn_tx_ring *ring; struct iwn_tx_desc *desc; struct iwn_tx_data *data; struct iwn_tx_cmd *cmd; struct mbuf *m; bus_addr_t paddr; int totlen, error; int cmd_queue_num; DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__); if (async == 0) IWN_LOCK_ASSERT(sc); if (sc->sc_flags & IWN_FLAG_PAN_SUPPORT) cmd_queue_num = IWN_PAN_CMD_QUEUE; else cmd_queue_num = IWN_CMD_QUEUE_NUM; ring = &sc->txq[cmd_queue_num]; desc = &ring->desc[ring->cur]; data = &ring->data[ring->cur]; totlen = 4 + size; if (size > sizeof cmd->data) { /* Command is too large to fit in a descriptor. 
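 * Commands whose total length exceeds MCLBYTES are rejected; anything
 * smaller that still does not fit in the slot's preallocated command
 * buffer is staged in a freshly allocated jumbo mbuf and DMA-mapped on
 * the fly.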
*/ if (totlen > MCLBYTES) return EINVAL; m = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, MJUMPAGESIZE); if (m == NULL) return ENOMEM; cmd = mtod(m, struct iwn_tx_cmd *); error = bus_dmamap_load(ring->data_dmat, data->map, cmd, totlen, iwn_dma_map_addr, &paddr, BUS_DMA_NOWAIT); if (error != 0) { m_freem(m); return error; } data->m = m; } else { cmd = &ring->cmd[ring->cur]; paddr = data->cmd_paddr; } cmd->code = code; cmd->flags = 0; cmd->qid = ring->qid; cmd->idx = ring->cur; memcpy(cmd->data, buf, size); desc->nsegs = 1; desc->segs[0].addr = htole32(IWN_LOADDR(paddr)); desc->segs[0].len = htole16(IWN_HIADDR(paddr) | totlen << 4); DPRINTF(sc, IWN_DEBUG_CMD, "%s: %s (0x%x) flags %d qid %d idx %d\n", __func__, iwn_intr_str(cmd->code), cmd->code, cmd->flags, cmd->qid, cmd->idx); if (size > sizeof cmd->data) { bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_PREWRITE); } else { bus_dmamap_sync(ring->cmd_dma.tag, ring->cmd_dma.map, BUS_DMASYNC_PREWRITE); } bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map, BUS_DMASYNC_PREWRITE); /* Kick command ring. */ ring->cur = (ring->cur + 1) % IWN_TX_RING_COUNT; IWN_WRITE(sc, IWN_HBUS_TARG_WRPTR, ring->qid << 8 | ring->cur); DPRINTF(sc, IWN_DEBUG_TRACE, "->%s: end\n",__func__); return async ? 0 : msleep(desc, &sc->sc_mtx, PCATCH, "iwncmd", hz); } static int iwn4965_add_node(struct iwn_softc *sc, struct iwn_node_info *node, int async) { struct iwn4965_node_info hnode; caddr_t src, dst; DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__); /* * We use the node structure for 5000 Series internally (it is * a superset of the one for 4965AGN). We thus copy the common * fields before sending the command. */ src = (caddr_t)node; dst = (caddr_t)&hnode; memcpy(dst, src, 48); /* Skip TSC, RX MIC and TX MIC fields from ``src''. */ memcpy(dst + 48, src + 72, 20); return iwn_cmd(sc, IWN_CMD_ADD_NODE, &hnode, sizeof hnode, async); } static int iwn5000_add_node(struct iwn_softc *sc, struct iwn_node_info *node, int async) { DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__); /* Direct mapping. */ return iwn_cmd(sc, IWN_CMD_ADD_NODE, node, sizeof (*node), async); } static int iwn_set_link_quality(struct iwn_softc *sc, struct ieee80211_node *ni) { struct iwn_node *wn = (void *)ni; struct ieee80211_rateset *rs; struct iwn_cmd_link_quality linkq; int i, rate, txrate; int is_11n; DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__); memset(&linkq, 0, sizeof linkq); linkq.id = wn->id; linkq.antmsk_1stream = iwn_get_1stream_tx_antmask(sc); linkq.antmsk_2stream = iwn_get_2stream_tx_antmask(sc); linkq.ampdu_max = 32; /* XXX negotiated? */ linkq.ampdu_threshold = 3; linkq.ampdu_limit = htole16(4000); /* 4ms */ DPRINTF(sc, IWN_DEBUG_XMIT, "%s: 1stream antenna=0x%02x, 2stream antenna=0x%02x, ntxstreams=%d\n", __func__, linkq.antmsk_1stream, linkq.antmsk_2stream, sc->ntxchains); /* * Are we using 11n rates? Ensure the channel is * 11n _and_ we have some 11n rates, or don't * try. */ if (IEEE80211_IS_CHAN_HT(ni->ni_chan) && ni->ni_htrates.rs_nrates > 0) { rs = (struct ieee80211_rateset *) &ni->ni_htrates; is_11n = 1; } else { rs = &ni->ni_rates; is_11n = 0; } /* Start at highest available bit-rate. */ /* * XXX this is all very dirty! */ if (is_11n) txrate = ni->ni_htrates.rs_nrates - 1; else txrate = rs->rs_nrates - 1; for (i = 0; i < IWN_MAX_TX_RETRIES; i++) { uint32_t plcp; /* * XXX TODO: ensure the last two slots are the two lowest * rate entries, just for now. 
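 * For now slots 14 and 15 are simply pinned to txrate 0, i.e. the
 * lowest negotiated rate, so the final retries always go out at the
 * most robust rate available.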
*/ if (i == 14 || i == 15) txrate = 0; if (is_11n) rate = IEEE80211_RATE_MCS | rs->rs_rates[txrate]; else rate = IEEE80211_RV(rs->rs_rates[txrate]); /* Do rate -> PLCP config mapping */ plcp = iwn_rate_to_plcp(sc, ni, rate); linkq.retry[i] = plcp; DPRINTF(sc, IWN_DEBUG_XMIT, "%s: i=%d, txrate=%d, rate=0x%02x, plcp=0x%08x\n", __func__, i, txrate, rate, le32toh(plcp)); /* * The mimo field is an index into the table which * indicates the first index where it and subsequent entries * will not be using MIMO. * * Since we're filling linkq from 0..15 and we're filling * from the highest MCS rates to the lowest rates, if we * _are_ doing a dual-stream rate, set mimo to idx+1 (ie, * the next entry.) That way if the next entry is a non-MIMO * entry, we're already pointing at it. */ if ((le32toh(plcp) & IWN_RFLAG_MCS) && IEEE80211_RV(le32toh(plcp)) > 7) linkq.mimo = i + 1; /* Next retry at immediate lower bit-rate. */ if (txrate > 0) txrate--; } /* * If we reached the end of the list and indeed we hit * all MIMO rates (eg 5300 doing MCS23-15) then yes, * set mimo to 15. Setting it to 16 panics the firmware. */ if (linkq.mimo > 15) linkq.mimo = 15; DPRINTF(sc, IWN_DEBUG_XMIT, "%s: mimo = %d\n", __func__, linkq.mimo); DPRINTF(sc, IWN_DEBUG_TRACE, "->%s: end\n",__func__); return iwn_cmd(sc, IWN_CMD_LINK_QUALITY, &linkq, sizeof linkq, 1); } /* * Broadcast node is used to send group-addressed and management frames. */ static int iwn_add_broadcast_node(struct iwn_softc *sc, int async) { struct iwn_ops *ops = &sc->ops; struct ieee80211com *ic = &sc->sc_ic; struct iwn_node_info node; struct iwn_cmd_link_quality linkq; uint8_t txant; int i, error; DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__); sc->rxon = &sc->rx_on[IWN_RXON_BSS_CTX]; memset(&node, 0, sizeof node); IEEE80211_ADDR_COPY(node.macaddr, ieee80211broadcastaddr); node.id = sc->broadcast_id; DPRINTF(sc, IWN_DEBUG_RESET, "%s: adding broadcast node\n", __func__); if ((error = ops->add_node(sc, &node, async)) != 0) return error; /* Use the first valid TX antenna. */ txant = IWN_LSB(sc->txchainmask); memset(&linkq, 0, sizeof linkq); linkq.id = sc->broadcast_id; linkq.antmsk_1stream = iwn_get_1stream_tx_antmask(sc); linkq.antmsk_2stream = iwn_get_2stream_tx_antmask(sc); linkq.ampdu_max = 64; linkq.ampdu_threshold = 3; linkq.ampdu_limit = htole16(4000); /* 4ms */ /* Use lowest mandatory bit-rate. */ /* XXX rate table lookup? */ if (IEEE80211_IS_CHAN_5GHZ(ic->ic_curchan)) linkq.retry[0] = htole32(0xd); else linkq.retry[0] = htole32(10 | IWN_RFLAG_CCK); linkq.retry[0] |= htole32(IWN_RFLAG_ANT(txant)); /* Use same bit-rate for all TX retries. 
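 * Every retry slot repeats the slot-0 entry, so broadcast and
 * management frames are retried at that same lowest mandatory rate.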
*/ for (i = 1; i < IWN_MAX_TX_RETRIES; i++) { linkq.retry[i] = linkq.retry[0]; } DPRINTF(sc, IWN_DEBUG_TRACE, "->%s: end\n",__func__); return iwn_cmd(sc, IWN_CMD_LINK_QUALITY, &linkq, sizeof linkq, async); } static int iwn_updateedca(struct ieee80211com *ic) { #define IWN_EXP2(x) ((1 << (x)) - 1) /* CWmin = 2^ECWmin - 1 */ struct iwn_softc *sc = ic->ic_softc; struct iwn_edca_params cmd; struct chanAccParams chp; int aci; DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__); ieee80211_wme_ic_getparams(ic, &chp); memset(&cmd, 0, sizeof cmd); cmd.flags = htole32(IWN_EDCA_UPDATE); IEEE80211_LOCK(ic); for (aci = 0; aci < WME_NUM_AC; aci++) { const struct wmeParams *ac = &chp.cap_wmeParams[aci]; cmd.ac[aci].aifsn = ac->wmep_aifsn; cmd.ac[aci].cwmin = htole16(IWN_EXP2(ac->wmep_logcwmin)); cmd.ac[aci].cwmax = htole16(IWN_EXP2(ac->wmep_logcwmax)); cmd.ac[aci].txoplimit = htole16(IEEE80211_TXOP_TO_US(ac->wmep_txopLimit)); } IEEE80211_UNLOCK(ic); IWN_LOCK(sc); (void)iwn_cmd(sc, IWN_CMD_EDCA_PARAMS, &cmd, sizeof cmd, 1); IWN_UNLOCK(sc); DPRINTF(sc, IWN_DEBUG_TRACE, "->%s: end\n",__func__); return 0; #undef IWN_EXP2 } static void iwn_set_promisc(struct iwn_softc *sc) { struct ieee80211com *ic = &sc->sc_ic; uint32_t promisc_filter; promisc_filter = IWN_FILTER_CTL | IWN_FILTER_PROMISC; if (ic->ic_promisc > 0 || ic->ic_opmode == IEEE80211_M_MONITOR) sc->rxon->filter |= htole32(promisc_filter); else sc->rxon->filter &= ~htole32(promisc_filter); } static void iwn_update_promisc(struct ieee80211com *ic) { struct iwn_softc *sc = ic->ic_softc; int error; if (ic->ic_opmode == IEEE80211_M_MONITOR) return; /* nothing to do */ IWN_LOCK(sc); if (!(sc->sc_flags & IWN_FLAG_RUNNING)) { IWN_UNLOCK(sc); return; } iwn_set_promisc(sc); if ((error = iwn_send_rxon(sc, 1, 1)) != 0) { device_printf(sc->sc_dev, "%s: could not send RXON, error %d\n", __func__, error); } IWN_UNLOCK(sc); } static void iwn_update_mcast(struct ieee80211com *ic) { /* Ignore */ } static void iwn_set_led(struct iwn_softc *sc, uint8_t which, uint8_t off, uint8_t on) { struct iwn_cmd_led led; DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__); #if 0 /* XXX don't set LEDs during scan? */ if (sc->sc_is_scanning) return; #endif /* Clear microcode LED ownership. */ IWN_CLRBITS(sc, IWN_LED, IWN_LED_BSM_CTRL); led.which = which; led.unit = htole32(10000); /* on/off in unit of 100ms */ led.off = off; led.on = on; (void)iwn_cmd(sc, IWN_CMD_SET_LED, &led, sizeof led, 1); } /* * Set the critical temperature at which the firmware will stop the radio * and notify us. */ static int iwn_set_critical_temp(struct iwn_softc *sc) { struct iwn_critical_temp crit; int32_t temp; DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__); IWN_WRITE(sc, IWN_UCODE_GP1_CLR, IWN_UCODE_GP1_CTEMP_STOP_RF); if (sc->hw_type == IWN_HW_REV_TYPE_5150) temp = (IWN_CTOK(110) - sc->temp_off) * -5; else if (sc->hw_type == IWN_HW_REV_TYPE_4965) temp = IWN_CTOK(110); else temp = 110; memset(&crit, 0, sizeof crit); crit.tempR = htole32(temp); DPRINTF(sc, IWN_DEBUG_RESET, "setting critical temp to %d\n", temp); return iwn_cmd(sc, IWN_CMD_SET_CRITICAL_TEMP, &crit, sizeof crit, 0); } static int iwn_set_timing(struct iwn_softc *sc, struct ieee80211_node *ni) { struct iwn_cmd_timing cmd; uint64_t val, mod; DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__); memset(&cmd, 0, sizeof cmd); memcpy(&cmd.tstamp, ni->ni_tstamp.data, sizeof (uint64_t)); cmd.bintval = htole16(ni->ni_intval); cmd.lintval = htole16(10); /* Compute remaining time until next beacon. 
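 * That is: binitval = val - (tstamp % val), where
 * val = ni_intval * IEEE80211_DUR_TU is the beacon interval converted
 * from TU (1024us each) to microseconds, and tstamp is the timestamp
 * copied from ni_tstamp above.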
*/ val = (uint64_t)ni->ni_intval * IEEE80211_DUR_TU; mod = le64toh(cmd.tstamp) % val; cmd.binitval = htole32((uint32_t)(val - mod)); DPRINTF(sc, IWN_DEBUG_RESET, "timing bintval=%u tstamp=%ju, init=%u\n", ni->ni_intval, le64toh(cmd.tstamp), (uint32_t)(val - mod)); return iwn_cmd(sc, IWN_CMD_TIMING, &cmd, sizeof cmd, 1); } static void iwn4965_power_calibration(struct iwn_softc *sc, int temp) { DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__); /* Adjust TX power if need be (delta >= 3 degC). */ DPRINTF(sc, IWN_DEBUG_CALIBRATE, "%s: temperature %d->%d\n", __func__, sc->temp, temp); if (abs(temp - sc->temp) >= 3) { /* Record temperature of last calibration. */ sc->temp = temp; (void)iwn4965_set_txpower(sc, 1); } } /* * Set TX power for current channel (each rate has its own power settings). * This function takes into account the regulatory information from EEPROM, * the current temperature and the current voltage. */ static int iwn4965_set_txpower(struct iwn_softc *sc, int async) { /* Fixed-point arithmetic division using a n-bit fractional part. */ #define fdivround(a, b, n) \ ((((1 << n) * (a)) / (b) + (1 << n) / 2) / (1 << n)) /* Linear interpolation. */ #define interpolate(x, x1, y1, x2, y2, n) \ ((y1) + fdivround(((int)(x) - (x1)) * ((y2) - (y1)), (x2) - (x1), n)) static const int tdiv[IWN_NATTEN_GROUPS] = { 9, 8, 8, 8, 6 }; struct iwn_ucode_info *uc = &sc->ucode_info; struct iwn4965_cmd_txpower cmd; struct iwn4965_eeprom_chan_samples *chans; const uint8_t *rf_gain, *dsp_gain; int32_t vdiff, tdiff; int i, is_chan_5ghz, c, grp, maxpwr; uint8_t chan; sc->rxon = &sc->rx_on[IWN_RXON_BSS_CTX]; /* Retrieve current channel from last RXON. */ chan = sc->rxon->chan; is_chan_5ghz = (sc->rxon->flags & htole32(IWN_RXON_24GHZ)) == 0; DPRINTF(sc, IWN_DEBUG_RESET, "setting TX power for channel %d\n", chan); memset(&cmd, 0, sizeof cmd); cmd.band = is_chan_5ghz ? 0 : 1; cmd.chan = chan; if (is_chan_5ghz) { maxpwr = sc->maxpwr5GHz; rf_gain = iwn4965_rf_gain_5ghz; dsp_gain = iwn4965_dsp_gain_5ghz; } else { maxpwr = sc->maxpwr2GHz; rf_gain = iwn4965_rf_gain_2ghz; dsp_gain = iwn4965_dsp_gain_2ghz; } /* Compute voltage compensation. */ vdiff = ((int32_t)le32toh(uc->volt) - sc->eeprom_voltage) / 7; if (vdiff > 0) vdiff *= 2; if (abs(vdiff) > 2) vdiff = 0; DPRINTF(sc, IWN_DEBUG_CALIBRATE | IWN_DEBUG_TXPOW, "%s: voltage compensation=%d (UCODE=%d, EEPROM=%d)\n", __func__, vdiff, le32toh(uc->volt), sc->eeprom_voltage); /* Get channel attenuation group. */ if (chan <= 20) /* 1-20 */ grp = 4; else if (chan <= 43) /* 34-43 */ grp = 0; else if (chan <= 70) /* 44-70 */ grp = 1; else if (chan <= 124) /* 71-124 */ grp = 2; else /* 125-200 */ grp = 3; DPRINTF(sc, IWN_DEBUG_CALIBRATE | IWN_DEBUG_TXPOW, "%s: chan %d, attenuation group=%d\n", __func__, chan, grp); /* Get channel sub-band. */ for (i = 0; i < IWN_NBANDS; i++) if (sc->bands[i].lo != 0 && sc->bands[i].lo <= chan && chan <= sc->bands[i].hi) break; if (i == IWN_NBANDS) /* Can't happen in real-life. 
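 * (Defensive check: if the RXON channel is not covered by any of the
 * EEPROM sub-bands we bail out with EINVAL rather than use an
 * out-of-range sub-band index.)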
*/ return EINVAL; chans = sc->bands[i].chans; DPRINTF(sc, IWN_DEBUG_CALIBRATE | IWN_DEBUG_TXPOW, "%s: chan %d sub-band=%d\n", __func__, chan, i); for (c = 0; c < 2; c++) { uint8_t power, gain, temp; int maxchpwr, pwr, ridx, idx; power = interpolate(chan, chans[0].num, chans[0].samples[c][1].power, chans[1].num, chans[1].samples[c][1].power, 1); gain = interpolate(chan, chans[0].num, chans[0].samples[c][1].gain, chans[1].num, chans[1].samples[c][1].gain, 1); temp = interpolate(chan, chans[0].num, chans[0].samples[c][1].temp, chans[1].num, chans[1].samples[c][1].temp, 1); DPRINTF(sc, IWN_DEBUG_CALIBRATE | IWN_DEBUG_TXPOW, "%s: Tx chain %d: power=%d gain=%d temp=%d\n", __func__, c, power, gain, temp); /* Compute temperature compensation. */ tdiff = ((sc->temp - temp) * 2) / tdiv[grp]; DPRINTF(sc, IWN_DEBUG_CALIBRATE | IWN_DEBUG_TXPOW, "%s: temperature compensation=%d (current=%d, EEPROM=%d)\n", __func__, tdiff, sc->temp, temp); for (ridx = 0; ridx <= IWN_RIDX_MAX; ridx++) { /* Convert dBm to half-dBm. */ maxchpwr = sc->maxpwr[chan] * 2; if ((ridx / 8) & 1) maxchpwr -= 6; /* MIMO 2T: -3dB */ pwr = maxpwr; /* Adjust TX power based on rate. */ if ((ridx % 8) == 5) pwr -= 15; /* OFDM48: -7.5dB */ else if ((ridx % 8) == 6) pwr -= 17; /* OFDM54: -8.5dB */ else if ((ridx % 8) == 7) pwr -= 20; /* OFDM60: -10dB */ else pwr -= 10; /* Others: -5dB */ /* Do not exceed channel max TX power. */ if (pwr > maxchpwr) pwr = maxchpwr; idx = gain - (pwr - power) - tdiff - vdiff; if ((ridx / 8) & 1) /* MIMO */ idx += (int32_t)le32toh(uc->atten[grp][c]); if (cmd.band == 0) idx += 9; /* 5GHz */ if (ridx == IWN_RIDX_MAX) idx += 5; /* CCK */ /* Make sure idx stays in a valid range. */ if (idx < 0) idx = 0; else if (idx > IWN4965_MAX_PWR_INDEX) idx = IWN4965_MAX_PWR_INDEX; DPRINTF(sc, IWN_DEBUG_CALIBRATE | IWN_DEBUG_TXPOW, "%s: Tx chain %d, rate idx %d: power=%d\n", __func__, c, ridx, idx); cmd.power[ridx].rf_gain[c] = rf_gain[idx]; cmd.power[ridx].dsp_gain[c] = dsp_gain[idx]; } } DPRINTF(sc, IWN_DEBUG_CALIBRATE | IWN_DEBUG_TXPOW, "%s: set tx power for chan %d\n", __func__, chan); return iwn_cmd(sc, IWN_CMD_TXPOWER, &cmd, sizeof cmd, async); #undef interpolate #undef fdivround } static int iwn5000_set_txpower(struct iwn_softc *sc, int async) { struct iwn5000_cmd_txpower cmd; int cmdid; DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__); /* * TX power calibration is handled automatically by the firmware * for 5000 Series. */ memset(&cmd, 0, sizeof cmd); cmd.global_limit = 2 * IWN5000_TXPOWER_MAX_DBM; /* 16 dBm */ cmd.flags = IWN5000_TXPOWER_NO_CLOSED; cmd.srv_limit = IWN5000_TXPOWER_AUTO; DPRINTF(sc, IWN_DEBUG_CALIBRATE | IWN_DEBUG_XMIT, "%s: setting TX power; rev=%d\n", __func__, IWN_UCODE_API(sc->ucode_rev)); if (IWN_UCODE_API(sc->ucode_rev) == 1) cmdid = IWN_CMD_TXPOWER_DBM_V1; else cmdid = IWN_CMD_TXPOWER_DBM; return iwn_cmd(sc, cmdid, &cmd, sizeof cmd, async); } /* * Retrieve the maximum RSSI (in dBm) among receivers. 
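 * The per-antenna RSSI bytes reported by the PHY are compared and the
 * largest one among the antennas flagged in the antenna mask is kept;
 * the result is then converted to dBm by subtracting the AGC value and
 * the fixed IWN_RSSI_TO_DBM offset.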
*/ static int iwn4965_get_rssi(struct iwn_softc *sc, struct iwn_rx_stat *stat) { struct iwn4965_rx_phystat *phy = (void *)stat->phybuf; uint8_t mask, agc; int rssi; DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__); mask = (le16toh(phy->antenna) >> 4) & IWN_ANT_ABC; agc = (le16toh(phy->agc) >> 7) & 0x7f; rssi = 0; if (mask & IWN_ANT_A) rssi = MAX(rssi, phy->rssi[0]); if (mask & IWN_ANT_B) rssi = MAX(rssi, phy->rssi[2]); if (mask & IWN_ANT_C) rssi = MAX(rssi, phy->rssi[4]); DPRINTF(sc, IWN_DEBUG_RECV, "%s: agc %d mask 0x%x rssi %d %d %d result %d\n", __func__, agc, mask, phy->rssi[0], phy->rssi[2], phy->rssi[4], rssi - agc - IWN_RSSI_TO_DBM); return rssi - agc - IWN_RSSI_TO_DBM; } static int iwn5000_get_rssi(struct iwn_softc *sc, struct iwn_rx_stat *stat) { struct iwn5000_rx_phystat *phy = (void *)stat->phybuf; uint8_t agc; int rssi; DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__); agc = (le32toh(phy->agc) >> 9) & 0x7f; rssi = MAX(le16toh(phy->rssi[0]) & 0xff, le16toh(phy->rssi[1]) & 0xff); rssi = MAX(le16toh(phy->rssi[2]) & 0xff, rssi); DPRINTF(sc, IWN_DEBUG_RECV, "%s: agc %d rssi %d %d %d result %d\n", __func__, agc, phy->rssi[0], phy->rssi[1], phy->rssi[2], rssi - agc - IWN_RSSI_TO_DBM); return rssi - agc - IWN_RSSI_TO_DBM; } /* * Retrieve the average noise (in dBm) among receivers. */ static int iwn_get_noise(const struct iwn_rx_general_stats *stats) { int i, total, nbant, noise; total = nbant = 0; for (i = 0; i < 3; i++) { if ((noise = le32toh(stats->noise[i]) & 0xff) == 0) continue; total += noise; nbant++; } /* There should be at least one antenna but check anyway. */ return (nbant == 0) ? -127 : (total / nbant) - 107; } /* * Compute temperature (in degC) from last received statistics. */ static int iwn4965_get_temperature(struct iwn_softc *sc) { struct iwn_ucode_info *uc = &sc->ucode_info; int32_t r1, r2, r3, r4, temp; DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__); r1 = le32toh(uc->temp[0].chan20MHz); r2 = le32toh(uc->temp[1].chan20MHz); r3 = le32toh(uc->temp[2].chan20MHz); r4 = le32toh(sc->rawtemp); if (r1 == r3) /* Prevents division by 0 (should not happen). */ return 0; /* Sign-extend 23-bit R4 value to 32-bit. */ r4 = ((r4 & 0xffffff) ^ 0x800000) - 0x800000; /* Compute temperature in Kelvin. */ temp = (259 * (r4 - r2)) / (r3 - r1); temp = (temp * 97) / 100 + 8; DPRINTF(sc, IWN_DEBUG_ANY, "temperature %dK/%dC\n", temp, IWN_KTOC(temp)); return IWN_KTOC(temp); } static int iwn5000_get_temperature(struct iwn_softc *sc) { int32_t temp; DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__); /* * Temperature is not used by the driver for 5000 Series because * TX power calibration is handled by firmware. */ temp = le32toh(sc->rawtemp); if (sc->hw_type == IWN_HW_REV_TYPE_5150) { temp = (temp / -5) + sc->temp_off; temp = IWN_KTOC(temp); } return temp; } /* * Initialize sensitivity calibration state machine. */ static int iwn_init_sensitivity(struct iwn_softc *sc) { struct iwn_ops *ops = &sc->ops; struct iwn_calib_state *calib = &sc->calib; uint32_t flags; int error; DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__); /* Reset calibration state machine. */ memset(calib, 0, sizeof (*calib)); calib->state = IWN_CALIB_STATE_INIT; calib->cck_state = IWN_CCK_STATE_HIFA; /* Set initial correlation values. 
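 * Most of these are seeded from the per-chip sc->limits table; only the
 * CCK x4 correlation threshold starts from a fixed value (125).  All of
 * them are adjusted at runtime by iwn_tune_sensitivity().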
*/ calib->ofdm_x1 = sc->limits->min_ofdm_x1; calib->ofdm_mrc_x1 = sc->limits->min_ofdm_mrc_x1; calib->ofdm_x4 = sc->limits->min_ofdm_x4; calib->ofdm_mrc_x4 = sc->limits->min_ofdm_mrc_x4; calib->cck_x4 = 125; calib->cck_mrc_x4 = sc->limits->min_cck_mrc_x4; calib->energy_cck = sc->limits->energy_cck; /* Write initial sensitivity. */ if ((error = iwn_send_sensitivity(sc)) != 0) return error; /* Write initial gains. */ if ((error = ops->init_gains(sc)) != 0) return error; /* Request statistics at each beacon interval. */ flags = 0; DPRINTF(sc, IWN_DEBUG_CALIBRATE, "%s: sending request for statistics\n", __func__); return iwn_cmd(sc, IWN_CMD_GET_STATISTICS, &flags, sizeof flags, 1); } /* * Collect noise and RSSI statistics for the first 20 beacons received * after association and use them to determine connected antennas and * to set differential gains. */ static void iwn_collect_noise(struct iwn_softc *sc, const struct iwn_rx_general_stats *stats) { struct iwn_ops *ops = &sc->ops; struct iwn_calib_state *calib = &sc->calib; struct ieee80211com *ic = &sc->sc_ic; uint32_t val; int i; DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__); /* Accumulate RSSI and noise for all 3 antennas. */ for (i = 0; i < 3; i++) { calib->rssi[i] += le32toh(stats->rssi[i]) & 0xff; calib->noise[i] += le32toh(stats->noise[i]) & 0xff; } /* NB: We update differential gains only once after 20 beacons. */ if (++calib->nbeacons < 20) return; /* Determine highest average RSSI. */ val = MAX(calib->rssi[0], calib->rssi[1]); val = MAX(calib->rssi[2], val); /* Determine which antennas are connected. */ sc->chainmask = sc->rxchainmask; for (i = 0; i < 3; i++) if (val - calib->rssi[i] > 15 * 20) sc->chainmask &= ~(1 << i); DPRINTF(sc, IWN_DEBUG_CALIBRATE | IWN_DEBUG_XMIT, "%s: RX chains mask: theoretical=0x%x, actual=0x%x\n", __func__, sc->rxchainmask, sc->chainmask); /* If none of the TX antennas are connected, keep at least one. */ if ((sc->chainmask & sc->txchainmask) == 0) sc->chainmask |= IWN_LSB(sc->txchainmask); (void)ops->set_gains(sc); calib->state = IWN_CALIB_STATE_RUN; #ifdef notyet /* XXX Disable RX chains with no antennas connected. */ sc->rxon->rxchain = htole16(IWN_RXCHAIN_SEL(sc->chainmask)); if (sc->sc_is_scanning) device_printf(sc->sc_dev, "%s: is_scanning set, before RXON\n", __func__); (void)iwn_cmd(sc, IWN_CMD_RXON, sc->rxon, sc->rxonsz, 1); #endif /* Enable power-saving mode if requested by user. */ if (ic->ic_flags & IEEE80211_F_PMGTON) (void)iwn_set_pslevel(sc, 0, 3, 1); DPRINTF(sc, IWN_DEBUG_TRACE, "->%s: end\n",__func__); } static int iwn4965_init_gains(struct iwn_softc *sc) { struct iwn_phy_calib_gain cmd; DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__); memset(&cmd, 0, sizeof cmd); cmd.code = IWN4965_PHY_CALIB_DIFF_GAIN; /* Differential gains initially set to 0 for all 3 antennas. 
*/ DPRINTF(sc, IWN_DEBUG_CALIBRATE, "%s: setting initial differential gains\n", __func__); return iwn_cmd(sc, IWN_CMD_PHY_CALIB, &cmd, sizeof cmd, 1); } static int iwn5000_init_gains(struct iwn_softc *sc) { struct iwn_phy_calib cmd; DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__); memset(&cmd, 0, sizeof cmd); cmd.code = sc->reset_noise_gain; cmd.ngroups = 1; cmd.isvalid = 1; DPRINTF(sc, IWN_DEBUG_CALIBRATE, "%s: setting initial differential gains\n", __func__); return iwn_cmd(sc, IWN_CMD_PHY_CALIB, &cmd, sizeof cmd, 1); } static int iwn4965_set_gains(struct iwn_softc *sc) { struct iwn_calib_state *calib = &sc->calib; struct iwn_phy_calib_gain cmd; int i, delta, noise; DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__); /* Get minimal noise among connected antennas. */ noise = INT_MAX; /* NB: There's at least one antenna. */ for (i = 0; i < 3; i++) if (sc->chainmask & (1 << i)) noise = MIN(calib->noise[i], noise); memset(&cmd, 0, sizeof cmd); cmd.code = IWN4965_PHY_CALIB_DIFF_GAIN; /* Set differential gains for connected antennas. */ for (i = 0; i < 3; i++) { if (sc->chainmask & (1 << i)) { /* Compute attenuation (in unit of 1.5dB). */ delta = (noise - (int32_t)calib->noise[i]) / 30; /* NB: delta <= 0 */ /* Limit to [-4.5dB,0]. */ cmd.gain[i] = MIN(abs(delta), 3); if (delta < 0) cmd.gain[i] |= 1 << 2; /* sign bit */ } } DPRINTF(sc, IWN_DEBUG_CALIBRATE, "setting differential gains Ant A/B/C: %x/%x/%x (%x)\n", cmd.gain[0], cmd.gain[1], cmd.gain[2], sc->chainmask); return iwn_cmd(sc, IWN_CMD_PHY_CALIB, &cmd, sizeof cmd, 1); } static int iwn5000_set_gains(struct iwn_softc *sc) { struct iwn_calib_state *calib = &sc->calib; struct iwn_phy_calib_gain cmd; int i, ant, div, delta; DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__); /* We collected 20 beacons and !=6050 need a 1.5 factor. */ div = (sc->hw_type == IWN_HW_REV_TYPE_6050) ? 20 : 30; memset(&cmd, 0, sizeof cmd); cmd.code = sc->noise_gain; cmd.ngroups = 1; cmd.isvalid = 1; /* Get first available RX antenna as referential. */ ant = IWN_LSB(sc->rxchainmask); /* Set differential gains for other antennas. */ for (i = ant + 1; i < 3; i++) { if (sc->chainmask & (1 << i)) { /* The delta is relative to antenna "ant". */ delta = ((int32_t)calib->noise[ant] - (int32_t)calib->noise[i]) / div; /* Limit to [-4.5dB,+4.5dB]. */ cmd.gain[i - 1] = MIN(abs(delta), 3); if (delta < 0) cmd.gain[i - 1] |= 1 << 2; /* sign bit */ } } DPRINTF(sc, IWN_DEBUG_CALIBRATE | IWN_DEBUG_XMIT, "setting differential gains Ant B/C: %x/%x (%x)\n", cmd.gain[0], cmd.gain[1], sc->chainmask); return iwn_cmd(sc, IWN_CMD_PHY_CALIB, &cmd, sizeof cmd, 1); } /* * Tune RF RX sensitivity based on the number of false alarms detected * during the last beacon period. */ static void iwn_tune_sensitivity(struct iwn_softc *sc, const struct iwn_rx_stats *stats) { #define inc(val, inc, max) \ if ((val) < (max)) { \ if ((val) < (max) - (inc)) \ (val) += (inc); \ else \ (val) = (max); \ needs_update = 1; \ } #define dec(val, dec, min) \ if ((val) > (min)) { \ if ((val) > (min) + (dec)) \ (val) -= (dec); \ else \ (val) = (min); \ needs_update = 1; \ } const struct iwn_sensitivity_limits *limits = sc->limits; struct iwn_calib_state *calib = &sc->calib; uint32_t val, rxena, fa; uint32_t energy[3], energy_min; uint8_t noise[3], noise_ref; int i, needs_update = 0; DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__); /* Check that we've been enabled long enough. 
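 * stats->general.load is used below as the reference that the false-alarm
 * counters are compared against (the "fa > 50 * rxena" and "fa < 5 * rxena"
 * tests); while it is still zero those comparisons are meaningless, so
 * simply wait for the next statistics notification.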
*/ if ((rxena = le32toh(stats->general.load)) == 0){ DPRINTF(sc, IWN_DEBUG_TRACE, "->%s end not so long\n", __func__); return; } /* Compute number of false alarms since last call for OFDM. */ fa = le32toh(stats->ofdm.bad_plcp) - calib->bad_plcp_ofdm; fa += le32toh(stats->ofdm.fa) - calib->fa_ofdm; fa *= 200 * IEEE80211_DUR_TU; /* 200TU */ if (fa > 50 * rxena) { /* High false alarm count, decrease sensitivity. */ DPRINTF(sc, IWN_DEBUG_CALIBRATE, "%s: OFDM high false alarm count: %u\n", __func__, fa); inc(calib->ofdm_x1, 1, limits->max_ofdm_x1); inc(calib->ofdm_mrc_x1, 1, limits->max_ofdm_mrc_x1); inc(calib->ofdm_x4, 1, limits->max_ofdm_x4); inc(calib->ofdm_mrc_x4, 1, limits->max_ofdm_mrc_x4); } else if (fa < 5 * rxena) { /* Low false alarm count, increase sensitivity. */ DPRINTF(sc, IWN_DEBUG_CALIBRATE, "%s: OFDM low false alarm count: %u\n", __func__, fa); dec(calib->ofdm_x1, 1, limits->min_ofdm_x1); dec(calib->ofdm_mrc_x1, 1, limits->min_ofdm_mrc_x1); dec(calib->ofdm_x4, 1, limits->min_ofdm_x4); dec(calib->ofdm_mrc_x4, 1, limits->min_ofdm_mrc_x4); } /* Compute maximum noise among 3 receivers. */ for (i = 0; i < 3; i++) noise[i] = (le32toh(stats->general.noise[i]) >> 8) & 0xff; val = MAX(noise[0], noise[1]); val = MAX(noise[2], val); /* Insert it into our samples table. */ calib->noise_samples[calib->cur_noise_sample] = val; calib->cur_noise_sample = (calib->cur_noise_sample + 1) % 20; /* Compute maximum noise among last 20 samples. */ noise_ref = calib->noise_samples[0]; for (i = 1; i < 20; i++) noise_ref = MAX(noise_ref, calib->noise_samples[i]); /* Compute maximum energy among 3 receivers. */ for (i = 0; i < 3; i++) energy[i] = le32toh(stats->general.energy[i]); val = MIN(energy[0], energy[1]); val = MIN(energy[2], val); /* Insert it into our samples table. */ calib->energy_samples[calib->cur_energy_sample] = val; calib->cur_energy_sample = (calib->cur_energy_sample + 1) % 10; /* Compute minimum energy among last 10 samples. */ energy_min = calib->energy_samples[0]; for (i = 1; i < 10; i++) energy_min = MAX(energy_min, calib->energy_samples[i]); energy_min += 6; /* Compute number of false alarms since last call for CCK. */ fa = le32toh(stats->cck.bad_plcp) - calib->bad_plcp_cck; fa += le32toh(stats->cck.fa) - calib->fa_cck; fa *= 200 * IEEE80211_DUR_TU; /* 200TU */ if (fa > 50 * rxena) { /* High false alarm count, decrease sensitivity. */ DPRINTF(sc, IWN_DEBUG_CALIBRATE, "%s: CCK high false alarm count: %u\n", __func__, fa); calib->cck_state = IWN_CCK_STATE_HIFA; calib->low_fa = 0; if (calib->cck_x4 > 160) { calib->noise_ref = noise_ref; if (calib->energy_cck > 2) dec(calib->energy_cck, 2, energy_min); } if (calib->cck_x4 < 160) { calib->cck_x4 = 161; needs_update = 1; } else inc(calib->cck_x4, 3, limits->max_cck_x4); inc(calib->cck_mrc_x4, 3, limits->max_cck_mrc_x4); } else if (fa < 5 * rxena) { /* Low false alarm count, increase sensitivity. */ DPRINTF(sc, IWN_DEBUG_CALIBRATE, "%s: CCK low false alarm count: %u\n", __func__, fa); calib->cck_state = IWN_CCK_STATE_LOFA; calib->low_fa++; if (calib->cck_state != IWN_CCK_STATE_INIT && (((int32_t)calib->noise_ref - (int32_t)noise_ref) > 2 || calib->low_fa > 100)) { inc(calib->energy_cck, 2, limits->min_energy_cck); dec(calib->cck_x4, 3, limits->min_cck_x4); dec(calib->cck_mrc_x4, 3, limits->min_cck_mrc_x4); } } else { /* Not worth to increase or decrease sensitivity. 
*/ DPRINTF(sc, IWN_DEBUG_CALIBRATE, "%s: CCK normal false alarm count: %u\n", __func__, fa); calib->low_fa = 0; calib->noise_ref = noise_ref; if (calib->cck_state == IWN_CCK_STATE_HIFA) { /* Previous interval had many false alarms. */ dec(calib->energy_cck, 8, energy_min); } calib->cck_state = IWN_CCK_STATE_INIT; } if (needs_update) (void)iwn_send_sensitivity(sc); DPRINTF(sc, IWN_DEBUG_TRACE, "->%s: end\n",__func__); #undef dec #undef inc } static int iwn_send_sensitivity(struct iwn_softc *sc) { struct iwn_calib_state *calib = &sc->calib; struct iwn_enhanced_sensitivity_cmd cmd; int len; memset(&cmd, 0, sizeof cmd); len = sizeof (struct iwn_sensitivity_cmd); cmd.which = IWN_SENSITIVITY_WORKTBL; /* OFDM modulation. */ cmd.corr_ofdm_x1 = htole16(calib->ofdm_x1); cmd.corr_ofdm_mrc_x1 = htole16(calib->ofdm_mrc_x1); cmd.corr_ofdm_x4 = htole16(calib->ofdm_x4); cmd.corr_ofdm_mrc_x4 = htole16(calib->ofdm_mrc_x4); cmd.energy_ofdm = htole16(sc->limits->energy_ofdm); cmd.energy_ofdm_th = htole16(62); /* CCK modulation. */ cmd.corr_cck_x4 = htole16(calib->cck_x4); cmd.corr_cck_mrc_x4 = htole16(calib->cck_mrc_x4); cmd.energy_cck = htole16(calib->energy_cck); /* Barker modulation: use default values. */ cmd.corr_barker = htole16(190); cmd.corr_barker_mrc = htole16(sc->limits->barker_mrc); DPRINTF(sc, IWN_DEBUG_CALIBRATE, "%s: set sensitivity %d/%d/%d/%d/%d/%d/%d\n", __func__, calib->ofdm_x1, calib->ofdm_mrc_x1, calib->ofdm_x4, calib->ofdm_mrc_x4, calib->cck_x4, calib->cck_mrc_x4, calib->energy_cck); if (!(sc->sc_flags & IWN_FLAG_ENH_SENS)) goto send; /* Enhanced sensitivity settings. */ len = sizeof (struct iwn_enhanced_sensitivity_cmd); cmd.ofdm_det_slope_mrc = htole16(668); cmd.ofdm_det_icept_mrc = htole16(4); cmd.ofdm_det_slope = htole16(486); cmd.ofdm_det_icept = htole16(37); cmd.cck_det_slope_mrc = htole16(853); cmd.cck_det_icept_mrc = htole16(4); cmd.cck_det_slope = htole16(476); cmd.cck_det_icept = htole16(99); send: return iwn_cmd(sc, IWN_CMD_SET_SENSITIVITY, &cmd, len, 1); } /* * Look at the increase of PLCP errors over time; if it exceeds * a programmed threshold then trigger an RF retune. */ static void iwn_check_rx_recovery(struct iwn_softc *sc, struct iwn_stats *rs) { int32_t delta_ofdm, delta_ht, delta_cck; struct iwn_calib_state *calib = &sc->calib; int delta_ticks, cur_ticks; int delta_msec; int thresh; /* * Calculate the difference between the current and * previous statistics. */ delta_cck = le32toh(rs->rx.cck.bad_plcp) - calib->bad_plcp_cck; delta_ofdm = le32toh(rs->rx.ofdm.bad_plcp) - calib->bad_plcp_ofdm; delta_ht = le32toh(rs->rx.ht.bad_plcp) - calib->bad_plcp_ht; /* * Calculate the delta in time between successive statistics * messages. Yes, it can roll over; so we make sure that * this doesn't happen. * * XXX go figure out what to do about rollover * XXX go figure out what to do if ticks rolls over to -ve instead! * XXX go stab signed integer overflow undefined-ness in the face. */ cur_ticks = ticks; delta_ticks = cur_ticks - sc->last_calib_ticks; /* * If any are negative, then the firmware likely reset; so just * bail. We'll pick this up next time. */ if (delta_cck < 0 || delta_ofdm < 0 || delta_ht < 0 || delta_ticks < 0) return; /* * delta_ticks is in ticks; we need to convert it up to milliseconds * so we can do some useful math with it. */ delta_msec = ticks_to_msecs(delta_ticks); /* * Calculate what our threshold is given the current delta_msec. 
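 * Illustrative example (made-up numbers): with plcp_err_threshold = 50
 * and delta_msec = 1000, thresh = 50000; a combined bad-PLCP delta of 600
 * scales to 600 * 100 = 60000, which exceeds thresh and would flag a
 * retune, matching the linux "(delta * 100 / msecs) > threshold" form.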
*/ thresh = sc->base_params->plcp_err_threshold * delta_msec; DPRINTF(sc, IWN_DEBUG_STATE, "%s: time delta: %d; cck=%d, ofdm=%d, ht=%d, total=%d, thresh=%d\n", __func__, delta_msec, delta_cck, delta_ofdm, delta_ht, (delta_msec + delta_cck + delta_ofdm + delta_ht), thresh); /* * If we need a retune, then schedule a single channel scan * to a channel that isn't the currently active one! * * The math from linux iwlwifi: * * if ((delta * 100 / msecs) > threshold) */ if (thresh > 0 && (delta_cck + delta_ofdm + delta_ht) * 100 > thresh) { DPRINTF(sc, IWN_DEBUG_ANY, "%s: PLCP error threshold raw (%d) comparison (%d) " "over limit (%d); retune!\n", __func__, (delta_cck + delta_ofdm + delta_ht), (delta_cck + delta_ofdm + delta_ht) * 100, thresh); } } /* * Set STA mode power saving level (between 0 and 5). * Level 0 is CAM (Continuously Aware Mode), 5 is for maximum power saving. */ static int iwn_set_pslevel(struct iwn_softc *sc, int dtim, int level, int async) { struct iwn_pmgt_cmd cmd; const struct iwn_pmgt *pmgt; uint32_t max, skip_dtim; uint32_t reg; int i; DPRINTF(sc, IWN_DEBUG_PWRSAVE, "%s: dtim=%d, level=%d, async=%d\n", __func__, dtim, level, async); /* Select which PS parameters to use. */ if (dtim <= 2) pmgt = &iwn_pmgt[0][level]; else if (dtim <= 10) pmgt = &iwn_pmgt[1][level]; else pmgt = &iwn_pmgt[2][level]; memset(&cmd, 0, sizeof cmd); if (level != 0) /* not CAM */ cmd.flags |= htole16(IWN_PS_ALLOW_SLEEP); if (level == 5) cmd.flags |= htole16(IWN_PS_FAST_PD); /* Retrieve PCIe Active State Power Management (ASPM). */ reg = pci_read_config(sc->sc_dev, sc->sc_cap_off + PCIER_LINK_CTL, 4); if (!(reg & PCIEM_LINK_CTL_ASPMC_L0S)) /* L0s Entry disabled. */ cmd.flags |= htole16(IWN_PS_PCI_PMGT); cmd.rxtimeout = htole32(pmgt->rxtimeout * 1024); cmd.txtimeout = htole32(pmgt->txtimeout * 1024); if (dtim == 0) { dtim = 1; skip_dtim = 0; } else skip_dtim = pmgt->skip_dtim; if (skip_dtim != 0) { cmd.flags |= htole16(IWN_PS_SLEEP_OVER_DTIM); max = pmgt->intval[4]; if (max == (uint32_t)-1) max = dtim * (skip_dtim + 1); else if (max > dtim) max = rounddown(max, dtim); } else max = dtim; for (i = 0; i < 5; i++) cmd.intval[i] = htole32(MIN(max, pmgt->intval[i])); DPRINTF(sc, IWN_DEBUG_RESET, "setting power saving level to %d\n", level); return iwn_cmd(sc, IWN_CMD_SET_POWER_MODE, &cmd, sizeof cmd, async); } static int iwn_send_btcoex(struct iwn_softc *sc) { struct iwn_bluetooth cmd; memset(&cmd, 0, sizeof cmd); cmd.flags = IWN_BT_COEX_CHAN_ANN | IWN_BT_COEX_BT_PRIO; cmd.lead_time = IWN_BT_LEAD_TIME_DEF; cmd.max_kill = IWN_BT_MAX_KILL_DEF; DPRINTF(sc, IWN_DEBUG_RESET, "%s: configuring bluetooth coexistence\n", __func__); return iwn_cmd(sc, IWN_CMD_BT_COEX, &cmd, sizeof(cmd), 0); } static int iwn_send_advanced_btcoex(struct iwn_softc *sc) { static const uint32_t btcoex_3wire[12] = { 0xaaaaaaaa, 0xaaaaaaaa, 0xaeaaaaaa, 0xaaaaaaaa, 0xcc00ff28, 0x0000aaaa, 0xcc00aaaa, 0x0000aaaa, 0xc0004000, 0x00004000, 0xf0005000, 0xf0005000, }; struct iwn6000_btcoex_config btconfig; struct iwn2000_btcoex_config btconfig2k; struct iwn_btcoex_priotable btprio; struct iwn_btcoex_prot btprot; int error, i; uint8_t flags; memset(&btconfig, 0, sizeof btconfig); memset(&btconfig2k, 0, sizeof btconfig2k); flags = IWN_BT_FLAG_COEX6000_MODE_3W << IWN_BT_FLAG_COEX6000_MODE_SHIFT; // Done as is in linux kernel 3.2 if (sc->base_params->bt_sco_disable) flags &= ~IWN_BT_FLAG_SYNC_2_BT_DISABLE; else flags |= IWN_BT_FLAG_SYNC_2_BT_DISABLE; flags |= IWN_BT_FLAG_COEX6000_CHAN_INHIBITION; /* Default flags result is 145 as old value */ /* * 
Flags value has to be review. Values must change if we * which to disable it */ if (sc->base_params->bt_session_2) { btconfig2k.flags = flags; btconfig2k.max_kill = 5; btconfig2k.bt3_t7_timer = 1; btconfig2k.kill_ack = htole32(0xffff0000); btconfig2k.kill_cts = htole32(0xffff0000); btconfig2k.sample_time = 2; btconfig2k.bt3_t2_timer = 0xc; for (i = 0; i < 12; i++) btconfig2k.lookup_table[i] = htole32(btcoex_3wire[i]); btconfig2k.valid = htole16(0xff); btconfig2k.prio_boost = htole32(0xf0); DPRINTF(sc, IWN_DEBUG_RESET, "%s: configuring advanced bluetooth coexistence" " session 2, flags : 0x%x\n", __func__, flags); error = iwn_cmd(sc, IWN_CMD_BT_COEX, &btconfig2k, sizeof(btconfig2k), 1); } else { btconfig.flags = flags; btconfig.max_kill = 5; btconfig.bt3_t7_timer = 1; btconfig.kill_ack = htole32(0xffff0000); btconfig.kill_cts = htole32(0xffff0000); btconfig.sample_time = 2; btconfig.bt3_t2_timer = 0xc; for (i = 0; i < 12; i++) btconfig.lookup_table[i] = htole32(btcoex_3wire[i]); btconfig.valid = htole16(0xff); btconfig.prio_boost = 0xf0; DPRINTF(sc, IWN_DEBUG_RESET, "%s: configuring advanced bluetooth coexistence," " flags : 0x%x\n", __func__, flags); error = iwn_cmd(sc, IWN_CMD_BT_COEX, &btconfig, sizeof(btconfig), 1); } if (error != 0) return error; memset(&btprio, 0, sizeof btprio); btprio.calib_init1 = 0x6; btprio.calib_init2 = 0x7; btprio.calib_periodic_low1 = 0x2; btprio.calib_periodic_low2 = 0x3; btprio.calib_periodic_high1 = 0x4; btprio.calib_periodic_high2 = 0x5; btprio.dtim = 0x6; btprio.scan52 = 0x8; btprio.scan24 = 0xa; error = iwn_cmd(sc, IWN_CMD_BT_COEX_PRIOTABLE, &btprio, sizeof(btprio), 1); if (error != 0) return error; /* Force BT state machine change. */ memset(&btprot, 0, sizeof btprot); btprot.open = 1; btprot.type = 1; error = iwn_cmd(sc, IWN_CMD_BT_COEX_PROT, &btprot, sizeof(btprot), 1); if (error != 0) return error; btprot.open = 0; return iwn_cmd(sc, IWN_CMD_BT_COEX_PROT, &btprot, sizeof(btprot), 1); } static int iwn5000_runtime_calib(struct iwn_softc *sc) { struct iwn5000_calib_config cmd; memset(&cmd, 0, sizeof cmd); cmd.ucode.once.enable = 0xffffffff; cmd.ucode.once.start = IWN5000_CALIB_DC; DPRINTF(sc, IWN_DEBUG_CALIBRATE, "%s: configuring runtime calibration\n", __func__); return iwn_cmd(sc, IWN5000_CMD_CALIB_CONFIG, &cmd, sizeof(cmd), 0); } static uint32_t iwn_get_rxon_ht_flags(struct iwn_softc *sc, struct ieee80211_channel *c) { struct ieee80211com *ic = &sc->sc_ic; uint32_t htflags = 0; if (! 
IEEE80211_IS_CHAN_HT(c)) return (0); htflags |= IWN_RXON_HT_PROTMODE(ic->ic_curhtprotmode); if (IEEE80211_IS_CHAN_HT40(c)) { switch (ic->ic_curhtprotmode) { case IEEE80211_HTINFO_OPMODE_HT20PR: htflags |= IWN_RXON_HT_MODEPURE40; break; default: htflags |= IWN_RXON_HT_MODEMIXED; break; } } if (IEEE80211_IS_CHAN_HT40D(c)) htflags |= IWN_RXON_HT_HT40MINUS; return (htflags); } static int iwn_check_bss_filter(struct iwn_softc *sc) { return ((sc->rxon->filter & htole32(IWN_FILTER_BSS)) != 0); } static int iwn4965_rxon_assoc(struct iwn_softc *sc, int async) { struct iwn4965_rxon_assoc cmd; struct iwn_rxon *rxon = sc->rxon; cmd.flags = rxon->flags; cmd.filter = rxon->filter; cmd.ofdm_mask = rxon->ofdm_mask; cmd.cck_mask = rxon->cck_mask; cmd.ht_single_mask = rxon->ht_single_mask; cmd.ht_dual_mask = rxon->ht_dual_mask; cmd.rxchain = rxon->rxchain; cmd.reserved = 0; return (iwn_cmd(sc, IWN_CMD_RXON_ASSOC, &cmd, sizeof(cmd), async)); } static int iwn5000_rxon_assoc(struct iwn_softc *sc, int async) { struct iwn5000_rxon_assoc cmd; struct iwn_rxon *rxon = sc->rxon; cmd.flags = rxon->flags; cmd.filter = rxon->filter; cmd.ofdm_mask = rxon->ofdm_mask; cmd.cck_mask = rxon->cck_mask; cmd.reserved1 = 0; cmd.ht_single_mask = rxon->ht_single_mask; cmd.ht_dual_mask = rxon->ht_dual_mask; cmd.ht_triple_mask = rxon->ht_triple_mask; cmd.reserved2 = 0; cmd.rxchain = rxon->rxchain; cmd.acquisition = rxon->acquisition; cmd.reserved3 = 0; return (iwn_cmd(sc, IWN_CMD_RXON_ASSOC, &cmd, sizeof(cmd), async)); } static int iwn_send_rxon(struct iwn_softc *sc, int assoc, int async) { struct iwn_ops *ops = &sc->ops; int error; IWN_LOCK_ASSERT(sc); if (assoc && iwn_check_bss_filter(sc) != 0) { error = ops->rxon_assoc(sc, async); if (error != 0) { device_printf(sc->sc_dev, "%s: RXON_ASSOC command failed, error %d\n", __func__, error); return (error); } } else { if (sc->sc_is_scanning) device_printf(sc->sc_dev, "%s: is_scanning set, before RXON\n", __func__); error = iwn_cmd(sc, IWN_CMD_RXON, sc->rxon, sc->rxonsz, async); if (error != 0) { device_printf(sc->sc_dev, "%s: RXON command failed, error %d\n", __func__, error); return (error); } /* * Reconfiguring RXON clears the firmware nodes table so * we must add the broadcast node again. */ if (iwn_check_bss_filter(sc) == 0 && (error = iwn_add_broadcast_node(sc, async)) != 0) { device_printf(sc->sc_dev, "%s: could not add broadcast node, error %d\n", __func__, error); return (error); } } /* Configuration has changed, set TX power accordingly. */ if ((error = ops->set_txpower(sc, async)) != 0) { device_printf(sc->sc_dev, "%s: could not set TX power, error %d\n", __func__, error); return (error); } return (0); } static int iwn_config(struct iwn_softc *sc) { struct ieee80211com *ic = &sc->sc_ic; struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps); const uint8_t *macaddr; uint32_t txmask; uint16_t rxchain; int error; DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__); if ((sc->base_params->calib_need & IWN_FLG_NEED_PHY_CALIB_TEMP_OFFSET) && (sc->base_params->calib_need & IWN_FLG_NEED_PHY_CALIB_TEMP_OFFSETv2)) { device_printf(sc->sc_dev,"%s: temp_offset and temp_offsetv2 are" " exclusive each together. Review NIC config file. Conf" " : 0x%08x Flags : 0x%08x \n", __func__, sc->base_params->calib_need, (IWN_FLG_NEED_PHY_CALIB_TEMP_OFFSET | IWN_FLG_NEED_PHY_CALIB_TEMP_OFFSETv2)); return (EINVAL); } /* Compute temperature calib if needed. 
Will be send by send calib */ if (sc->base_params->calib_need & IWN_FLG_NEED_PHY_CALIB_TEMP_OFFSET) { error = iwn5000_temp_offset_calib(sc); if (error != 0) { device_printf(sc->sc_dev, "%s: could not set temperature offset\n", __func__); return (error); } } else if (sc->base_params->calib_need & IWN_FLG_NEED_PHY_CALIB_TEMP_OFFSETv2) { error = iwn5000_temp_offset_calibv2(sc); if (error != 0) { device_printf(sc->sc_dev, "%s: could not compute temperature offset v2\n", __func__); return (error); } } if (sc->hw_type == IWN_HW_REV_TYPE_6050) { /* Configure runtime DC calibration. */ error = iwn5000_runtime_calib(sc); if (error != 0) { device_printf(sc->sc_dev, "%s: could not configure runtime calibration\n", __func__); return error; } } /* Configure valid TX chains for >=5000 Series. */ if (sc->hw_type != IWN_HW_REV_TYPE_4965 && IWN_UCODE_API(sc->ucode_rev) > 1) { txmask = htole32(sc->txchainmask); DPRINTF(sc, IWN_DEBUG_RESET | IWN_DEBUG_XMIT, "%s: configuring valid TX chains 0x%x\n", __func__, txmask); error = iwn_cmd(sc, IWN5000_CMD_TX_ANT_CONFIG, &txmask, sizeof txmask, 0); if (error != 0) { device_printf(sc->sc_dev, "%s: could not configure valid TX chains, " "error %d\n", __func__, error); return error; } } /* Configure bluetooth coexistence. */ error = 0; /* Configure bluetooth coexistence if needed. */ if (sc->base_params->bt_mode == IWN_BT_ADVANCED) error = iwn_send_advanced_btcoex(sc); if (sc->base_params->bt_mode == IWN_BT_SIMPLE) error = iwn_send_btcoex(sc); if (error != 0) { device_printf(sc->sc_dev, "%s: could not configure bluetooth coexistence, error %d\n", __func__, error); return error; } /* Set mode, channel, RX filter and enable RX. */ sc->rxon = &sc->rx_on[IWN_RXON_BSS_CTX]; memset(sc->rxon, 0, sizeof (struct iwn_rxon)); macaddr = vap ? vap->iv_myaddr : ic->ic_macaddr; IEEE80211_ADDR_COPY(sc->rxon->myaddr, macaddr); IEEE80211_ADDR_COPY(sc->rxon->wlap, macaddr); sc->rxon->chan = ieee80211_chan2ieee(ic, ic->ic_curchan); sc->rxon->flags = htole32(IWN_RXON_TSF | IWN_RXON_CTS_TO_SELF); if (IEEE80211_IS_CHAN_2GHZ(ic->ic_curchan)) sc->rxon->flags |= htole32(IWN_RXON_AUTO | IWN_RXON_24GHZ); sc->rxon->filter = htole32(IWN_FILTER_MULTICAST); switch (ic->ic_opmode) { case IEEE80211_M_STA: sc->rxon->mode = IWN_MODE_STA; break; case IEEE80211_M_MONITOR: sc->rxon->mode = IWN_MODE_MONITOR; break; default: /* Should not get there. */ break; } iwn_set_promisc(sc); sc->rxon->cck_mask = 0x0f; /* not yet negotiated */ sc->rxon->ofdm_mask = 0xff; /* not yet negotiated */ sc->rxon->ht_single_mask = 0xff; sc->rxon->ht_dual_mask = 0xff; sc->rxon->ht_triple_mask = 0xff; /* * In active association mode, ensure that * all the receive chains are enabled. * * Since we're not yet doing SMPS, don't allow the * number of idle RX chains to be less than the active * number. 
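 * Consequently both the active (MIMO) and the idle RX chain counts
 * passed to the firmware below are set to sc->nrxchains.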
*/ rxchain = IWN_RXCHAIN_VALID(sc->rxchainmask) | IWN_RXCHAIN_MIMO_COUNT(sc->nrxchains) | IWN_RXCHAIN_IDLE_COUNT(sc->nrxchains); sc->rxon->rxchain = htole16(rxchain); DPRINTF(sc, IWN_DEBUG_RESET | IWN_DEBUG_XMIT, "%s: rxchainmask=0x%x, nrxchains=%d\n", __func__, sc->rxchainmask, sc->nrxchains); sc->rxon->flags |= htole32(iwn_get_rxon_ht_flags(sc, ic->ic_curchan)); DPRINTF(sc, IWN_DEBUG_RESET, "%s: setting configuration; flags=0x%08x\n", __func__, le32toh(sc->rxon->flags)); if ((error = iwn_send_rxon(sc, 0, 0)) != 0) { device_printf(sc->sc_dev, "%s: could not send RXON\n", __func__); return error; } if ((error = iwn_set_critical_temp(sc)) != 0) { device_printf(sc->sc_dev, "%s: could not set critical temperature\n", __func__); return error; } /* Set power saving level to CAM during initialization. */ if ((error = iwn_set_pslevel(sc, 0, 0, 0)) != 0) { device_printf(sc->sc_dev, "%s: could not set power saving level\n", __func__); return error; } DPRINTF(sc, IWN_DEBUG_TRACE, "->%s: end\n",__func__); return 0; } static uint16_t iwn_get_active_dwell_time(struct iwn_softc *sc, struct ieee80211_channel *c, uint8_t n_probes) { /* No channel? Default to 2GHz settings */ if (c == NULL || IEEE80211_IS_CHAN_2GHZ(c)) { return (IWN_ACTIVE_DWELL_TIME_2GHZ + IWN_ACTIVE_DWELL_FACTOR_2GHZ * (n_probes + 1)); } /* 5GHz dwell time */ return (IWN_ACTIVE_DWELL_TIME_5GHZ + IWN_ACTIVE_DWELL_FACTOR_5GHZ * (n_probes + 1)); } /* * Limit the total dwell time to 85% of the beacon interval. * * Returns the dwell time in milliseconds. */ static uint16_t iwn_limit_dwell(struct iwn_softc *sc, uint16_t dwell_time) { struct ieee80211com *ic = &sc->sc_ic; struct ieee80211vap *vap = NULL; int bintval = 0; /* bintval is in TU (1.024mS) */ if (! TAILQ_EMPTY(&ic->ic_vaps)) { vap = TAILQ_FIRST(&ic->ic_vaps); bintval = vap->iv_bss->ni_intval; } /* * If it's non-zero, we should calculate the minimum of * it and the DWELL_BASE. * * XXX Yes, the math should take into account that bintval * is 1.024mS, not 1mS.. */ if (bintval > 0) { DPRINTF(sc, IWN_DEBUG_SCAN, "%s: bintval=%d\n", __func__, bintval); return (MIN(IWN_PASSIVE_DWELL_BASE, ((bintval * 85) / 100))); } /* No association context? Default */ return (IWN_PASSIVE_DWELL_BASE); } static uint16_t iwn_get_passive_dwell_time(struct iwn_softc *sc, struct ieee80211_channel *c) { uint16_t passive; if (c == NULL || IEEE80211_IS_CHAN_2GHZ(c)) { passive = IWN_PASSIVE_DWELL_BASE + IWN_PASSIVE_DWELL_TIME_2GHZ; } else { passive = IWN_PASSIVE_DWELL_BASE + IWN_PASSIVE_DWELL_TIME_5GHZ; } /* Clamp to the beacon interval if we're associated */ return (iwn_limit_dwell(sc, passive)); } static int iwn_scan(struct iwn_softc *sc, struct ieee80211vap *vap, struct ieee80211_scan_state *ss, struct ieee80211_channel *c) { struct ieee80211com *ic = &sc->sc_ic; struct ieee80211_node *ni = vap->iv_bss; struct iwn_scan_hdr *hdr; struct iwn_cmd_data *tx; struct iwn_scan_essid *essid; struct iwn_scan_chan *chan; struct ieee80211_frame *wh; struct ieee80211_rateset *rs; uint8_t *buf, *frm; uint16_t rxchain; uint8_t txant; int buflen, error; int is_active; uint16_t dwell_active, dwell_passive; uint32_t extra, scan_service_time; DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__); /* * We are absolutely not allowed to send a scan command when another * scan command is pending. 
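 * sc->sc_is_scanning is the guard for this: it is checked right below,
 * set once the scan command has been queued to the firmware further
 * down, and cleared elsewhere when the scan completes.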
*/ if (sc->sc_is_scanning) { device_printf(sc->sc_dev, "%s: called whilst scanning!\n", __func__); return (EAGAIN); } /* Assign the scan channel */ c = ic->ic_curchan; sc->rxon = &sc->rx_on[IWN_RXON_BSS_CTX]; buf = malloc(IWN_SCAN_MAXSZ, M_DEVBUF, M_NOWAIT | M_ZERO); if (buf == NULL) { device_printf(sc->sc_dev, "%s: could not allocate buffer for scan command\n", __func__); return ENOMEM; } hdr = (struct iwn_scan_hdr *)buf; /* * Move to the next channel if no frames are received within 10ms * after sending the probe request. */ hdr->quiet_time = htole16(10); /* timeout in milliseconds */ hdr->quiet_threshold = htole16(1); /* min # of packets */ /* * Max needs to be greater than active and passive and quiet! * It's also in microseconds! */ hdr->max_svc = htole32(250 * 1024); /* * Reset scan: interval=100 * Normal scan: interval=becaon interval * suspend_time: 100 (TU) * */ extra = (100 /* suspend_time */ / 100 /* beacon interval */) << 22; //scan_service_time = extra | ((100 /* susp */ % 100 /* int */) * 1024); scan_service_time = (4 << 22) | (100 * 1024); /* Hardcode for now! */ hdr->pause_svc = htole32(scan_service_time); /* Select antennas for scanning. */ rxchain = IWN_RXCHAIN_VALID(sc->rxchainmask) | IWN_RXCHAIN_FORCE_MIMO_SEL(sc->rxchainmask) | IWN_RXCHAIN_DRIVER_FORCE; if (IEEE80211_IS_CHAN_A(c) && sc->hw_type == IWN_HW_REV_TYPE_4965) { /* Ant A must be avoided in 5GHz because of an HW bug. */ rxchain |= IWN_RXCHAIN_FORCE_SEL(IWN_ANT_B); } else /* Use all available RX antennas. */ rxchain |= IWN_RXCHAIN_FORCE_SEL(sc->rxchainmask); hdr->rxchain = htole16(rxchain); hdr->filter = htole32(IWN_FILTER_MULTICAST | IWN_FILTER_BEACON); tx = (struct iwn_cmd_data *)(hdr + 1); tx->flags = htole32(IWN_TX_AUTO_SEQ); tx->id = sc->broadcast_id; tx->lifetime = htole32(IWN_LIFETIME_INFINITE); if (IEEE80211_IS_CHAN_5GHZ(c)) { /* Send probe requests at 6Mbps. */ tx->rate = htole32(0xd); rs = &ic->ic_sup_rates[IEEE80211_MODE_11A]; } else { hdr->flags = htole32(IWN_RXON_24GHZ | IWN_RXON_AUTO); if (sc->hw_type == IWN_HW_REV_TYPE_4965 && sc->rxon->associd && sc->rxon->chan > 14) tx->rate = htole32(0xd); else { /* Send probe requests at 1Mbps. */ tx->rate = htole32(10 | IWN_RFLAG_CCK); } rs = &ic->ic_sup_rates[IEEE80211_MODE_11G]; } /* Use the first valid TX antenna. */ txant = IWN_LSB(sc->txchainmask); tx->rate |= htole32(IWN_RFLAG_ANT(txant)); /* * Only do active scanning if we're announcing a probe request * for a given SSID (or more, if we ever add it to the driver.) */ is_active = 0; /* * If we're scanning for a specific SSID, add it to the command. * * XXX maybe look at adding support for scanning multiple SSIDs? */ essid = (struct iwn_scan_essid *)(tx + 1); if (ss != NULL) { if (ss->ss_ssid[0].len != 0) { essid[0].id = IEEE80211_ELEMID_SSID; essid[0].len = ss->ss_ssid[0].len; memcpy(essid[0].data, ss->ss_ssid[0].ssid, ss->ss_ssid[0].len); } DPRINTF(sc, IWN_DEBUG_SCAN, "%s: ssid_len=%d, ssid=%*s\n", __func__, ss->ss_ssid[0].len, ss->ss_ssid[0].len, ss->ss_ssid[0].ssid); if (ss->ss_nssid > 0) is_active = 1; } /* * Build a probe request frame. Most of the following code is a * copy & paste of what is done in net80211. 
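 * The frame is assembled in place after the ESSID entries: a management
 * header addressed to the broadcast address (with our own MAC as the
 * transmitter), followed by an empty SSID element, the supported and
 * extended rate sets and, when HT is enabled, an HT capabilities element.
 * tx->len is then set to the resulting frame length.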
*/ wh = (struct ieee80211_frame *)(essid + 20); wh->i_fc[0] = IEEE80211_FC0_VERSION_0 | IEEE80211_FC0_TYPE_MGT | IEEE80211_FC0_SUBTYPE_PROBE_REQ; wh->i_fc[1] = IEEE80211_FC1_DIR_NODS; IEEE80211_ADDR_COPY(wh->i_addr1, vap->iv_ifp->if_broadcastaddr); IEEE80211_ADDR_COPY(wh->i_addr2, IF_LLADDR(vap->iv_ifp)); IEEE80211_ADDR_COPY(wh->i_addr3, vap->iv_ifp->if_broadcastaddr); *(uint16_t *)&wh->i_dur[0] = 0; /* filled by HW */ *(uint16_t *)&wh->i_seq[0] = 0; /* filled by HW */ frm = (uint8_t *)(wh + 1); frm = ieee80211_add_ssid(frm, NULL, 0); frm = ieee80211_add_rates(frm, rs); if (rs->rs_nrates > IEEE80211_RATE_SIZE) frm = ieee80211_add_xrates(frm, rs); if (ic->ic_htcaps & IEEE80211_HTC_HT) frm = ieee80211_add_htcap(frm, ni); /* Set length of probe request. */ tx->len = htole16(frm - (uint8_t *)wh); /* * If active scanning is requested but a certain channel is * marked passive, we can do active scanning if we detect * transmissions. * * There is an issue with some firmware versions that triggers * a sysassert on a "good CRC threshold" of zero (== disabled), * on a radar channel even though this means that we should NOT * send probes. * * The "good CRC threshold" is the number of frames that we * need to receive during our dwell time on a channel before * sending out probes -- setting this to a huge value will * mean we never reach it, but at the same time work around * the aforementioned issue. Thus use IWL_GOOD_CRC_TH_NEVER * here instead of IWL_GOOD_CRC_TH_DISABLED. * * This was fixed in later versions along with some other * scan changes, and the threshold behaves as a flag in those * versions. */ /* * If we're doing active scanning, set the crc_threshold * to a suitable value. This is different to active veruss * passive scanning depending upon the channel flags; the * firmware will obey that particular check for us. */ if (sc->tlv_feature_flags & IWN_UCODE_TLV_FLAGS_NEWSCAN) hdr->crc_threshold = is_active ? IWN_GOOD_CRC_TH_DEFAULT : IWN_GOOD_CRC_TH_DISABLED; else hdr->crc_threshold = is_active ? IWN_GOOD_CRC_TH_DEFAULT : IWN_GOOD_CRC_TH_NEVER; chan = (struct iwn_scan_chan *)frm; chan->chan = htole16(ieee80211_chan2ieee(ic, c)); chan->flags = 0; if (ss->ss_nssid > 0) chan->flags |= htole32(IWN_CHAN_NPBREQS(1)); chan->dsp_gain = 0x6e; /* * Set the passive/active flag depending upon the channel mode. * XXX TODO: take the is_active flag into account as well? */ if (c->ic_flags & IEEE80211_CHAN_PASSIVE) chan->flags |= htole32(IWN_CHAN_PASSIVE); else chan->flags |= htole32(IWN_CHAN_ACTIVE); /* * Calculate the active/passive dwell times. 
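 * Both values come from the helpers above: the active dwell is a base
 * time plus a per-probe factor for the band, while the passive dwell is
 * a larger base value clamped by iwn_limit_dwell() to roughly 85% of the
 * beacon interval when we are associated.  The code below additionally
 * forces the passive dwell to be longer than the active one.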
*/ dwell_active = iwn_get_active_dwell_time(sc, c, ss->ss_nssid); dwell_passive = iwn_get_passive_dwell_time(sc, c); /* Make sure they're valid */ if (dwell_passive <= dwell_active) dwell_passive = dwell_active + 1; chan->active = htole16(dwell_active); chan->passive = htole16(dwell_passive); if (IEEE80211_IS_CHAN_5GHZ(c)) chan->rf_gain = 0x3b; else chan->rf_gain = 0x28; DPRINTF(sc, IWN_DEBUG_STATE, "%s: chan %u flags 0x%x rf_gain 0x%x " "dsp_gain 0x%x active %d passive %d scan_svc_time %d crc 0x%x " "isactive=%d numssid=%d\n", __func__, chan->chan, chan->flags, chan->rf_gain, chan->dsp_gain, dwell_active, dwell_passive, scan_service_time, hdr->crc_threshold, is_active, ss->ss_nssid); hdr->nchan++; chan++; buflen = (uint8_t *)chan - buf; hdr->len = htole16(buflen); if (sc->sc_is_scanning) { device_printf(sc->sc_dev, "%s: called with is_scanning set!\n", __func__); } sc->sc_is_scanning = 1; DPRINTF(sc, IWN_DEBUG_STATE, "sending scan command nchan=%d\n", hdr->nchan); error = iwn_cmd(sc, IWN_CMD_SCAN, buf, buflen, 1); free(buf, M_DEVBUF); if (error == 0) callout_reset(&sc->scan_timeout, 5*hz, iwn_scan_timeout, sc); DPRINTF(sc, IWN_DEBUG_TRACE, "->%s: end\n",__func__); return error; } static int iwn_auth(struct iwn_softc *sc, struct ieee80211vap *vap) { struct ieee80211com *ic = &sc->sc_ic; struct ieee80211_node *ni = vap->iv_bss; int error; DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__); sc->rxon = &sc->rx_on[IWN_RXON_BSS_CTX]; /* Update adapter configuration. */ IEEE80211_ADDR_COPY(sc->rxon->bssid, ni->ni_bssid); sc->rxon->chan = ieee80211_chan2ieee(ic, ni->ni_chan); sc->rxon->flags = htole32(IWN_RXON_TSF | IWN_RXON_CTS_TO_SELF); if (IEEE80211_IS_CHAN_2GHZ(ni->ni_chan)) sc->rxon->flags |= htole32(IWN_RXON_AUTO | IWN_RXON_24GHZ); if (ic->ic_flags & IEEE80211_F_SHSLOT) sc->rxon->flags |= htole32(IWN_RXON_SHSLOT); if (ic->ic_flags & IEEE80211_F_SHPREAMBLE) sc->rxon->flags |= htole32(IWN_RXON_SHPREAMBLE); if (IEEE80211_IS_CHAN_A(ni->ni_chan)) { sc->rxon->cck_mask = 0; sc->rxon->ofdm_mask = 0x15; } else if (IEEE80211_IS_CHAN_B(ni->ni_chan)) { sc->rxon->cck_mask = 0x03; sc->rxon->ofdm_mask = 0; } else { /* Assume 802.11b/g. */ sc->rxon->cck_mask = 0x03; sc->rxon->ofdm_mask = 0x15; } /* try HT */ sc->rxon->flags |= htole32(iwn_get_rxon_ht_flags(sc, ic->ic_curchan)); DPRINTF(sc, IWN_DEBUG_STATE, "rxon chan %d flags %x cck %x ofdm %x\n", sc->rxon->chan, sc->rxon->flags, sc->rxon->cck_mask, sc->rxon->ofdm_mask); if ((error = iwn_send_rxon(sc, 0, 1)) != 0) { device_printf(sc->sc_dev, "%s: could not send RXON\n", __func__); return (error); } DPRINTF(sc, IWN_DEBUG_TRACE, "->%s: end\n",__func__); return (0); } static int iwn_run(struct iwn_softc *sc, struct ieee80211vap *vap) { struct iwn_ops *ops = &sc->ops; struct ieee80211com *ic = &sc->sc_ic; struct ieee80211_node *ni = vap->iv_bss; struct iwn_node_info node; int error; DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__); sc->rxon = &sc->rx_on[IWN_RXON_BSS_CTX]; if (ic->ic_opmode == IEEE80211_M_MONITOR) { /* Link LED blinks while monitoring. */ iwn_set_led(sc, IWN_LED_LINK, 5, 5); return 0; } if ((error = iwn_set_timing(sc, ni)) != 0) { device_printf(sc->sc_dev, "%s: could not set timing, error %d\n", __func__, error); return error; } /* Update adapter configuration. 
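 * Unlike the pre-association setup in iwn_auth(), this path also records
 * the association ID and sets the IWN_FILTER_BSS bit in the RXON filter.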
*/ IEEE80211_ADDR_COPY(sc->rxon->bssid, ni->ni_bssid); sc->rxon->associd = htole16(IEEE80211_AID(ni->ni_associd)); sc->rxon->chan = ieee80211_chan2ieee(ic, ni->ni_chan); sc->rxon->flags = htole32(IWN_RXON_TSF | IWN_RXON_CTS_TO_SELF); if (IEEE80211_IS_CHAN_2GHZ(ni->ni_chan)) sc->rxon->flags |= htole32(IWN_RXON_AUTO | IWN_RXON_24GHZ); if (ic->ic_flags & IEEE80211_F_SHSLOT) sc->rxon->flags |= htole32(IWN_RXON_SHSLOT); if (ic->ic_flags & IEEE80211_F_SHPREAMBLE) sc->rxon->flags |= htole32(IWN_RXON_SHPREAMBLE); if (IEEE80211_IS_CHAN_A(ni->ni_chan)) { sc->rxon->cck_mask = 0; sc->rxon->ofdm_mask = 0x15; } else if (IEEE80211_IS_CHAN_B(ni->ni_chan)) { sc->rxon->cck_mask = 0x03; sc->rxon->ofdm_mask = 0; } else { /* Assume 802.11b/g. */ sc->rxon->cck_mask = 0x0f; sc->rxon->ofdm_mask = 0x15; } /* try HT */ sc->rxon->flags |= htole32(iwn_get_rxon_ht_flags(sc, ni->ni_chan)); sc->rxon->filter |= htole32(IWN_FILTER_BSS); DPRINTF(sc, IWN_DEBUG_STATE, "rxon chan %d flags %x, curhtprotmode=%d\n", sc->rxon->chan, le32toh(sc->rxon->flags), ic->ic_curhtprotmode); if ((error = iwn_send_rxon(sc, 0, 1)) != 0) { device_printf(sc->sc_dev, "%s: could not send RXON\n", __func__); return error; } /* Fake a join to initialize the TX rate. */ ((struct iwn_node *)ni)->id = IWN_ID_BSS; iwn_newassoc(ni, 1); /* Add BSS node. */ memset(&node, 0, sizeof node); IEEE80211_ADDR_COPY(node.macaddr, ni->ni_macaddr); node.id = IWN_ID_BSS; if (IEEE80211_IS_CHAN_HT(ni->ni_chan)) { switch (ni->ni_htcap & IEEE80211_HTCAP_SMPS) { case IEEE80211_HTCAP_SMPS_ENA: node.htflags |= htole32(IWN_SMPS_MIMO_DIS); break; case IEEE80211_HTCAP_SMPS_DYNAMIC: node.htflags |= htole32(IWN_SMPS_MIMO_PROT); break; } node.htflags |= htole32(IWN_AMDPU_SIZE_FACTOR(3) | IWN_AMDPU_DENSITY(5)); /* 4us */ if (IEEE80211_IS_CHAN_HT40(ni->ni_chan)) node.htflags |= htole32(IWN_NODE_HT40); } DPRINTF(sc, IWN_DEBUG_STATE, "%s: adding BSS node\n", __func__); error = ops->add_node(sc, &node, 1); if (error != 0) { device_printf(sc->sc_dev, "%s: could not add BSS node, error %d\n", __func__, error); return error; } DPRINTF(sc, IWN_DEBUG_STATE, "%s: setting link quality for node %d\n", __func__, node.id); if ((error = iwn_set_link_quality(sc, ni)) != 0) { device_printf(sc->sc_dev, "%s: could not setup link quality for node %d, error %d\n", __func__, node.id, error); return error; } if ((error = iwn_init_sensitivity(sc)) != 0) { device_printf(sc->sc_dev, "%s: could not set sensitivity, error %d\n", __func__, error); return error; } /* Start periodic calibration timer. */ sc->calib.state = IWN_CALIB_STATE_ASSOC; sc->calib_cnt = 0; callout_reset(&sc->calib_to, msecs_to_ticks(500), iwn_calib_timeout, sc); /* Link LED always on while associated. */ iwn_set_led(sc, IWN_LED_LINK, 0, 1); DPRINTF(sc, IWN_DEBUG_TRACE, "->%s: end\n",__func__); return 0; } /* * This function is called by upper layer when an ADDBA request is received * from another STA and before the ADDBA response is sent. 
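 * The handler below extracts the TID and the starting sequence number
 * from the BA parameter set and sequence control fields, pushes an
 * IWN_FLAG_SET_ADDBA node update to the firmware, and then hands the
 * request on to the saved net80211 sc_ampdu_rx_start hook.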
*/ static int iwn_ampdu_rx_start(struct ieee80211_node *ni, struct ieee80211_rx_ampdu *rap, int baparamset, int batimeout, int baseqctl) { #define MS(_v, _f) (((_v) & _f) >> _f##_S) struct iwn_softc *sc = ni->ni_ic->ic_softc; struct iwn_ops *ops = &sc->ops; struct iwn_node *wn = (void *)ni; struct iwn_node_info node; uint16_t ssn; uint8_t tid; int error; DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__); tid = MS(le16toh(baparamset), IEEE80211_BAPS_TID); ssn = MS(le16toh(baseqctl), IEEE80211_BASEQ_START); if (wn->id == IWN_ID_UNDEFINED) return (ENOENT); memset(&node, 0, sizeof node); node.id = wn->id; node.control = IWN_NODE_UPDATE; node.flags = IWN_FLAG_SET_ADDBA; node.addba_tid = tid; node.addba_ssn = htole16(ssn); DPRINTF(sc, IWN_DEBUG_RECV, "ADDBA RA=%d TID=%d SSN=%d\n", wn->id, tid, ssn); error = ops->add_node(sc, &node, 1); if (error != 0) return error; return sc->sc_ampdu_rx_start(ni, rap, baparamset, batimeout, baseqctl); #undef MS } /* * This function is called by upper layer on teardown of an HT-immediate * Block Ack agreement (eg. uppon receipt of a DELBA frame). */ static void iwn_ampdu_rx_stop(struct ieee80211_node *ni, struct ieee80211_rx_ampdu *rap) { struct ieee80211com *ic = ni->ni_ic; struct iwn_softc *sc = ic->ic_softc; struct iwn_ops *ops = &sc->ops; struct iwn_node *wn = (void *)ni; struct iwn_node_info node; uint8_t tid; DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__); if (wn->id == IWN_ID_UNDEFINED) goto end; /* XXX: tid as an argument */ for (tid = 0; tid < WME_NUM_TID; tid++) { if (&ni->ni_rx_ampdu[tid] == rap) break; } memset(&node, 0, sizeof node); node.id = wn->id; node.control = IWN_NODE_UPDATE; node.flags = IWN_FLAG_SET_DELBA; node.delba_tid = tid; DPRINTF(sc, IWN_DEBUG_RECV, "DELBA RA=%d TID=%d\n", wn->id, tid); (void)ops->add_node(sc, &node, 1); end: sc->sc_ampdu_rx_stop(ni, rap); } static int iwn_addba_request(struct ieee80211_node *ni, struct ieee80211_tx_ampdu *tap, int dialogtoken, int baparamset, int batimeout) { struct iwn_softc *sc = ni->ni_ic->ic_softc; int qid; DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__); for (qid = sc->firstaggqueue; qid < sc->ntxqs; qid++) { if (sc->qid2tap[qid] == NULL) break; } if (qid == sc->ntxqs) { DPRINTF(sc, IWN_DEBUG_XMIT, "%s: not free aggregation queue\n", __func__); return 0; } tap->txa_private = malloc(sizeof(int), M_DEVBUF, M_NOWAIT); if (tap->txa_private == NULL) { device_printf(sc->sc_dev, "%s: failed to alloc TX aggregation structure\n", __func__); return 0; } sc->qid2tap[qid] = tap; *(int *)tap->txa_private = qid; return sc->sc_addba_request(ni, tap, dialogtoken, baparamset, batimeout); } static int iwn_addba_response(struct ieee80211_node *ni, struct ieee80211_tx_ampdu *tap, int code, int baparamset, int batimeout) { struct iwn_softc *sc = ni->ni_ic->ic_softc; int qid = *(int *)tap->txa_private; uint8_t tid = tap->txa_tid; int ret; DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__); if (code == IEEE80211_STATUS_SUCCESS) { ni->ni_txseqs[tid] = tap->txa_start & 0xfff; ret = iwn_ampdu_tx_start(ni->ni_ic, ni, tid); if (ret != 1) return ret; } else { sc->qid2tap[qid] = NULL; free(tap->txa_private, M_DEVBUF); tap->txa_private = NULL; } return sc->sc_addba_response(ni, tap, code, baparamset, batimeout); } /* * This function is called by upper layer when an ADDBA response is received * from another STA. 
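 * On a successful response the handler below re-enables the TID in the
 * node's disable_tid mask, updates the firmware node, programs the
 * aggregation queue reserved earlier in iwn_addba_request() with the
 * starting sequence number, and refreshes the link quality table.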
*/ static int iwn_ampdu_tx_start(struct ieee80211com *ic, struct ieee80211_node *ni, uint8_t tid) { struct ieee80211_tx_ampdu *tap = &ni->ni_tx_ampdu[tid]; struct iwn_softc *sc = ni->ni_ic->ic_softc; struct iwn_ops *ops = &sc->ops; struct iwn_node *wn = (void *)ni; struct iwn_node_info node; int error, qid; DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__); if (wn->id == IWN_ID_UNDEFINED) return (0); /* Enable TX for the specified RA/TID. */ wn->disable_tid &= ~(1 << tid); memset(&node, 0, sizeof node); node.id = wn->id; node.control = IWN_NODE_UPDATE; node.flags = IWN_FLAG_SET_DISABLE_TID; node.disable_tid = htole16(wn->disable_tid); error = ops->add_node(sc, &node, 1); if (error != 0) return 0; if ((error = iwn_nic_lock(sc)) != 0) return 0; qid = *(int *)tap->txa_private; DPRINTF(sc, IWN_DEBUG_XMIT, "%s: ra=%d tid=%d ssn=%d qid=%d\n", __func__, wn->id, tid, tap->txa_start, qid); ops->ampdu_tx_start(sc, ni, qid, tid, tap->txa_start & 0xfff); iwn_nic_unlock(sc); iwn_set_link_quality(sc, ni); return 1; } static void iwn_ampdu_tx_stop(struct ieee80211_node *ni, struct ieee80211_tx_ampdu *tap) { struct iwn_softc *sc = ni->ni_ic->ic_softc; struct iwn_ops *ops = &sc->ops; uint8_t tid = tap->txa_tid; int qid; DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__); sc->sc_addba_stop(ni, tap); if (tap->txa_private == NULL) return; qid = *(int *)tap->txa_private; if (sc->txq[qid].queued != 0) return; if (iwn_nic_lock(sc) != 0) return; ops->ampdu_tx_stop(sc, qid, tid, tap->txa_start & 0xfff); iwn_nic_unlock(sc); sc->qid2tap[qid] = NULL; free(tap->txa_private, M_DEVBUF); tap->txa_private = NULL; } static void iwn4965_ampdu_tx_start(struct iwn_softc *sc, struct ieee80211_node *ni, int qid, uint8_t tid, uint16_t ssn) { struct iwn_node *wn = (void *)ni; DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__); /* Stop TX scheduler while we're changing its configuration. */ iwn_prph_write(sc, IWN4965_SCHED_QUEUE_STATUS(qid), IWN4965_TXQ_STATUS_CHGACT); /* Assign RA/TID translation to the queue. */ iwn_mem_write_2(sc, sc->sched_base + IWN4965_SCHED_TRANS_TBL(qid), wn->id << 4 | tid); /* Enable chain-building mode for the queue. */ iwn_prph_setbits(sc, IWN4965_SCHED_QCHAIN_SEL, 1 << qid); /* Set starting sequence number from the ADDBA request. */ sc->txq[qid].cur = sc->txq[qid].read = (ssn & 0xff); IWN_WRITE(sc, IWN_HBUS_TARG_WRPTR, qid << 8 | (ssn & 0xff)); iwn_prph_write(sc, IWN4965_SCHED_QUEUE_RDPTR(qid), ssn); /* Set scheduler window size. */ iwn_mem_write(sc, sc->sched_base + IWN4965_SCHED_QUEUE_OFFSET(qid), IWN_SCHED_WINSZ); /* Set scheduler frame limit. */ iwn_mem_write(sc, sc->sched_base + IWN4965_SCHED_QUEUE_OFFSET(qid) + 4, IWN_SCHED_LIMIT << 16); /* Enable interrupts for the queue. */ iwn_prph_setbits(sc, IWN4965_SCHED_INTR_MASK, 1 << qid); /* Mark the queue as active. */ iwn_prph_write(sc, IWN4965_SCHED_QUEUE_STATUS(qid), IWN4965_TXQ_STATUS_ACTIVE | IWN4965_TXQ_STATUS_AGGR_ENA | iwn_tid2fifo[tid] << 1); } static void iwn4965_ampdu_tx_stop(struct iwn_softc *sc, int qid, uint8_t tid, uint16_t ssn) { DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__); /* Stop TX scheduler while we're changing its configuration. */ iwn_prph_write(sc, IWN4965_SCHED_QUEUE_STATUS(qid), IWN4965_TXQ_STATUS_CHGACT); /* Set starting sequence number from the ADDBA request. */ IWN_WRITE(sc, IWN_HBUS_TARG_WRPTR, qid << 8 | (ssn & 0xff)); iwn_prph_write(sc, IWN4965_SCHED_QUEUE_RDPTR(qid), ssn); /* Disable interrupts for the queue. 
*/ iwn_prph_clrbits(sc, IWN4965_SCHED_INTR_MASK, 1 << qid); /* Mark the queue as inactive. */ iwn_prph_write(sc, IWN4965_SCHED_QUEUE_STATUS(qid), IWN4965_TXQ_STATUS_INACTIVE | iwn_tid2fifo[tid] << 1); } static void iwn5000_ampdu_tx_start(struct iwn_softc *sc, struct ieee80211_node *ni, int qid, uint8_t tid, uint16_t ssn) { DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__); struct iwn_node *wn = (void *)ni; /* Stop TX scheduler while we're changing its configuration. */ iwn_prph_write(sc, IWN5000_SCHED_QUEUE_STATUS(qid), IWN5000_TXQ_STATUS_CHGACT); /* Assign RA/TID translation to the queue. */ iwn_mem_write_2(sc, sc->sched_base + IWN5000_SCHED_TRANS_TBL(qid), wn->id << 4 | tid); /* Enable chain-building mode for the queue. */ iwn_prph_setbits(sc, IWN5000_SCHED_QCHAIN_SEL, 1 << qid); /* Enable aggregation for the queue. */ iwn_prph_setbits(sc, IWN5000_SCHED_AGGR_SEL, 1 << qid); /* Set starting sequence number from the ADDBA request. */ sc->txq[qid].cur = sc->txq[qid].read = (ssn & 0xff); IWN_WRITE(sc, IWN_HBUS_TARG_WRPTR, qid << 8 | (ssn & 0xff)); iwn_prph_write(sc, IWN5000_SCHED_QUEUE_RDPTR(qid), ssn); /* Set scheduler window size and frame limit. */ iwn_mem_write(sc, sc->sched_base + IWN5000_SCHED_QUEUE_OFFSET(qid) + 4, IWN_SCHED_LIMIT << 16 | IWN_SCHED_WINSZ); /* Enable interrupts for the queue. */ iwn_prph_setbits(sc, IWN5000_SCHED_INTR_MASK, 1 << qid); /* Mark the queue as active. */ iwn_prph_write(sc, IWN5000_SCHED_QUEUE_STATUS(qid), IWN5000_TXQ_STATUS_ACTIVE | iwn_tid2fifo[tid]); } static void iwn5000_ampdu_tx_stop(struct iwn_softc *sc, int qid, uint8_t tid, uint16_t ssn) { DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__); /* Stop TX scheduler while we're changing its configuration. */ iwn_prph_write(sc, IWN5000_SCHED_QUEUE_STATUS(qid), IWN5000_TXQ_STATUS_CHGACT); /* Disable aggregation for the queue. */ iwn_prph_clrbits(sc, IWN5000_SCHED_AGGR_SEL, 1 << qid); /* Set starting sequence number from the ADDBA request. */ IWN_WRITE(sc, IWN_HBUS_TARG_WRPTR, qid << 8 | (ssn & 0xff)); iwn_prph_write(sc, IWN5000_SCHED_QUEUE_RDPTR(qid), ssn); /* Disable interrupts for the queue. */ iwn_prph_clrbits(sc, IWN5000_SCHED_INTR_MASK, 1 << qid); /* Mark the queue as inactive. */ iwn_prph_write(sc, IWN5000_SCHED_QUEUE_STATUS(qid), IWN5000_TXQ_STATUS_INACTIVE | iwn_tid2fifo[tid]); } /* * Query calibration tables from the initialization firmware. We do this * only once at first boot. Called from a process context. */ static int iwn5000_query_calibration(struct iwn_softc *sc) { struct iwn5000_calib_config cmd; int error; memset(&cmd, 0, sizeof cmd); cmd.ucode.once.enable = htole32(0xffffffff); cmd.ucode.once.start = htole32(0xffffffff); cmd.ucode.once.send = htole32(0xffffffff); cmd.ucode.flags = htole32(0xffffffff); DPRINTF(sc, IWN_DEBUG_CALIBRATE, "%s: sending calibration query\n", __func__); error = iwn_cmd(sc, IWN5000_CMD_CALIB_CONFIG, &cmd, sizeof cmd, 0); if (error != 0) return error; /* Wait at most two seconds for calibration to complete. */ if (!(sc->sc_flags & IWN_FLAG_CALIB_DONE)) error = msleep(sc, &sc->sc_mtx, PCATCH, "iwncal", 2 * hz); return error; } /* * Send calibration results to the runtime firmware. These results were * obtained on first boot from the initialization firmware. 
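 * The loop below walks the stored calibcmd[] results, skipping indices
 * this chip does not need (per the base_params->calib_need mask) or for
 * which no result was captured, and replays each one as a PHY_CALIB
 * command.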
*/ static int iwn5000_send_calibration(struct iwn_softc *sc) { int idx, error; for (idx = 0; idx < IWN5000_PHY_CALIB_MAX_RESULT; idx++) { if (!(sc->base_params->calib_need & (1<<idx))) continue; if (sc->calibcmd[idx].buf == NULL) { DPRINTF(sc, IWN_DEBUG_CALIBRATE, "Need calib idx : %d but no available data\n", idx); continue; } DPRINTF(sc, IWN_DEBUG_CALIBRATE, "send calibration result idx=%d len=%d\n", idx, sc->calibcmd[idx].len); error = iwn_cmd(sc, IWN_CMD_PHY_CALIB, sc->calibcmd[idx].buf, sc->calibcmd[idx].len, 0); if (error != 0) { device_printf(sc->sc_dev, "%s: could not send calibration result, error %d\n", __func__, error); return error; } } return 0; } static int iwn5000_send_wimax_coex(struct iwn_softc *sc) { struct iwn5000_wimax_coex wimax; #if 0 if (sc->hw_type == IWN_HW_REV_TYPE_6050) { /* Enable WiMAX coexistence for combo adapters. */ wimax.flags = IWN_WIMAX_COEX_ASSOC_WA_UNMASK | IWN_WIMAX_COEX_UNASSOC_WA_UNMASK | IWN_WIMAX_COEX_STA_TABLE_VALID | IWN_WIMAX_COEX_ENABLE; memcpy(wimax.events, iwn6050_wimax_events, sizeof iwn6050_wimax_events); } else #endif { /* Disable WiMAX coexistence. */ wimax.flags = 0; memset(wimax.events, 0, sizeof wimax.events); } DPRINTF(sc, IWN_DEBUG_RESET, "%s: Configuring WiMAX coexistence\n", __func__); return iwn_cmd(sc, IWN5000_CMD_WIMAX_COEX, &wimax, sizeof wimax, 0); } static int iwn5000_crystal_calib(struct iwn_softc *sc) { struct iwn5000_phy_calib_crystal cmd; memset(&cmd, 0, sizeof cmd); cmd.code = IWN5000_PHY_CALIB_CRYSTAL; cmd.ngroups = 1; cmd.isvalid = 1; cmd.cap_pin[0] = le32toh(sc->eeprom_crystal) & 0xff; cmd.cap_pin[1] = (le32toh(sc->eeprom_crystal) >> 16) & 0xff; DPRINTF(sc, IWN_DEBUG_CALIBRATE, "sending crystal calibration %d, %d\n", cmd.cap_pin[0], cmd.cap_pin[1]); return iwn_cmd(sc, IWN_CMD_PHY_CALIB, &cmd, sizeof cmd, 0); } static int iwn5000_temp_offset_calib(struct iwn_softc *sc) { struct iwn5000_phy_calib_temp_offset cmd; memset(&cmd, 0, sizeof cmd); cmd.code = IWN5000_PHY_CALIB_TEMP_OFFSET; cmd.ngroups = 1; cmd.isvalid = 1; if (sc->eeprom_temp != 0) cmd.offset = htole16(sc->eeprom_temp); else cmd.offset = htole16(IWN_DEFAULT_TEMP_OFFSET); DPRINTF(sc, IWN_DEBUG_CALIBRATE, "setting radio sensor offset to %d\n", le16toh(cmd.offset)); return iwn_cmd(sc, IWN_CMD_PHY_CALIB, &cmd, sizeof cmd, 0); } static int iwn5000_temp_offset_calibv2(struct iwn_softc *sc) { struct iwn5000_phy_calib_temp_offsetv2 cmd; memset(&cmd, 0, sizeof cmd); cmd.code = IWN5000_PHY_CALIB_TEMP_OFFSET; cmd.ngroups = 1; cmd.isvalid = 1; if (sc->eeprom_temp != 0) { cmd.offset_low = htole16(sc->eeprom_temp); cmd.offset_high = htole16(sc->eeprom_temp_high); } else { cmd.offset_low = htole16(IWN_DEFAULT_TEMP_OFFSET); cmd.offset_high = htole16(IWN_DEFAULT_TEMP_OFFSET); } cmd.burnt_voltage_ref = htole16(sc->eeprom_voltage); DPRINTF(sc, IWN_DEBUG_CALIBRATE, "setting radio sensor low offset to %d, high offset to %d, voltage to %d\n", le16toh(cmd.offset_low), le16toh(cmd.offset_high), le16toh(cmd.burnt_voltage_ref)); return iwn_cmd(sc, IWN_CMD_PHY_CALIB, &cmd, sizeof cmd, 0); } /* * This function is called after the runtime firmware notifies us of its * readiness (called in a process context). */ static int iwn4965_post_alive(struct iwn_softc *sc) { int error, qid; if ((error = iwn_nic_lock(sc)) != 0) return error; DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__); /* Clear TX scheduler state in SRAM.
*/ sc->sched_base = iwn_prph_read(sc, IWN_SCHED_SRAM_ADDR); iwn_mem_set_region_4(sc, sc->sched_base + IWN4965_SCHED_CTX_OFF, 0, IWN4965_SCHED_CTX_LEN / sizeof (uint32_t)); /* Set physical address of TX scheduler rings (1KB aligned). */ iwn_prph_write(sc, IWN4965_SCHED_DRAM_ADDR, sc->sched_dma.paddr >> 10); IWN_SETBITS(sc, IWN_FH_TX_CHICKEN, IWN_FH_TX_CHICKEN_SCHED_RETRY); /* Disable chain mode for all our 16 queues. */ iwn_prph_write(sc, IWN4965_SCHED_QCHAIN_SEL, 0); for (qid = 0; qid < IWN4965_NTXQUEUES; qid++) { iwn_prph_write(sc, IWN4965_SCHED_QUEUE_RDPTR(qid), 0); IWN_WRITE(sc, IWN_HBUS_TARG_WRPTR, qid << 8 | 0); /* Set scheduler window size. */ iwn_mem_write(sc, sc->sched_base + IWN4965_SCHED_QUEUE_OFFSET(qid), IWN_SCHED_WINSZ); /* Set scheduler frame limit. */ iwn_mem_write(sc, sc->sched_base + IWN4965_SCHED_QUEUE_OFFSET(qid) + 4, IWN_SCHED_LIMIT << 16); } /* Enable interrupts for all our 16 queues. */ iwn_prph_write(sc, IWN4965_SCHED_INTR_MASK, 0xffff); /* Identify TX FIFO rings (0-7). */ iwn_prph_write(sc, IWN4965_SCHED_TXFACT, 0xff); /* Mark TX rings (4 EDCA + cmd + 2 HCCA) as active. */ for (qid = 0; qid < 7; qid++) { static uint8_t qid2fifo[] = { 3, 2, 1, 0, 4, 5, 6 }; iwn_prph_write(sc, IWN4965_SCHED_QUEUE_STATUS(qid), IWN4965_TXQ_STATUS_ACTIVE | qid2fifo[qid] << 1); } iwn_nic_unlock(sc); return 0; } /* * This function is called after the initialization or runtime firmware * notifies us of its readiness (called in a process context). */ static int iwn5000_post_alive(struct iwn_softc *sc) { int error, qid; DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__); /* Switch to using ICT interrupt mode. */ iwn5000_ict_reset(sc); if ((error = iwn_nic_lock(sc)) != 0){ DPRINTF(sc, IWN_DEBUG_TRACE, "->%s end in error\n", __func__); return error; } /* Clear TX scheduler state in SRAM. */ sc->sched_base = iwn_prph_read(sc, IWN_SCHED_SRAM_ADDR); iwn_mem_set_region_4(sc, sc->sched_base + IWN5000_SCHED_CTX_OFF, 0, IWN5000_SCHED_CTX_LEN / sizeof (uint32_t)); /* Set physical address of TX scheduler rings (1KB aligned). */ iwn_prph_write(sc, IWN5000_SCHED_DRAM_ADDR, sc->sched_dma.paddr >> 10); IWN_SETBITS(sc, IWN_FH_TX_CHICKEN, IWN_FH_TX_CHICKEN_SCHED_RETRY); /* Enable chain mode for all queues, except command queue. */ if (sc->sc_flags & IWN_FLAG_PAN_SUPPORT) iwn_prph_write(sc, IWN5000_SCHED_QCHAIN_SEL, 0xfffdf); else iwn_prph_write(sc, IWN5000_SCHED_QCHAIN_SEL, 0xfffef); iwn_prph_write(sc, IWN5000_SCHED_AGGR_SEL, 0); for (qid = 0; qid < IWN5000_NTXQUEUES; qid++) { iwn_prph_write(sc, IWN5000_SCHED_QUEUE_RDPTR(qid), 0); IWN_WRITE(sc, IWN_HBUS_TARG_WRPTR, qid << 8 | 0); iwn_mem_write(sc, sc->sched_base + IWN5000_SCHED_QUEUE_OFFSET(qid), 0); /* Set scheduler window size and frame limit. */ iwn_mem_write(sc, sc->sched_base + IWN5000_SCHED_QUEUE_OFFSET(qid) + 4, IWN_SCHED_LIMIT << 16 | IWN_SCHED_WINSZ); } /* Enable interrupts for all our 20 queues. */ iwn_prph_write(sc, IWN5000_SCHED_INTR_MASK, 0xfffff); /* Identify TX FIFO rings (0-7). */ iwn_prph_write(sc, IWN5000_SCHED_TXFACT, 0xff); /* Mark TX rings (4 EDCA + cmd + 2 HCCA) as active. */ if (sc->sc_flags & IWN_FLAG_PAN_SUPPORT) { /* Mark TX rings as active. */ for (qid = 0; qid < 11; qid++) { static uint8_t qid2fifo[] = { 3, 2, 1, 0, 0, 4, 2, 5, 4, 7, 5 }; iwn_prph_write(sc, IWN5000_SCHED_QUEUE_STATUS(qid), IWN5000_TXQ_STATUS_ACTIVE | qid2fifo[qid]); } } else { /* Mark TX rings (4 EDCA + cmd + 2 HCCA) as active. 
*/ for (qid = 0; qid < 7; qid++) { static uint8_t qid2fifo[] = { 3, 2, 1, 0, 7, 5, 6 }; iwn_prph_write(sc, IWN5000_SCHED_QUEUE_STATUS(qid), IWN5000_TXQ_STATUS_ACTIVE | qid2fifo[qid]); } } iwn_nic_unlock(sc); /* Configure WiMAX coexistence for combo adapters. */ error = iwn5000_send_wimax_coex(sc); if (error != 0) { device_printf(sc->sc_dev, "%s: could not configure WiMAX coexistence, error %d\n", __func__, error); return error; } if (sc->hw_type != IWN_HW_REV_TYPE_5150) { /* Perform crystal calibration. */ error = iwn5000_crystal_calib(sc); if (error != 0) { device_printf(sc->sc_dev, "%s: crystal calibration failed, error %d\n", __func__, error); return error; } } if (!(sc->sc_flags & IWN_FLAG_CALIB_DONE)) { /* Query calibration from the initialization firmware. */ if ((error = iwn5000_query_calibration(sc)) != 0) { device_printf(sc->sc_dev, "%s: could not query calibration, error %d\n", __func__, error); return error; } /* * We have the calibration results now, reboot with the * runtime firmware (call ourselves recursively!) */ iwn_hw_stop(sc); error = iwn_hw_init(sc); } else { /* Send calibration results to runtime firmware. */ error = iwn5000_send_calibration(sc); } DPRINTF(sc, IWN_DEBUG_TRACE, "->%s: end\n",__func__); return error; } /* * The firmware boot code is small and is intended to be copied directly into * the NIC internal memory (no DMA transfer). */ static int iwn4965_load_bootcode(struct iwn_softc *sc, const uint8_t *ucode, int size) { int error, ntries; size /= sizeof (uint32_t); if ((error = iwn_nic_lock(sc)) != 0) return error; /* Copy microcode image into NIC memory. */ iwn_prph_write_region_4(sc, IWN_BSM_SRAM_BASE, (const uint32_t *)ucode, size); iwn_prph_write(sc, IWN_BSM_WR_MEM_SRC, 0); iwn_prph_write(sc, IWN_BSM_WR_MEM_DST, IWN_FW_TEXT_BASE); iwn_prph_write(sc, IWN_BSM_WR_DWCOUNT, size); /* Start boot load now. */ iwn_prph_write(sc, IWN_BSM_WR_CTRL, IWN_BSM_WR_CTRL_START); /* Wait for transfer to complete. */ for (ntries = 0; ntries < 1000; ntries++) { if (!(iwn_prph_read(sc, IWN_BSM_WR_CTRL) & IWN_BSM_WR_CTRL_START)) break; DELAY(10); } if (ntries == 1000) { device_printf(sc->sc_dev, "%s: could not load boot firmware\n", __func__); iwn_nic_unlock(sc); return ETIMEDOUT; } /* Enable boot after power up. */ iwn_prph_write(sc, IWN_BSM_WR_CTRL, IWN_BSM_WR_CTRL_START_EN); iwn_nic_unlock(sc); return 0; } static int iwn4965_load_firmware(struct iwn_softc *sc) { struct iwn_fw_info *fw = &sc->fw; struct iwn_dma_info *dma = &sc->fw_dma; int error; /* Copy initialization sections into pre-allocated DMA-safe memory. */ memcpy(dma->vaddr, fw->init.data, fw->init.datasz); bus_dmamap_sync(dma->tag, dma->map, BUS_DMASYNC_PREWRITE); memcpy(dma->vaddr + IWN4965_FW_DATA_MAXSZ, fw->init.text, fw->init.textsz); bus_dmamap_sync(dma->tag, dma->map, BUS_DMASYNC_PREWRITE); /* Tell adapter where to find initialization sections. */ if ((error = iwn_nic_lock(sc)) != 0) return error; iwn_prph_write(sc, IWN_BSM_DRAM_DATA_ADDR, dma->paddr >> 4); iwn_prph_write(sc, IWN_BSM_DRAM_DATA_SIZE, fw->init.datasz); iwn_prph_write(sc, IWN_BSM_DRAM_TEXT_ADDR, (dma->paddr + IWN4965_FW_DATA_MAXSZ) >> 4); iwn_prph_write(sc, IWN_BSM_DRAM_TEXT_SIZE, fw->init.textsz); iwn_nic_unlock(sc); /* Load firmware boot code. */ error = iwn4965_load_bootcode(sc, fw->boot.text, fw->boot.textsz); if (error != 0) { device_printf(sc->sc_dev, "%s: could not load boot firmware\n", __func__); return error; } /* Now press "execute". */ IWN_WRITE(sc, IWN_RESET, 0); /* Wait at most one second for first alive notification. 
*/ if ((error = msleep(sc, &sc->sc_mtx, PCATCH, "iwninit", hz)) != 0) { device_printf(sc->sc_dev, "%s: timeout waiting for adapter to initialize, error %d\n", __func__, error); return error; } /* Retrieve current temperature for initial TX power calibration. */ sc->rawtemp = sc->ucode_info.temp[3].chan20MHz; sc->temp = iwn4965_get_temperature(sc); /* Copy runtime sections into pre-allocated DMA-safe memory. */ memcpy(dma->vaddr, fw->main.data, fw->main.datasz); bus_dmamap_sync(dma->tag, dma->map, BUS_DMASYNC_PREWRITE); memcpy(dma->vaddr + IWN4965_FW_DATA_MAXSZ, fw->main.text, fw->main.textsz); bus_dmamap_sync(dma->tag, dma->map, BUS_DMASYNC_PREWRITE); /* Tell adapter where to find runtime sections. */ if ((error = iwn_nic_lock(sc)) != 0) return error; iwn_prph_write(sc, IWN_BSM_DRAM_DATA_ADDR, dma->paddr >> 4); iwn_prph_write(sc, IWN_BSM_DRAM_DATA_SIZE, fw->main.datasz); iwn_prph_write(sc, IWN_BSM_DRAM_TEXT_ADDR, (dma->paddr + IWN4965_FW_DATA_MAXSZ) >> 4); iwn_prph_write(sc, IWN_BSM_DRAM_TEXT_SIZE, IWN_FW_UPDATED | fw->main.textsz); iwn_nic_unlock(sc); return 0; } static int iwn5000_load_firmware_section(struct iwn_softc *sc, uint32_t dst, const uint8_t *section, int size) { struct iwn_dma_info *dma = &sc->fw_dma; int error; DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__); /* Copy firmware section into pre-allocated DMA-safe memory. */ memcpy(dma->vaddr, section, size); bus_dmamap_sync(dma->tag, dma->map, BUS_DMASYNC_PREWRITE); if ((error = iwn_nic_lock(sc)) != 0) return error; IWN_WRITE(sc, IWN_FH_TX_CONFIG(IWN_SRVC_DMACHNL), IWN_FH_TX_CONFIG_DMA_PAUSE); IWN_WRITE(sc, IWN_FH_SRAM_ADDR(IWN_SRVC_DMACHNL), dst); IWN_WRITE(sc, IWN_FH_TFBD_CTRL0(IWN_SRVC_DMACHNL), IWN_LOADDR(dma->paddr)); IWN_WRITE(sc, IWN_FH_TFBD_CTRL1(IWN_SRVC_DMACHNL), IWN_HIADDR(dma->paddr) << 28 | size); IWN_WRITE(sc, IWN_FH_TXBUF_STATUS(IWN_SRVC_DMACHNL), IWN_FH_TXBUF_STATUS_TBNUM(1) | IWN_FH_TXBUF_STATUS_TBIDX(1) | IWN_FH_TXBUF_STATUS_TFBD_VALID); /* Kick Flow Handler to start DMA transfer. */ IWN_WRITE(sc, IWN_FH_TX_CONFIG(IWN_SRVC_DMACHNL), IWN_FH_TX_CONFIG_DMA_ENA | IWN_FH_TX_CONFIG_CIRQ_HOST_ENDTFD); iwn_nic_unlock(sc); /* Wait at most five seconds for FH DMA transfer to complete. */ return msleep(sc, &sc->sc_mtx, PCATCH, "iwninit", 5 * hz); } static int iwn5000_load_firmware(struct iwn_softc *sc) { struct iwn_fw_part *fw; int error; DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__); /* Load the initialization firmware on first boot only. */ fw = (sc->sc_flags & IWN_FLAG_CALIB_DONE) ? &sc->fw.main : &sc->fw.init; error = iwn5000_load_firmware_section(sc, IWN_FW_TEXT_BASE, fw->text, fw->textsz); if (error != 0) { device_printf(sc->sc_dev, "%s: could not load firmware %s section, error %d\n", __func__, ".text", error); return error; } error = iwn5000_load_firmware_section(sc, IWN_FW_DATA_BASE, fw->data, fw->datasz); if (error != 0) { device_printf(sc->sc_dev, "%s: could not load firmware %s section, error %d\n", __func__, ".data", error); return error; } /* Now press "execute". */ IWN_WRITE(sc, IWN_RESET, 0); return 0; } /* * Extract text and data sections from a legacy firmware image. */ static int iwn_read_firmware_leg(struct iwn_softc *sc, struct iwn_fw_info *fw) { const uint32_t *ptr; size_t hdrlen = 24; uint32_t rev; ptr = (const uint32_t *)fw->data; rev = le32toh(*ptr++); sc->ucode_rev = rev; /* Check firmware API version. 
*/ if (IWN_FW_API(rev) <= 1) { device_printf(sc->sc_dev, "%s: bad firmware, need API version >=2\n", __func__); return EINVAL; } if (IWN_FW_API(rev) >= 3) { /* Skip build number (version 2 header). */ hdrlen += 4; ptr++; } if (fw->size < hdrlen) { device_printf(sc->sc_dev, "%s: firmware too short: %zu bytes\n", __func__, fw->size); return EINVAL; } fw->main.textsz = le32toh(*ptr++); fw->main.datasz = le32toh(*ptr++); fw->init.textsz = le32toh(*ptr++); fw->init.datasz = le32toh(*ptr++); fw->boot.textsz = le32toh(*ptr++); /* Check that all firmware sections fit. */ if (fw->size < hdrlen + fw->main.textsz + fw->main.datasz + fw->init.textsz + fw->init.datasz + fw->boot.textsz) { device_printf(sc->sc_dev, "%s: firmware too short: %zu bytes\n", __func__, fw->size); return EINVAL; } /* Get pointers to firmware sections. */ fw->main.text = (const uint8_t *)ptr; fw->main.data = fw->main.text + fw->main.textsz; fw->init.text = fw->main.data + fw->main.datasz; fw->init.data = fw->init.text + fw->init.textsz; fw->boot.text = fw->init.data + fw->init.datasz; return 0; } /* * Extract text and data sections from a TLV firmware image. */ static int iwn_read_firmware_tlv(struct iwn_softc *sc, struct iwn_fw_info *fw, uint16_t alt) { const struct iwn_fw_tlv_hdr *hdr; const struct iwn_fw_tlv *tlv; const uint8_t *ptr, *end; uint64_t altmask; uint32_t len, tmp; if (fw->size < sizeof (*hdr)) { device_printf(sc->sc_dev, "%s: firmware too short: %zu bytes\n", __func__, fw->size); return EINVAL; } hdr = (const struct iwn_fw_tlv_hdr *)fw->data; if (hdr->signature != htole32(IWN_FW_SIGNATURE)) { device_printf(sc->sc_dev, "%s: bad firmware signature 0x%08x\n", __func__, le32toh(hdr->signature)); return EINVAL; } DPRINTF(sc, IWN_DEBUG_RESET, "FW: \"%.64s\", build 0x%x\n", hdr->descr, le32toh(hdr->build)); sc->ucode_rev = le32toh(hdr->rev); /* * Select the closest supported alternative that is less than * or equal to the specified one. */ altmask = le64toh(hdr->altmask); while (alt > 0 && !(altmask & (1ULL << alt))) alt--; /* Downgrade. */ DPRINTF(sc, IWN_DEBUG_RESET, "using alternative %d\n", alt); ptr = (const uint8_t *)(hdr + 1); end = (const uint8_t *)(fw->data + fw->size); /* Parse type-length-value fields. */ while (ptr + sizeof (*tlv) <= end) { tlv = (const struct iwn_fw_tlv *)ptr; len = le32toh(tlv->len); ptr += sizeof (*tlv); if (ptr + len > end) { device_printf(sc->sc_dev, "%s: firmware too short: %zu bytes\n", __func__, fw->size); return EINVAL; } /* Skip other alternatives. 
*/ if (tlv->alt != 0 && tlv->alt != htole16(alt)) goto next; switch (le16toh(tlv->type)) { case IWN_FW_TLV_MAIN_TEXT: fw->main.text = ptr; fw->main.textsz = len; break; case IWN_FW_TLV_MAIN_DATA: fw->main.data = ptr; fw->main.datasz = len; break; case IWN_FW_TLV_INIT_TEXT: fw->init.text = ptr; fw->init.textsz = len; break; case IWN_FW_TLV_INIT_DATA: fw->init.data = ptr; fw->init.datasz = len; break; case IWN_FW_TLV_BOOT_TEXT: fw->boot.text = ptr; fw->boot.textsz = len; break; case IWN_FW_TLV_ENH_SENS: if (!len) sc->sc_flags |= IWN_FLAG_ENH_SENS; break; case IWN_FW_TLV_PHY_CALIB: tmp = le32toh(*ptr); if (tmp < 253) { sc->reset_noise_gain = tmp; sc->noise_gain = tmp + 1; } break; case IWN_FW_TLV_PAN: sc->sc_flags |= IWN_FLAG_PAN_SUPPORT; DPRINTF(sc, IWN_DEBUG_RESET, "PAN Support found: %d\n", 1); break; case IWN_FW_TLV_FLAGS: if (len < sizeof(uint32_t)) break; if (len % sizeof(uint32_t)) break; sc->tlv_feature_flags = le32toh(*ptr); DPRINTF(sc, IWN_DEBUG_RESET, "%s: feature: 0x%08x\n", __func__, sc->tlv_feature_flags); break; case IWN_FW_TLV_PBREQ_MAXLEN: case IWN_FW_TLV_RUNT_EVTLOG_PTR: case IWN_FW_TLV_RUNT_EVTLOG_SIZE: case IWN_FW_TLV_RUNT_ERRLOG_PTR: case IWN_FW_TLV_INIT_EVTLOG_PTR: case IWN_FW_TLV_INIT_EVTLOG_SIZE: case IWN_FW_TLV_INIT_ERRLOG_PTR: case IWN_FW_TLV_WOWLAN_INST: case IWN_FW_TLV_WOWLAN_DATA: DPRINTF(sc, IWN_DEBUG_RESET, "TLV type %d recognized but not handled\n", le16toh(tlv->type)); break; default: DPRINTF(sc, IWN_DEBUG_RESET, "TLV type %d not handled\n", le16toh(tlv->type)); break; } next: /* TLV fields are 32-bit aligned. */ ptr += (len + 3) & ~3; } return 0; } static int iwn_read_firmware(struct iwn_softc *sc) { struct iwn_fw_info *fw = &sc->fw; int error; DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__); IWN_UNLOCK(sc); memset(fw, 0, sizeof (*fw)); /* Read firmware image from filesystem. */ sc->fw_fp = firmware_get(sc->fwname); if (sc->fw_fp == NULL) { device_printf(sc->sc_dev, "%s: could not read firmware %s\n", __func__, sc->fwname); IWN_LOCK(sc); return EINVAL; } IWN_LOCK(sc); fw->size = sc->fw_fp->datasize; fw->data = (const uint8_t *)sc->fw_fp->data; if (fw->size < sizeof (uint32_t)) { device_printf(sc->sc_dev, "%s: firmware too short: %zu bytes\n", __func__, fw->size); error = EINVAL; goto fail; } /* Retrieve text and data sections. */ if (*(const uint32_t *)fw->data != 0) /* Legacy image. */ error = iwn_read_firmware_leg(sc, fw); else error = iwn_read_firmware_tlv(sc, fw, 1); if (error != 0) { device_printf(sc->sc_dev, "%s: could not read firmware sections, error %d\n", __func__, error); goto fail; } device_printf(sc->sc_dev, "%s: ucode rev=0x%08x\n", __func__, sc->ucode_rev); /* Make sure text and data sections fit in hardware memory. */ if (fw->main.textsz > sc->fw_text_maxsz || fw->main.datasz > sc->fw_data_maxsz || fw->init.textsz > sc->fw_text_maxsz || fw->init.datasz > sc->fw_data_maxsz || fw->boot.textsz > IWN_FW_BOOT_TEXT_MAXSZ || (fw->boot.textsz & 3) != 0) { device_printf(sc->sc_dev, "%s: firmware sections too large\n", __func__); error = EINVAL; goto fail; } /* We can proceed with loading the firmware. */ return 0; fail: iwn_unload_firmware(sc); return error; } static void iwn_unload_firmware(struct iwn_softc *sc) { firmware_put(sc->fw_fp, FIRMWARE_UNLOAD); sc->fw_fp = NULL; } static int iwn_clock_wait(struct iwn_softc *sc) { int ntries; /* Set "initialization complete" bit. */ IWN_SETBITS(sc, IWN_GP_CNTRL, IWN_GP_CNTRL_INIT_DONE); /* Wait for clock stabilization. 
*/ for (ntries = 0; ntries < 2500; ntries++) { if (IWN_READ(sc, IWN_GP_CNTRL) & IWN_GP_CNTRL_MAC_CLOCK_READY) return 0; DELAY(10); } device_printf(sc->sc_dev, "%s: timeout waiting for clock stabilization\n", __func__); return ETIMEDOUT; } static int iwn_apm_init(struct iwn_softc *sc) { uint32_t reg; int error; DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__); /* Disable L0s exit timer (NMI bug workaround). */ IWN_SETBITS(sc, IWN_GIO_CHICKEN, IWN_GIO_CHICKEN_DIS_L0S_TIMER); /* Don't wait for ICH L0s (ICH bug workaround). */ IWN_SETBITS(sc, IWN_GIO_CHICKEN, IWN_GIO_CHICKEN_L1A_NO_L0S_RX); /* Set FH wait threshold to max (HW bug under stress workaround). */ IWN_SETBITS(sc, IWN_DBG_HPET_MEM, 0xffff0000); /* Enable HAP INTA to move adapter from L1a to L0s. */ IWN_SETBITS(sc, IWN_HW_IF_CONFIG, IWN_HW_IF_CONFIG_HAP_WAKE_L1A); /* Retrieve PCIe Active State Power Management (ASPM). */ reg = pci_read_config(sc->sc_dev, sc->sc_cap_off + PCIER_LINK_CTL, 4); /* Workaround for HW instability in PCIe L0->L0s->L1 transition. */ if (reg & PCIEM_LINK_CTL_ASPMC_L1) /* L1 Entry enabled. */ IWN_SETBITS(sc, IWN_GIO, IWN_GIO_L0S_ENA); else IWN_CLRBITS(sc, IWN_GIO, IWN_GIO_L0S_ENA); if (sc->base_params->pll_cfg_val) IWN_SETBITS(sc, IWN_ANA_PLL, sc->base_params->pll_cfg_val); /* Wait for clock stabilization before accessing prph. */ if ((error = iwn_clock_wait(sc)) != 0) return error; if ((error = iwn_nic_lock(sc)) != 0) return error; if (sc->hw_type == IWN_HW_REV_TYPE_4965) { /* Enable DMA and BSM (Bootstrap State Machine). */ iwn_prph_write(sc, IWN_APMG_CLK_EN, IWN_APMG_CLK_CTRL_DMA_CLK_RQT | IWN_APMG_CLK_CTRL_BSM_CLK_RQT); } else { /* Enable DMA. */ iwn_prph_write(sc, IWN_APMG_CLK_EN, IWN_APMG_CLK_CTRL_DMA_CLK_RQT); } DELAY(20); /* Disable L1-Active. */ iwn_prph_setbits(sc, IWN_APMG_PCI_STT, IWN_APMG_PCI_STT_L1A_DIS); iwn_nic_unlock(sc); return 0; } static void iwn_apm_stop_master(struct iwn_softc *sc) { int ntries; /* Stop busmaster DMA activity. */ IWN_SETBITS(sc, IWN_RESET, IWN_RESET_STOP_MASTER); for (ntries = 0; ntries < 100; ntries++) { if (IWN_READ(sc, IWN_RESET) & IWN_RESET_MASTER_DISABLED) return; DELAY(10); } device_printf(sc->sc_dev, "%s: timeout waiting for master\n", __func__); } static void iwn_apm_stop(struct iwn_softc *sc) { iwn_apm_stop_master(sc); /* Reset the entire device. */ IWN_SETBITS(sc, IWN_RESET, IWN_RESET_SW); DELAY(10); /* Clear "initialization complete" bit. */ IWN_CLRBITS(sc, IWN_GP_CNTRL, IWN_GP_CNTRL_INIT_DONE); } static int iwn4965_nic_config(struct iwn_softc *sc) { DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__); if (IWN_RFCFG_TYPE(sc->rfcfg) == 1) { /* * I don't believe this to be correct but this is what the * vendor driver is doing. Probably the bits should not be * shifted in IWN_RFCFG_*. 
*/ IWN_SETBITS(sc, IWN_HW_IF_CONFIG, IWN_RFCFG_TYPE(sc->rfcfg) | IWN_RFCFG_STEP(sc->rfcfg) | IWN_RFCFG_DASH(sc->rfcfg)); } IWN_SETBITS(sc, IWN_HW_IF_CONFIG, IWN_HW_IF_CONFIG_RADIO_SI | IWN_HW_IF_CONFIG_MAC_SI); return 0; } static int iwn5000_nic_config(struct iwn_softc *sc) { uint32_t tmp; int error; DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__); if (IWN_RFCFG_TYPE(sc->rfcfg) < 3) { IWN_SETBITS(sc, IWN_HW_IF_CONFIG, IWN_RFCFG_TYPE(sc->rfcfg) | IWN_RFCFG_STEP(sc->rfcfg) | IWN_RFCFG_DASH(sc->rfcfg)); } IWN_SETBITS(sc, IWN_HW_IF_CONFIG, IWN_HW_IF_CONFIG_RADIO_SI | IWN_HW_IF_CONFIG_MAC_SI); if ((error = iwn_nic_lock(sc)) != 0) return error; iwn_prph_setbits(sc, IWN_APMG_PS, IWN_APMG_PS_EARLY_PWROFF_DIS); if (sc->hw_type == IWN_HW_REV_TYPE_1000) { /* * Select first Switching Voltage Regulator (1.32V) to * solve a stability issue related to noisy DC2DC line * in the silicon of 1000 Series. */ tmp = iwn_prph_read(sc, IWN_APMG_DIGITAL_SVR); tmp &= ~IWN_APMG_DIGITAL_SVR_VOLTAGE_MASK; tmp |= IWN_APMG_DIGITAL_SVR_VOLTAGE_1_32; iwn_prph_write(sc, IWN_APMG_DIGITAL_SVR, tmp); } iwn_nic_unlock(sc); if (sc->sc_flags & IWN_FLAG_INTERNAL_PA) { /* Use internal power amplifier only. */ IWN_WRITE(sc, IWN_GP_DRIVER, IWN_GP_DRIVER_RADIO_2X2_IPA); } if (sc->base_params->additional_nic_config && sc->calib_ver >= 6) { /* Indicate that ROM calibration version is >=6. */ IWN_SETBITS(sc, IWN_GP_DRIVER, IWN_GP_DRIVER_CALIB_VER6); } if (sc->base_params->additional_gp_drv_bit) IWN_SETBITS(sc, IWN_GP_DRIVER, sc->base_params->additional_gp_drv_bit); return 0; } /* * Take NIC ownership over Intel Active Management Technology (AMT). */ static int iwn_hw_prepare(struct iwn_softc *sc) { int ntries; DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__); /* Check if hardware is ready. */ IWN_SETBITS(sc, IWN_HW_IF_CONFIG, IWN_HW_IF_CONFIG_NIC_READY); for (ntries = 0; ntries < 5; ntries++) { if (IWN_READ(sc, IWN_HW_IF_CONFIG) & IWN_HW_IF_CONFIG_NIC_READY) return 0; DELAY(10); } /* Hardware not ready, force into ready state. */ IWN_SETBITS(sc, IWN_HW_IF_CONFIG, IWN_HW_IF_CONFIG_PREPARE); for (ntries = 0; ntries < 15000; ntries++) { if (!(IWN_READ(sc, IWN_HW_IF_CONFIG) & IWN_HW_IF_CONFIG_PREPARE_DONE)) break; DELAY(10); } if (ntries == 15000) return ETIMEDOUT; /* Hardware should be ready now. */ IWN_SETBITS(sc, IWN_HW_IF_CONFIG, IWN_HW_IF_CONFIG_NIC_READY); for (ntries = 0; ntries < 5; ntries++) { if (IWN_READ(sc, IWN_HW_IF_CONFIG) & IWN_HW_IF_CONFIG_NIC_READY) return 0; DELAY(10); } return ETIMEDOUT; } static int iwn_hw_init(struct iwn_softc *sc) { struct iwn_ops *ops = &sc->ops; int error, chnl, qid; DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__); /* Clear pending interrupts. */ IWN_WRITE(sc, IWN_INT, 0xffffffff); if ((error = iwn_apm_init(sc)) != 0) { device_printf(sc->sc_dev, "%s: could not power ON adapter, error %d\n", __func__, error); return error; } /* Select VMAIN power source. */ if ((error = iwn_nic_lock(sc)) != 0) return error; iwn_prph_clrbits(sc, IWN_APMG_PS, IWN_APMG_PS_PWR_SRC_MASK); iwn_nic_unlock(sc); /* Perform adapter-specific initialization. */ if ((error = ops->nic_config(sc)) != 0) return error; /* Initialize RX ring. */ if ((error = iwn_nic_lock(sc)) != 0) return error; IWN_WRITE(sc, IWN_FH_RX_CONFIG, 0); IWN_WRITE(sc, IWN_FH_RX_WPTR, 0); /* Set physical address of RX ring (256-byte aligned). */ IWN_WRITE(sc, IWN_FH_RX_BASE, sc->rxq.desc_dma.paddr >> 8); /* Set physical address of RX status (16-byte aligned). */ IWN_WRITE(sc, IWN_FH_STATUS_WPTR, sc->rxq.stat_dma.paddr >> 4); /* Enable RX. 
*/ IWN_WRITE(sc, IWN_FH_RX_CONFIG, IWN_FH_RX_CONFIG_ENA | IWN_FH_RX_CONFIG_IGN_RXF_EMPTY | /* HW bug workaround */ IWN_FH_RX_CONFIG_IRQ_DST_HOST | IWN_FH_RX_CONFIG_SINGLE_FRAME | IWN_FH_RX_CONFIG_RB_TIMEOUT(0) | IWN_FH_RX_CONFIG_NRBD(IWN_RX_RING_COUNT_LOG)); iwn_nic_unlock(sc); IWN_WRITE(sc, IWN_FH_RX_WPTR, (IWN_RX_RING_COUNT - 1) & ~7); if ((error = iwn_nic_lock(sc)) != 0) return error; /* Initialize TX scheduler. */ iwn_prph_write(sc, sc->sched_txfact_addr, 0); /* Set physical address of "keep warm" page (16-byte aligned). */ IWN_WRITE(sc, IWN_FH_KW_ADDR, sc->kw_dma.paddr >> 4); /* Initialize TX rings. */ for (qid = 0; qid < sc->ntxqs; qid++) { struct iwn_tx_ring *txq = &sc->txq[qid]; /* Set physical address of TX ring (256-byte aligned). */ IWN_WRITE(sc, IWN_FH_CBBC_QUEUE(qid), txq->desc_dma.paddr >> 8); } iwn_nic_unlock(sc); /* Enable DMA channels. */ for (chnl = 0; chnl < sc->ndmachnls; chnl++) { IWN_WRITE(sc, IWN_FH_TX_CONFIG(chnl), IWN_FH_TX_CONFIG_DMA_ENA | IWN_FH_TX_CONFIG_DMA_CREDIT_ENA); } /* Clear "radio off" and "commands blocked" bits. */ IWN_WRITE(sc, IWN_UCODE_GP1_CLR, IWN_UCODE_GP1_RFKILL); IWN_WRITE(sc, IWN_UCODE_GP1_CLR, IWN_UCODE_GP1_CMD_BLOCKED); /* Clear pending interrupts. */ IWN_WRITE(sc, IWN_INT, 0xffffffff); /* Enable interrupt coalescing. */ IWN_WRITE(sc, IWN_INT_COALESCING, 512 / 8); /* Enable interrupts. */ IWN_WRITE(sc, IWN_INT_MASK, sc->int_mask); /* _Really_ make sure "radio off" bit is cleared! */ IWN_WRITE(sc, IWN_UCODE_GP1_CLR, IWN_UCODE_GP1_RFKILL); IWN_WRITE(sc, IWN_UCODE_GP1_CLR, IWN_UCODE_GP1_RFKILL); /* Enable shadow registers. */ if (sc->base_params->shadow_reg_enable) IWN_SETBITS(sc, IWN_SHADOW_REG_CTRL, 0x800fffff); if ((error = ops->load_firmware(sc)) != 0) { device_printf(sc->sc_dev, "%s: could not load firmware, error %d\n", __func__, error); return error; } /* Wait at most one second for firmware alive notification. */ if ((error = msleep(sc, &sc->sc_mtx, PCATCH, "iwninit", hz)) != 0) { device_printf(sc->sc_dev, "%s: timeout waiting for adapter to initialize, error %d\n", __func__, error); return error; } /* Do post-firmware initialization. */ DPRINTF(sc, IWN_DEBUG_TRACE, "->%s: end\n",__func__); return ops->post_alive(sc); } static void iwn_hw_stop(struct iwn_softc *sc) { int chnl, qid, ntries; DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__); IWN_WRITE(sc, IWN_RESET, IWN_RESET_NEVO); /* Disable interrupts. */ IWN_WRITE(sc, IWN_INT_MASK, 0); IWN_WRITE(sc, IWN_INT, 0xffffffff); IWN_WRITE(sc, IWN_FH_INT, 0xffffffff); sc->sc_flags &= ~IWN_FLAG_USE_ICT; /* Make sure we no longer hold the NIC lock. */ iwn_nic_unlock(sc); /* Stop TX scheduler. */ iwn_prph_write(sc, sc->sched_txfact_addr, 0); /* Stop all DMA channels. */ if (iwn_nic_lock(sc) == 0) { for (chnl = 0; chnl < sc->ndmachnls; chnl++) { IWN_WRITE(sc, IWN_FH_TX_CONFIG(chnl), 0); for (ntries = 0; ntries < 200; ntries++) { if (IWN_READ(sc, IWN_FH_TX_STATUS) & IWN_FH_TX_STATUS_IDLE(chnl)) break; DELAY(10); } } iwn_nic_unlock(sc); } /* Stop RX ring. */ iwn_reset_rx_ring(sc, &sc->rxq); /* Reset all TX rings. */ for (qid = 0; qid < sc->ntxqs; qid++) iwn_reset_tx_ring(sc, &sc->txq[qid]); if (iwn_nic_lock(sc) == 0) { iwn_prph_write(sc, IWN_APMG_CLK_DIS, IWN_APMG_CLK_CTRL_DMA_CLK_RQT); iwn_nic_unlock(sc); } DELAY(5); /* Power OFF adapter. 
*/ iwn_apm_stop(sc); } static void iwn_panicked(void *arg0, int pending) { struct iwn_softc *sc = arg0; struct ieee80211com *ic = &sc->sc_ic; struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps); #if 0 int error; #endif if (vap == NULL) { printf("%s: null vap\n", __func__); return; } device_printf(sc->sc_dev, "%s: controller panicked, iv_state = %d; " "restarting\n", __func__, vap->iv_state); /* * This is not enough work. We need to also reinitialise * the correct transmit state for aggregation enabled queues, * which has a very specific requirement of * ring index = 802.11 seqno % 256. If we don't do this (which * we definitely don't!) then the firmware will just panic again. */ #if 1 ieee80211_restart_all(ic); #else IWN_LOCK(sc); iwn_stop_locked(sc); if ((error = iwn_init_locked(sc)) != 0) { device_printf(sc->sc_dev, "%s: could not init hardware\n", __func__); goto unlock; } if (vap->iv_state >= IEEE80211_S_AUTH && (error = iwn_auth(sc, vap)) != 0) { device_printf(sc->sc_dev, "%s: could not move to auth state\n", __func__); } if (vap->iv_state >= IEEE80211_S_RUN && (error = iwn_run(sc, vap)) != 0) { device_printf(sc->sc_dev, "%s: could not move to run state\n", __func__); } unlock: IWN_UNLOCK(sc); #endif } static int iwn_init_locked(struct iwn_softc *sc) { int error; DPRINTF(sc, IWN_DEBUG_TRACE, "->%s begin\n", __func__); IWN_LOCK_ASSERT(sc); if (sc->sc_flags & IWN_FLAG_RUNNING) goto end; sc->sc_flags |= IWN_FLAG_RUNNING; if ((error = iwn_hw_prepare(sc)) != 0) { device_printf(sc->sc_dev, "%s: hardware not ready, error %d\n", __func__, error); goto fail; } /* Initialize interrupt mask to default value. */ sc->int_mask = IWN_INT_MASK_DEF; sc->sc_flags &= ~IWN_FLAG_USE_ICT; /* Check that the radio is not disabled by hardware switch. */ if (!(IWN_READ(sc, IWN_GP_CNTRL) & IWN_GP_CNTRL_RFKILL)) { iwn_stop_locked(sc); DPRINTF(sc, IWN_DEBUG_TRACE, "->%s: end\n",__func__); return (1); } /* Read firmware images from the filesystem. */ if ((error = iwn_read_firmware(sc)) != 0) { device_printf(sc->sc_dev, "%s: could not read firmware, error %d\n", __func__, error); goto fail; } /* Initialize hardware and upload firmware. */ error = iwn_hw_init(sc); iwn_unload_firmware(sc); if (error != 0) { device_printf(sc->sc_dev, "%s: could not initialize hardware, error %d\n", __func__, error); goto fail; } /* Configure adapter now that it is ready. */ if ((error = iwn_config(sc)) != 0) { device_printf(sc->sc_dev, "%s: could not configure device, error %d\n", __func__, error); goto fail; } callout_reset(&sc->watchdog_to, hz, iwn_watchdog, sc); end: DPRINTF(sc, IWN_DEBUG_TRACE, "->%s: end\n",__func__); return (0); fail: iwn_stop_locked(sc); DPRINTF(sc, IWN_DEBUG_TRACE, "->%s: end in error\n",__func__); return (-1); } static int iwn_init(struct iwn_softc *sc) { int error; IWN_LOCK(sc); error = iwn_init_locked(sc); IWN_UNLOCK(sc); return (error); } static void iwn_stop_locked(struct iwn_softc *sc) { IWN_LOCK_ASSERT(sc); if (!(sc->sc_flags & IWN_FLAG_RUNNING)) return; sc->sc_is_scanning = 0; sc->sc_tx_timer = 0; callout_stop(&sc->watchdog_to); callout_stop(&sc->scan_timeout); callout_stop(&sc->calib_to); sc->sc_flags &= ~IWN_FLAG_RUNNING; /* Power OFF hardware. */ iwn_hw_stop(sc); } static void iwn_stop(struct iwn_softc *sc) { IWN_LOCK(sc); iwn_stop_locked(sc); IWN_UNLOCK(sc); } /* * Callback from net80211 to start a scan. 
*/ static void iwn_scan_start(struct ieee80211com *ic) { struct iwn_softc *sc = ic->ic_softc; IWN_LOCK(sc); /* make the link LED blink while we're scanning */ iwn_set_led(sc, IWN_LED_LINK, 20, 2); IWN_UNLOCK(sc); } /* * Callback from net80211 to terminate a scan. */ static void iwn_scan_end(struct ieee80211com *ic) { struct iwn_softc *sc = ic->ic_softc; struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps); IWN_LOCK(sc); if (vap->iv_state == IEEE80211_S_RUN) { /* Set link LED to ON status if we are associated */ iwn_set_led(sc, IWN_LED_LINK, 0, 1); } IWN_UNLOCK(sc); } /* * Callback from net80211 to force a channel change. */ static void iwn_set_channel(struct ieee80211com *ic) { const struct ieee80211_channel *c = ic->ic_curchan; struct iwn_softc *sc = ic->ic_softc; int error; DPRINTF(sc, IWN_DEBUG_TRACE, "->Doing %s\n", __func__); IWN_LOCK(sc); sc->sc_rxtap.wr_chan_freq = htole16(c->ic_freq); sc->sc_rxtap.wr_chan_flags = htole16(c->ic_flags); sc->sc_txtap.wt_chan_freq = htole16(c->ic_freq); sc->sc_txtap.wt_chan_flags = htole16(c->ic_flags); /* * Only need to set the channel in Monitor mode. AP scanning and auth * are already taken care of by their respective firmware commands. */ if (ic->ic_opmode == IEEE80211_M_MONITOR) { error = iwn_config(sc); if (error != 0) device_printf(sc->sc_dev, "%s: error %d setting channel\n", __func__, error); } IWN_UNLOCK(sc); } /* * Callback from net80211 to start scanning of the current channel. */ static void iwn_scan_curchan(struct ieee80211_scan_state *ss, unsigned long maxdwell) { struct ieee80211vap *vap = ss->ss_vap; struct ieee80211com *ic = vap->iv_ic; struct iwn_softc *sc = ic->ic_softc; int error; IWN_LOCK(sc); error = iwn_scan(sc, vap, ss, ic->ic_curchan); IWN_UNLOCK(sc); if (error != 0) ieee80211_cancel_scan(vap); } /* * Callback from net80211 to handle the minimum dwell time being met. * The intent is to terminate the scan but we just let the firmware * notify us when it's finished as we have no safe way to abort it.
*/ static void iwn_scan_mindwell(struct ieee80211_scan_state *ss) { /* NB: don't try to abort scan; wait for firmware to finish */ } #ifdef IWN_DEBUG #define IWN_DESC(x) case x: return #x /* * Translate CSR code to string */ static char *iwn_get_csr_string(int csr) { switch (csr) { IWN_DESC(IWN_HW_IF_CONFIG); IWN_DESC(IWN_INT_COALESCING); IWN_DESC(IWN_INT); IWN_DESC(IWN_INT_MASK); IWN_DESC(IWN_FH_INT); IWN_DESC(IWN_GPIO_IN); IWN_DESC(IWN_RESET); IWN_DESC(IWN_GP_CNTRL); IWN_DESC(IWN_HW_REV); IWN_DESC(IWN_EEPROM); IWN_DESC(IWN_EEPROM_GP); IWN_DESC(IWN_OTP_GP); IWN_DESC(IWN_GIO); IWN_DESC(IWN_GP_UCODE); IWN_DESC(IWN_GP_DRIVER); IWN_DESC(IWN_UCODE_GP1); IWN_DESC(IWN_UCODE_GP2); IWN_DESC(IWN_LED); IWN_DESC(IWN_DRAM_INT_TBL); IWN_DESC(IWN_GIO_CHICKEN); IWN_DESC(IWN_ANA_PLL); IWN_DESC(IWN_HW_REV_WA); IWN_DESC(IWN_DBG_HPET_MEM); default: return "UNKNOWN CSR"; } } /* * This function print firmware register */ static void iwn_debug_register(struct iwn_softc *sc) { int i; static const uint32_t csr_tbl[] = { IWN_HW_IF_CONFIG, IWN_INT_COALESCING, IWN_INT, IWN_INT_MASK, IWN_FH_INT, IWN_GPIO_IN, IWN_RESET, IWN_GP_CNTRL, IWN_HW_REV, IWN_EEPROM, IWN_EEPROM_GP, IWN_OTP_GP, IWN_GIO, IWN_GP_UCODE, IWN_GP_DRIVER, IWN_UCODE_GP1, IWN_UCODE_GP2, IWN_LED, IWN_DRAM_INT_TBL, IWN_GIO_CHICKEN, IWN_ANA_PLL, IWN_HW_REV_WA, IWN_DBG_HPET_MEM, }; DPRINTF(sc, IWN_DEBUG_REGISTER, "CSR values: (2nd byte of IWN_INT_COALESCING is IWN_INT_PERIODIC)%s", "\n"); for (i = 0; i < nitems(csr_tbl); i++){ DPRINTF(sc, IWN_DEBUG_REGISTER," %10s: 0x%08x ", iwn_get_csr_string(csr_tbl[i]), IWN_READ(sc, csr_tbl[i])); if ((i+1) % 3 == 0) DPRINTF(sc, IWN_DEBUG_REGISTER,"%s","\n"); } DPRINTF(sc, IWN_DEBUG_REGISTER,"%s","\n"); } #endif Index: head/sys/dev/ixl/if_ixl.c =================================================================== --- head/sys/dev/ixl/if_ixl.c (revision 338948) +++ head/sys/dev/ixl/if_ixl.c (revision 338949) @@ -1,1712 +1,1713 @@ /****************************************************************************** Copyright (c) 2013-2018, Intel Corporation All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. 3. Neither the name of the Intel Corporation nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
******************************************************************************/ /*$FreeBSD$*/ #include "ixl.h" #include "ixl_pf.h" #ifdef IXL_IW #include "ixl_iw.h" #include "ixl_iw_int.h" #endif #ifdef PCI_IOV #include "ixl_pf_iov.h" #endif /********************************************************************* * Driver version *********************************************************************/ #define IXL_DRIVER_VERSION_MAJOR 2 #define IXL_DRIVER_VERSION_MINOR 0 #define IXL_DRIVER_VERSION_BUILD 0 #define IXL_DRIVER_VERSION_STRING \ __XSTRING(IXL_DRIVER_VERSION_MAJOR) "." \ __XSTRING(IXL_DRIVER_VERSION_MINOR) "." \ __XSTRING(IXL_DRIVER_VERSION_BUILD) "-k" /********************************************************************* * PCI Device ID Table * * Used by probe to select devices to load on * * ( Vendor ID, Device ID, Branding String ) *********************************************************************/ static pci_vendor_info_t ixl_vendor_info_array[] = { PVIDV(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_SFP_XL710, "Intel(R) Ethernet Controller X710 for 10GbE SFP+"), PVIDV(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_KX_B, "Intel(R) Ethernet Controller XL710 for 40GbE backplane"), PVIDV(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_KX_C, "Intel(R) Ethernet Controller X710 for 10GbE backplane"), PVIDV(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_QSFP_A, "Intel(R) Ethernet Controller XL710 for 40GbE QSFP+"), PVIDV(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_QSFP_B, "Intel(R) Ethernet Controller XL710 for 40GbE QSFP+"), PVIDV(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_QSFP_C, "Intel(R) Ethernet Controller X710 for 10GbE QSFP+"), PVIDV(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_10G_BASE_T, "Intel(R) Ethernet Controller X710 for 10GBASE-T"), PVIDV(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_10G_BASE_T4, "Intel(R) Ethernet Controller X710/X557-AT 10GBASE-T"), PVIDV(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_KX_X722, "Intel(R) Ethernet Connection X722 for 10GbE backplane"), PVIDV(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_QSFP_X722, "Intel(R) Ethernet Connection X722 for 10GbE QSFP+"), PVIDV(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_SFP_X722, "Intel(R) Ethernet Connection X722 for 10GbE SFP+"), PVIDV(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_1G_BASE_T_X722, "Intel(R) Ethernet Connection X722 for 1GbE"), PVIDV(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_10G_BASE_T_X722, "Intel(R) Ethernet Connection X722 for 10GBASE-T"), PVIDV(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_SFP_I_X722, "Intel(R) Ethernet Connection X722 for 10GbE SFP+"), PVIDV(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_25G_B, "Intel(R) Ethernet Controller XXV710 for 25GbE backplane"), PVIDV(I40E_INTEL_VENDOR_ID, I40E_DEV_ID_25G_SFP28, "Intel(R) Ethernet Controller XXV710 for 25GbE SFP28"), /* required last entry */ PVID_END }; /********************************************************************* * Function prototypes *********************************************************************/ /*** IFLIB interface ***/ static void *ixl_register(device_t dev); static int ixl_if_attach_pre(if_ctx_t ctx); static int ixl_if_attach_post(if_ctx_t ctx); static int ixl_if_detach(if_ctx_t ctx); static int ixl_if_shutdown(if_ctx_t ctx); static int ixl_if_suspend(if_ctx_t ctx); static int ixl_if_resume(if_ctx_t ctx); static int ixl_if_msix_intr_assign(if_ctx_t ctx, int msix); static void ixl_if_enable_intr(if_ctx_t ctx); static void ixl_if_disable_intr(if_ctx_t ctx); static int ixl_if_rx_queue_intr_enable(if_ctx_t ctx, uint16_t rxqid); static int ixl_if_tx_queue_intr_enable(if_ctx_t ctx, uint16_t txqid); static int ixl_if_tx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs, 
int ntxqs, int ntxqsets); static int ixl_if_rx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs, int nqs, int nqsets); static void ixl_if_queues_free(if_ctx_t ctx); static void ixl_if_update_admin_status(if_ctx_t ctx); static void ixl_if_multi_set(if_ctx_t ctx); static int ixl_if_mtu_set(if_ctx_t ctx, uint32_t mtu); static void ixl_if_media_status(if_ctx_t ctx, struct ifmediareq *ifmr); static int ixl_if_media_change(if_ctx_t ctx); static int ixl_if_promisc_set(if_ctx_t ctx, int flags); static void ixl_if_timer(if_ctx_t ctx, uint16_t qid); static void ixl_if_vlan_register(if_ctx_t ctx, u16 vtag); static void ixl_if_vlan_unregister(if_ctx_t ctx, u16 vtag); static uint64_t ixl_if_get_counter(if_ctx_t ctx, ift_counter cnt); static void ixl_if_vflr_handle(if_ctx_t ctx); // static void ixl_if_link_intr_enable(if_ctx_t ctx); static int ixl_if_i2c_req(if_ctx_t ctx, struct ifi2creq *req); static int ixl_if_priv_ioctl(if_ctx_t ctx, u_long command, caddr_t data); /*** Other ***/ static int ixl_mc_filter_apply(void *arg, struct ifmultiaddr *ifma, int); static void ixl_save_pf_tunables(struct ixl_pf *); static int ixl_allocate_pci_resources(struct ixl_pf *); /********************************************************************* * FreeBSD Device Interface Entry Points *********************************************************************/ static device_method_t ixl_methods[] = { /* Device interface */ DEVMETHOD(device_register, ixl_register), DEVMETHOD(device_probe, iflib_device_probe), DEVMETHOD(device_attach, iflib_device_attach), DEVMETHOD(device_detach, iflib_device_detach), DEVMETHOD(device_shutdown, iflib_device_shutdown), #ifdef PCI_IOV DEVMETHOD(pci_iov_init, ixl_iov_init), DEVMETHOD(pci_iov_uninit, ixl_iov_uninit), DEVMETHOD(pci_iov_add_vf, ixl_add_vf), #endif DEVMETHOD_END }; static driver_t ixl_driver = { "ixl", ixl_methods, sizeof(struct ixl_pf), }; devclass_t ixl_devclass; DRIVER_MODULE(ixl, pci, ixl_driver, ixl_devclass, 0, 0); +IFLIB_PNP_INFO(pci, ixl, ixl_vendor_info_array); MODULE_VERSION(ixl, 3); MODULE_DEPEND(ixl, pci, 1, 1, 1); MODULE_DEPEND(ixl, ether, 1, 1, 1); MODULE_DEPEND(ixl, iflib, 1, 1, 1); static device_method_t ixl_if_methods[] = { DEVMETHOD(ifdi_attach_pre, ixl_if_attach_pre), DEVMETHOD(ifdi_attach_post, ixl_if_attach_post), DEVMETHOD(ifdi_detach, ixl_if_detach), DEVMETHOD(ifdi_shutdown, ixl_if_shutdown), DEVMETHOD(ifdi_suspend, ixl_if_suspend), DEVMETHOD(ifdi_resume, ixl_if_resume), DEVMETHOD(ifdi_init, ixl_if_init), DEVMETHOD(ifdi_stop, ixl_if_stop), DEVMETHOD(ifdi_msix_intr_assign, ixl_if_msix_intr_assign), DEVMETHOD(ifdi_intr_enable, ixl_if_enable_intr), DEVMETHOD(ifdi_intr_disable, ixl_if_disable_intr), //DEVMETHOD(ifdi_link_intr_enable, ixl_if_link_intr_enable), DEVMETHOD(ifdi_rx_queue_intr_enable, ixl_if_rx_queue_intr_enable), DEVMETHOD(ifdi_tx_queue_intr_enable, ixl_if_tx_queue_intr_enable), DEVMETHOD(ifdi_tx_queues_alloc, ixl_if_tx_queues_alloc), DEVMETHOD(ifdi_rx_queues_alloc, ixl_if_rx_queues_alloc), DEVMETHOD(ifdi_queues_free, ixl_if_queues_free), DEVMETHOD(ifdi_update_admin_status, ixl_if_update_admin_status), DEVMETHOD(ifdi_multi_set, ixl_if_multi_set), DEVMETHOD(ifdi_mtu_set, ixl_if_mtu_set), DEVMETHOD(ifdi_media_status, ixl_if_media_status), DEVMETHOD(ifdi_media_change, ixl_if_media_change), DEVMETHOD(ifdi_promisc_set, ixl_if_promisc_set), DEVMETHOD(ifdi_timer, ixl_if_timer), DEVMETHOD(ifdi_vlan_register, ixl_if_vlan_register), DEVMETHOD(ifdi_vlan_unregister, ixl_if_vlan_unregister), DEVMETHOD(ifdi_get_counter, ixl_if_get_counter), 
DEVMETHOD(ifdi_vflr_handle, ixl_if_vflr_handle), DEVMETHOD(ifdi_i2c_req, ixl_if_i2c_req), DEVMETHOD(ifdi_priv_ioctl, ixl_if_priv_ioctl), // ifdi_led_func // ifdi_debug DEVMETHOD_END }; static driver_t ixl_if_driver = { "ixl_if", ixl_if_methods, sizeof(struct ixl_pf) }; /* ** TUNEABLE PARAMETERS: */ static SYSCTL_NODE(_hw, OID_AUTO, ixl, CTLFLAG_RD, 0, "IXL driver parameters"); /* * Leave this on unless you need to send flow control * frames (or other control frames) from software */ static int ixl_enable_tx_fc_filter = 1; TUNABLE_INT("hw.ixl.enable_tx_fc_filter", &ixl_enable_tx_fc_filter); SYSCTL_INT(_hw_ixl, OID_AUTO, enable_tx_fc_filter, CTLFLAG_RDTUN, &ixl_enable_tx_fc_filter, 0, "Filter out packets with Ethertype 0x8808 from being sent out by non-HW sources"); static int ixl_i2c_access_method = 0; TUNABLE_INT("hw.ixl.i2c_access_method", &ixl_i2c_access_method); SYSCTL_INT(_hw_ixl, OID_AUTO, i2c_access_method, CTLFLAG_RDTUN, &ixl_i2c_access_method, 0, IXL_SYSCTL_HELP_I2C_METHOD); /* * Different method for processing TX descriptor * completion. */ static int ixl_enable_head_writeback = 1; TUNABLE_INT("hw.ixl.enable_head_writeback", &ixl_enable_head_writeback); SYSCTL_INT(_hw_ixl, OID_AUTO, enable_head_writeback, CTLFLAG_RDTUN, &ixl_enable_head_writeback, 0, "For detecting last completed TX descriptor by hardware, use value written by HW instead of checking descriptors"); static int ixl_core_debug_mask = 0; TUNABLE_INT("hw.ixl.core_debug_mask", &ixl_core_debug_mask); SYSCTL_INT(_hw_ixl, OID_AUTO, core_debug_mask, CTLFLAG_RDTUN, &ixl_core_debug_mask, 0, "Display debug statements that are printed in non-shared code"); static int ixl_shared_debug_mask = 0; TUNABLE_INT("hw.ixl.shared_debug_mask", &ixl_shared_debug_mask); SYSCTL_INT(_hw_ixl, OID_AUTO, shared_debug_mask, CTLFLAG_RDTUN, &ixl_shared_debug_mask, 0, "Display debug statements that are printed in shared code"); #if 0 /* ** Controls for Interrupt Throttling ** - true/false for dynamic adjustment ** - default values for static ITR */ static int ixl_dynamic_rx_itr = 0; TUNABLE_INT("hw.ixl.dynamic_rx_itr", &ixl_dynamic_rx_itr); SYSCTL_INT(_hw_ixl, OID_AUTO, dynamic_rx_itr, CTLFLAG_RDTUN, &ixl_dynamic_rx_itr, 0, "Dynamic RX Interrupt Rate"); static int ixl_dynamic_tx_itr = 0; TUNABLE_INT("hw.ixl.dynamic_tx_itr", &ixl_dynamic_tx_itr); SYSCTL_INT(_hw_ixl, OID_AUTO, dynamic_tx_itr, CTLFLAG_RDTUN, &ixl_dynamic_tx_itr, 0, "Dynamic TX Interrupt Rate"); #endif static int ixl_rx_itr = IXL_ITR_8K; TUNABLE_INT("hw.ixl.rx_itr", &ixl_rx_itr); SYSCTL_INT(_hw_ixl, OID_AUTO, rx_itr, CTLFLAG_RDTUN, &ixl_rx_itr, 0, "RX Interrupt Rate"); static int ixl_tx_itr = IXL_ITR_4K; TUNABLE_INT("hw.ixl.tx_itr", &ixl_tx_itr); SYSCTL_INT(_hw_ixl, OID_AUTO, tx_itr, CTLFLAG_RDTUN, &ixl_tx_itr, 0, "TX Interrupt Rate"); #ifdef IXL_IW int ixl_enable_iwarp = 0; TUNABLE_INT("hw.ixl.enable_iwarp", &ixl_enable_iwarp); SYSCTL_INT(_hw_ixl, OID_AUTO, enable_iwarp, CTLFLAG_RDTUN, &ixl_enable_iwarp, 0, "iWARP enabled"); #if __FreeBSD_version < 1100000 int ixl_limit_iwarp_msix = 1; #else int ixl_limit_iwarp_msix = IXL_IW_MAX_MSIX; #endif TUNABLE_INT("hw.ixl.limit_iwarp_msix", &ixl_limit_iwarp_msix); SYSCTL_INT(_hw_ixl, OID_AUTO, limit_iwarp_msix, CTLFLAG_RDTUN, &ixl_limit_iwarp_msix, 0, "Limit MSIX vectors assigned to iWARP"); #endif extern struct if_txrx ixl_txrx_hwb; extern struct if_txrx ixl_txrx_dwb; static struct if_shared_ctx ixl_sctx_init = { .isc_magic = IFLIB_MAGIC, .isc_q_align = PAGE_SIZE, .isc_tx_maxsize = IXL_TSO_SIZE + sizeof(struct ether_vlan_header), 
.isc_tx_maxsegsize = IXL_MAX_DMA_SEG_SIZE, .isc_tso_maxsize = IXL_TSO_SIZE + sizeof(struct ether_vlan_header), .isc_tso_maxsegsize = IXL_MAX_DMA_SEG_SIZE, .isc_rx_maxsize = 16384, .isc_rx_nsegments = IXL_MAX_RX_SEGS, .isc_rx_maxsegsize = IXL_MAX_DMA_SEG_SIZE, .isc_nfl = 1, .isc_ntxqs = 1, .isc_nrxqs = 1, .isc_admin_intrcnt = 1, .isc_vendor_info = ixl_vendor_info_array, .isc_driver_version = IXL_DRIVER_VERSION_STRING, .isc_driver = &ixl_if_driver, .isc_flags = IFLIB_NEED_SCRATCH | IFLIB_NEED_ZERO_CSUM | IFLIB_ADMIN_ALWAYS_RUN, .isc_nrxd_min = {IXL_MIN_RING}, .isc_ntxd_min = {IXL_MIN_RING}, .isc_nrxd_max = {IXL_MAX_RING}, .isc_ntxd_max = {IXL_MAX_RING}, .isc_nrxd_default = {IXL_DEFAULT_RING}, .isc_ntxd_default = {IXL_DEFAULT_RING}, }; if_shared_ctx_t ixl_sctx = &ixl_sctx_init; /*** Functions ***/ static void * ixl_register(device_t dev) { return (ixl_sctx); } static int ixl_allocate_pci_resources(struct ixl_pf *pf) { int rid; struct i40e_hw *hw = &pf->hw; device_t dev = iflib_get_dev(pf->vsi.ctx); /* Map BAR0 */ rid = PCIR_BAR(0); pf->pci_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid, RF_ACTIVE); if (!(pf->pci_mem)) { device_printf(dev, "Unable to allocate bus resource: PCI memory\n"); return (ENXIO); } /* Save off the PCI information */ hw->vendor_id = pci_get_vendor(dev); hw->device_id = pci_get_device(dev); hw->revision_id = pci_read_config(dev, PCIR_REVID, 1); hw->subsystem_vendor_id = pci_read_config(dev, PCIR_SUBVEND_0, 2); hw->subsystem_device_id = pci_read_config(dev, PCIR_SUBDEV_0, 2); hw->bus.device = pci_get_slot(dev); hw->bus.func = pci_get_function(dev); /* Save off register access information */ pf->osdep.mem_bus_space_tag = rman_get_bustag(pf->pci_mem); pf->osdep.mem_bus_space_handle = rman_get_bushandle(pf->pci_mem); pf->osdep.mem_bus_space_size = rman_get_size(pf->pci_mem); pf->osdep.flush_reg = I40E_GLGEN_STAT; pf->osdep.dev = dev; pf->hw.hw_addr = (u8 *) &pf->osdep.mem_bus_space_handle; pf->hw.back = &pf->osdep; return (0); } static int ixl_if_attach_pre(if_ctx_t ctx) { device_t dev; struct ixl_pf *pf; struct i40e_hw *hw; struct ixl_vsi *vsi; if_softc_ctx_t scctx; struct i40e_filter_control_settings filter; enum i40e_status_code status; int error = 0; INIT_DEBUGOUT("ixl_if_attach_pre: begin"); /* Allocate, clear, and link in our primary soft structure */ dev = iflib_get_dev(ctx); pf = iflib_get_softc(ctx); vsi = &pf->vsi; vsi->back = pf; pf->dev = dev; hw = &pf->hw; /* ** Note this assumes we have a single embedded VSI, ** this could be enhanced later to allocate multiple */ //vsi->dev = pf->dev; vsi->hw = &pf->hw; vsi->id = 0; vsi->num_vlans = 0; vsi->ctx = ctx; vsi->media = iflib_get_media(ctx); vsi->shared = scctx = iflib_get_softc_ctx(ctx); /* Save tunable values */ ixl_save_pf_tunables(pf); /* Do PCI setup - map BAR0, etc */ if (ixl_allocate_pci_resources(pf)) { device_printf(dev, "Allocation of PCI resources failed\n"); error = ENXIO; goto err_pci_res; } /* Establish a clean starting point */ i40e_clear_hw(hw); status = i40e_pf_reset(hw); if (status) { device_printf(dev, "PF reset failure %s\n", i40e_stat_str(hw, status)); error = EIO; goto err_out; } /* Initialize the shared code */ status = i40e_init_shared_code(hw); if (status) { device_printf(dev, "Unable to initialize shared code, error %s\n", i40e_stat_str(hw, status)); error = EIO; goto err_out; } /* Set up the admin queue */ hw->aq.num_arq_entries = IXL_AQ_LEN; hw->aq.num_asq_entries = IXL_AQ_LEN; hw->aq.arq_buf_size = IXL_AQ_BUF_SZ; hw->aq.asq_buf_size = IXL_AQ_BUF_SZ; status = i40e_init_adminq(hw); 
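/* The admin queue is the command mailbox shared with the firmware; any failure here is fatal, and an API version mismatch is singled out below so the NVM version can be reported with a more specific message. */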
if (status != 0 && status != I40E_ERR_FIRMWARE_API_VERSION) { device_printf(dev, "Unable to initialize Admin Queue, error %s\n", i40e_stat_str(hw, status)); error = EIO; goto err_out; } ixl_print_nvm_version(pf); if (status == I40E_ERR_FIRMWARE_API_VERSION) { device_printf(dev, "The driver for the device stopped " "because the NVM image is newer than expected.\n"); device_printf(dev, "You must install the most recent version of " "the network driver.\n"); error = EIO; goto err_out; } if (hw->aq.api_maj_ver == I40E_FW_API_VERSION_MAJOR && hw->aq.api_min_ver > I40E_FW_MINOR_VERSION(hw)) { device_printf(dev, "The driver for the device detected " "a newer version of the NVM image than expected.\n"); device_printf(dev, "Please install the most recent version " "of the network driver.\n"); } else if (hw->aq.api_maj_ver == 1 && hw->aq.api_min_ver < 4) { device_printf(dev, "The driver for the device detected " "an older version of the NVM image than expected.\n"); device_printf(dev, "Please update the NVM image.\n"); } /* Clear PXE mode */ i40e_clear_pxe_mode(hw); /* Get capabilities from the device */ error = ixl_get_hw_capabilities(pf); if (error) { device_printf(dev, "get_hw_capabilities failed: %d\n", error); goto err_get_cap; } /* Set up host memory cache */ status = i40e_init_lan_hmc(hw, hw->func_caps.num_tx_qp, hw->func_caps.num_rx_qp, 0, 0); if (status) { device_printf(dev, "init_lan_hmc failed: %s\n", i40e_stat_str(hw, status)); goto err_get_cap; } status = i40e_configure_lan_hmc(hw, I40E_HMC_MODEL_DIRECT_ONLY); if (status) { device_printf(dev, "configure_lan_hmc failed: %s\n", i40e_stat_str(hw, status)); goto err_mac_hmc; } /* Disable LLDP from the firmware for certain NVM versions */ if (((pf->hw.aq.fw_maj_ver == 4) && (pf->hw.aq.fw_min_ver < 3)) || (pf->hw.aq.fw_maj_ver < 4)) { i40e_aq_stop_lldp(hw, TRUE, NULL); pf->state |= IXL_PF_STATE_FW_LLDP_DISABLED; } /* Get MAC addresses from hardware */ i40e_get_mac_addr(hw, hw->mac.addr); error = i40e_validate_mac_addr(hw->mac.addr); if (error) { device_printf(dev, "validate_mac_addr failed: %d\n", error); goto err_mac_hmc; } bcopy(hw->mac.addr, hw->mac.perm_addr, ETHER_ADDR_LEN); iflib_set_mac(ctx, hw->mac.addr); i40e_get_port_mac_addr(hw, hw->mac.port_addr); /* Set up the device filtering */ bzero(&filter, sizeof(filter)); filter.enable_ethtype = TRUE; filter.enable_macvlan = TRUE; filter.enable_fdir = FALSE; filter.hash_lut_size = I40E_HASH_LUT_SIZE_512; if (i40e_set_filter_control(hw, &filter)) device_printf(dev, "i40e_set_filter_control() failed\n"); /* Query device FW LLDP status */ ixl_get_fw_lldp_status(pf); /* Tell FW to apply DCB config on link up */ i40e_aq_set_dcb_parameters(hw, true, NULL); /* Fill out iflib parameters */ if (hw->mac.type == I40E_MAC_X722) scctx->isc_ntxqsets_max = scctx->isc_nrxqsets_max = 128; else scctx->isc_ntxqsets_max = scctx->isc_nrxqsets_max = 64; if (vsi->enable_head_writeback) { scctx->isc_txqsizes[0] = roundup2(scctx->isc_ntxd[0] * sizeof(struct i40e_tx_desc) + sizeof(u32), DBA_ALIGN); scctx->isc_txrx = &ixl_txrx_hwb; } else { scctx->isc_txqsizes[0] = roundup2(scctx->isc_ntxd[0] * sizeof(struct i40e_tx_desc), DBA_ALIGN); scctx->isc_txrx = &ixl_txrx_dwb; } scctx->isc_rxqsizes[0] = roundup2(scctx->isc_nrxd[0] * sizeof(union i40e_32byte_rx_desc), DBA_ALIGN); scctx->isc_msix_bar = PCIR_BAR(IXL_MSIX_BAR); scctx->isc_tx_nsegments = IXL_MAX_TX_SEGS; scctx->isc_tx_tso_segments_max = IXL_MAX_TSO_SEGS; scctx->isc_tx_tso_size_max = IXL_TSO_SIZE; scctx->isc_tx_tso_segsize_max = IXL_MAX_DMA_SEG_SIZE; 
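/* The remaining iflib parameters come from the hardware function capabilities (RSS table size) and driver defaults (checksum offload flags and advertised interface capabilities). */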
scctx->isc_rss_table_size = pf->hw.func_caps.rss_table_size; scctx->isc_tx_csum_flags = CSUM_OFFLOAD; scctx->isc_capabilities = scctx->isc_capenable = IXL_CAPS; INIT_DEBUGOUT("ixl_if_attach_pre: end"); return (0); err_mac_hmc: i40e_shutdown_lan_hmc(hw); err_get_cap: i40e_shutdown_adminq(hw); err_out: ixl_free_pci_resources(pf); err_pci_res: return (error); } static int ixl_if_attach_post(if_ctx_t ctx) { device_t dev; struct ixl_pf *pf; struct i40e_hw *hw; struct ixl_vsi *vsi; int error = 0; enum i40e_status_code status; INIT_DEBUGOUT("ixl_if_attach_post: begin"); dev = iflib_get_dev(ctx); pf = iflib_get_softc(ctx); vsi = &pf->vsi; vsi->ifp = iflib_get_ifp(ctx); hw = &pf->hw; /* Setup OS network interface / ifnet */ if (ixl_setup_interface(dev, pf)) { device_printf(dev, "interface setup failed!\n"); error = EIO; goto err; } /* Determine link state */ if (ixl_attach_get_link_status(pf)) { error = EINVAL; goto err; } error = ixl_switch_config(pf); if (error) { device_printf(dev, "Initial ixl_switch_config() failed: %d\n", error); goto err; } /* Add protocol filters to list */ ixl_init_filters(vsi); /* Init queue allocation manager */ error = ixl_pf_qmgr_init(&pf->qmgr, hw->func_caps.num_tx_qp); if (error) { device_printf(dev, "Failed to init queue manager for PF queues, error %d\n", error); goto err; } /* reserve a contiguous allocation for the PF's VSI */ error = ixl_pf_qmgr_alloc_contiguous(&pf->qmgr, max(vsi->num_rx_queues, vsi->num_tx_queues), &pf->qtag); if (error) { device_printf(dev, "Failed to reserve queues for PF LAN VSI, error %d\n", error); goto err; } device_printf(dev, "Allocating %d queues for PF LAN VSI; %d queues active\n", pf->qtag.num_allocated, pf->qtag.num_active); /* Limit PHY interrupts to link, autoneg, and modules failure */ status = i40e_aq_set_phy_int_mask(hw, IXL_DEFAULT_PHY_INT_MASK, NULL); if (status) { device_printf(dev, "i40e_aq_set_phy_mask() failed: err %s," " aq_err %s\n", i40e_stat_str(hw, status), i40e_aq_str(hw, hw->aq.asq_last_status)); goto err; } /* Get the bus configuration and set the shared code */ ixl_get_bus_info(pf); /* Keep admin queue interrupts active while driver is loaded */ if (vsi->shared->isc_intr == IFLIB_INTR_MSIX) { ixl_configure_intr0_msix(pf); ixl_enable_intr0(hw); } /* Set initial advertised speed sysctl value */ ixl_set_initial_advertised_speeds(pf); /* Initialize statistics & add sysctls */ ixl_add_device_sysctls(pf); ixl_pf_reset_stats(pf); ixl_update_stats_counters(pf); ixl_add_hw_stats(pf); hw->phy.get_link_info = true; i40e_get_link_status(hw, &pf->link_up); ixl_update_link_status(pf); #ifdef PCI_IOV ixl_initialize_sriov(pf); #endif #ifdef IXL_IW if (hw->func_caps.iwarp && ixl_enable_iwarp) { pf->iw_enabled = (pf->iw_msix > 0) ? 
true : false; if (pf->iw_enabled) { error = ixl_iw_pf_attach(pf); if (error) { device_printf(dev, "interfacing to iwarp driver failed: %d\n", error); goto err; } else device_printf(dev, "iWARP ready\n"); } else device_printf(dev, "iwarp disabled on this device (no msix vectors)\n"); } else { pf->iw_enabled = false; device_printf(dev, "The device is not iWARP enabled\n"); } #endif INIT_DBG_DEV(dev, "end"); return (0); err: INIT_DEBUGOUT("end: error %d", error); /* ixl_if_detach() is called on error from this */ return (error); } static int ixl_if_detach(if_ctx_t ctx) { struct ixl_pf *pf = iflib_get_softc(ctx); struct ixl_vsi *vsi = &pf->vsi; struct i40e_hw *hw = &pf->hw; device_t dev = pf->dev; enum i40e_status_code status; #if defined(PCI_IOV) || defined(IXL_IW) int error; #endif INIT_DBG_DEV(dev, "begin"); #ifdef IXL_IW if (ixl_enable_iwarp && pf->iw_enabled) { error = ixl_iw_pf_detach(pf); if (error == EBUSY) { device_printf(dev, "iwarp in use; stop it first.\n"); return (error); } } #endif #ifdef PCI_IOV error = pci_iov_detach(dev); if (error != 0) { device_printf(dev, "SR-IOV in use; detach first.\n"); return (error); } #endif /* Remove all previously allocated media types */ ifmedia_removeall(vsi->media); /* Shutdown LAN HMC */ if (hw->hmc.hmc_obj) { status = i40e_shutdown_lan_hmc(hw); if (status) device_printf(dev, "i40e_shutdown_lan_hmc() failed with status %s\n", i40e_stat_str(hw, status)); } /* Shutdown admin queue */ ixl_disable_intr0(hw); status = i40e_shutdown_adminq(hw); if (status) device_printf(dev, "i40e_shutdown_adminq() failed with status %s\n", i40e_stat_str(hw, status)); ixl_pf_qmgr_destroy(&pf->qmgr); ixl_free_pci_resources(pf); ixl_free_mac_filters(vsi); INIT_DBG_DEV(dev, "end"); return (0); } /* TODO: Do shutdown-specific stuff here */ static int ixl_if_shutdown(if_ctx_t ctx) { int error = 0; INIT_DEBUGOUT("ixl_if_shutdown: begin"); /* TODO: Call ixl_if_stop()? */ /* TODO: Then setup low power mode */ return (error); } static int ixl_if_suspend(if_ctx_t ctx) { int error = 0; INIT_DEBUGOUT("ixl_if_suspend: begin"); /* TODO: Call ixl_if_stop()? */ /* TODO: Then setup low power mode */ return (error); } static int ixl_if_resume(if_ctx_t ctx) { struct ifnet *ifp = iflib_get_ifp(ctx); INIT_DEBUGOUT("ixl_if_resume: begin"); /* Read & clear wake-up registers */ /* Required after D3->D0 transition */ if (ifp->if_flags & IFF_UP) ixl_if_init(ctx); return (0); } /* Set Report Status queue fields to 0 */ static void ixl_init_tx_rsqs(struct ixl_vsi *vsi) { if_softc_ctx_t scctx = vsi->shared; struct ixl_tx_queue *tx_que; int i, j; for (i = 0, tx_que = vsi->tx_queues; i < vsi->num_tx_queues; i++, tx_que++) { struct tx_ring *txr = &tx_que->txr; txr->tx_rs_cidx = txr->tx_rs_pidx = txr->tx_cidx_processed = 0; for (j = 0; j < scctx->isc_ntxd[0]; j++) txr->tx_rsq[j] = QIDX_INVALID; } } static void ixl_init_tx_cidx(struct ixl_vsi *vsi) { struct ixl_tx_queue *tx_que; int i; for (i = 0, tx_que = vsi->tx_queues; i < vsi->num_tx_queues; i++, tx_que++) { struct tx_ring *txr = &tx_que->txr; txr->tx_cidx_processed = 0; } } void ixl_if_init(if_ctx_t ctx) { struct ixl_pf *pf = iflib_get_softc(ctx); struct ixl_vsi *vsi = &pf->vsi; struct i40e_hw *hw = &pf->hw; device_t dev = iflib_get_dev(ctx); u8 tmpaddr[ETHER_ADDR_LEN]; int ret; /* * If the aq is dead here, it probably means something outside of the driver * did something to the adapter, like a PF reset. * So rebuild the driver's state here if that occurs. 
*/ if (!i40e_check_asq_alive(&pf->hw)) { device_printf(dev, "Admin Queue is down; resetting...\n"); ixl_teardown_hw_structs(pf); ixl_reset(pf); } /* Get the latest mac address... User might use a LAA */ bcopy(IF_LLADDR(vsi->ifp), tmpaddr, ETH_ALEN); if (!cmp_etheraddr(hw->mac.addr, tmpaddr) && (i40e_validate_mac_addr(tmpaddr) == I40E_SUCCESS)) { ixl_del_filter(vsi, hw->mac.addr, IXL_VLAN_ANY); bcopy(tmpaddr, hw->mac.addr, ETH_ALEN); ret = i40e_aq_mac_address_write(hw, I40E_AQC_WRITE_TYPE_LAA_ONLY, hw->mac.addr, NULL); if (ret) { device_printf(dev, "LLA address change failed!!\n"); return; } ixl_add_filter(vsi, hw->mac.addr, IXL_VLAN_ANY); } iflib_set_mac(ctx, hw->mac.addr); /* Prepare the VSI: rings, hmc contexts, etc... */ if (ixl_initialize_vsi(vsi)) { device_printf(dev, "initialize vsi failed!!\n"); return; } // TODO: Call iflib setup multicast filters here? // It's called in ixgbe in D5213 ixl_if_multi_set(ctx); /* Set up RSS */ ixl_config_rss(pf); /* Set up MSI/X routing and the ITR settings */ if (vsi->shared->isc_intr == IFLIB_INTR_MSIX) { ixl_configure_queue_intr_msix(pf); ixl_configure_itr(pf); } else ixl_configure_legacy(pf); if (vsi->enable_head_writeback) ixl_init_tx_cidx(vsi); else ixl_init_tx_rsqs(vsi); ixl_enable_rings(vsi); i40e_aq_set_default_vsi(hw, vsi->seid, NULL); ixl_reconfigure_filters(vsi); #ifdef IXL_IW if (ixl_enable_iwarp && pf->iw_enabled) { ret = ixl_iw_pf_init(pf); if (ret) device_printf(dev, "initialize iwarp failed, code %d\n", ret); } #endif } void ixl_if_stop(if_ctx_t ctx) { struct ixl_pf *pf = iflib_get_softc(ctx); struct ixl_vsi *vsi = &pf->vsi; INIT_DEBUGOUT("ixl_if_stop: begin\n"); // TODO: This may need to be reworked #ifdef IXL_IW /* Stop iWARP device */ if (ixl_enable_iwarp && pf->iw_enabled) ixl_iw_pf_stop(pf); #endif ixl_disable_rings_intr(vsi); ixl_disable_rings(vsi); } static int ixl_if_msix_intr_assign(if_ctx_t ctx, int msix) { struct ixl_pf *pf = iflib_get_softc(ctx); struct ixl_vsi *vsi = &pf->vsi; struct ixl_rx_queue *rx_que = vsi->rx_queues; struct ixl_tx_queue *tx_que = vsi->tx_queues; int err, i, rid, vector = 0; char buf[16]; /* Admin Que must use vector 0*/ rid = vector + 1; err = iflib_irq_alloc_generic(ctx, &vsi->irq, rid, IFLIB_INTR_ADMIN, ixl_msix_adminq, pf, 0, "aq"); if (err) { iflib_irq_free(ctx, &vsi->irq); device_printf(iflib_get_dev(ctx), "Failed to register Admin que handler"); return (err); } // TODO: Re-enable this at some point // iflib_softirq_alloc_generic(ctx, rid, IFLIB_INTR_IOV, pf, 0, "ixl_iov"); /* Now set up the stations */ for (i = 0, vector = 1; i < vsi->num_rx_queues; i++, vector++, rx_que++) { rid = vector + 1; snprintf(buf, sizeof(buf), "rxq%d", i); err = iflib_irq_alloc_generic(ctx, &rx_que->que_irq, rid, IFLIB_INTR_RX, ixl_msix_que, rx_que, rx_que->rxr.me, buf); /* XXX: Does the driver work as expected if there are fewer num_rx_queues than * what's expected in the iflib context? */ if (err) { device_printf(iflib_get_dev(ctx), "Failed to allocate q int %d err: %d", i, err); vsi->num_rx_queues = i + 1; goto fail; } rx_que->msix = vector; } bzero(buf, sizeof(buf)); for (i = 0; i < vsi->num_tx_queues; i++, tx_que++) { snprintf(buf, sizeof(buf), "txq%d", i); iflib_softirq_alloc_generic(ctx, &vsi->rx_queues[i % vsi->num_rx_queues].que_irq, IFLIB_INTR_TX, tx_que, tx_que->txr.me, buf); /* TODO: Maybe call a strategy function for this to figure out which * interrupts to map Tx queues to. I don't know if there's an immediately * better way than this other than a user-supplied map, though. 
*/ tx_que->msix = (i % vsi->num_rx_queues) + 1; } return (0); fail: iflib_irq_free(ctx, &vsi->irq); rx_que = vsi->rx_queues; for (int i = 0; i < vsi->num_rx_queues; i++, rx_que++) iflib_irq_free(ctx, &rx_que->que_irq); return (err); } /* * Enable all interrupts * * Called in: * iflib_init_locked, after ixl_if_init() */ static void ixl_if_enable_intr(if_ctx_t ctx) { struct ixl_pf *pf = iflib_get_softc(ctx); struct ixl_vsi *vsi = &pf->vsi; struct i40e_hw *hw = vsi->hw; struct ixl_rx_queue *que = vsi->rx_queues; ixl_enable_intr0(hw); /* Enable queue interrupts */ for (int i = 0; i < vsi->num_rx_queues; i++, que++) /* TODO: Queue index parameter is probably wrong */ ixl_enable_queue(hw, que->rxr.me); } /* * Disable queue interrupts * * Other interrupt causes need to remain active. */ static void ixl_if_disable_intr(if_ctx_t ctx) { struct ixl_pf *pf = iflib_get_softc(ctx); struct ixl_vsi *vsi = &pf->vsi; struct i40e_hw *hw = vsi->hw; struct ixl_rx_queue *rx_que = vsi->rx_queues; if (vsi->shared->isc_intr == IFLIB_INTR_MSIX) { for (int i = 0; i < vsi->num_rx_queues; i++, rx_que++) ixl_disable_queue(hw, rx_que->msix - 1); } else { // Set PFINT_LNKLST0 FIRSTQ_INDX to 0x7FF // stops queues from triggering interrupts wr32(hw, I40E_PFINT_LNKLST0, 0x7FF); } } static int ixl_if_rx_queue_intr_enable(if_ctx_t ctx, uint16_t rxqid) { struct ixl_pf *pf = iflib_get_softc(ctx); struct ixl_vsi *vsi = &pf->vsi; struct i40e_hw *hw = vsi->hw; struct ixl_rx_queue *rx_que = &vsi->rx_queues[rxqid]; ixl_enable_queue(hw, rx_que->msix - 1); return (0); } static int ixl_if_tx_queue_intr_enable(if_ctx_t ctx, uint16_t txqid) { struct ixl_pf *pf = iflib_get_softc(ctx); struct ixl_vsi *vsi = &pf->vsi; struct i40e_hw *hw = vsi->hw; struct ixl_tx_queue *tx_que = &vsi->tx_queues[txqid]; ixl_enable_queue(hw, tx_que->msix - 1); return (0); } static int ixl_if_tx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs, int ntxqs, int ntxqsets) { struct ixl_pf *pf = iflib_get_softc(ctx); struct ixl_vsi *vsi = &pf->vsi; if_softc_ctx_t scctx = vsi->shared; struct ixl_tx_queue *que; // int i; int i, j, error = 0; MPASS(vsi->num_tx_queues > 0); MPASS(ntxqs == 1); MPASS(vsi->num_tx_queues == ntxqsets); /* Allocate queue structure memory */ if (!(vsi->tx_queues = (struct ixl_tx_queue *) malloc(sizeof(struct ixl_tx_queue) *ntxqsets, M_IXL, M_NOWAIT | M_ZERO))) { device_printf(iflib_get_dev(ctx), "Unable to allocate TX ring memory\n"); return (ENOMEM); } for (i = 0, que = vsi->tx_queues; i < ntxqsets; i++, que++) { struct tx_ring *txr = &que->txr; txr->me = i; que->vsi = vsi; if (!vsi->enable_head_writeback) { /* Allocate report status array */ if (!(txr->tx_rsq = malloc(sizeof(qidx_t) * scctx->isc_ntxd[0], M_IXL, M_NOWAIT))) { device_printf(iflib_get_dev(ctx), "failed to allocate tx_rsq memory\n"); error = ENOMEM; goto fail; } /* Init report status array */ for (j = 0; j < scctx->isc_ntxd[0]; j++) txr->tx_rsq[j] = QIDX_INVALID; } /* get the virtual and physical address of the hardware queues */ txr->tail = I40E_QTX_TAIL(txr->me); txr->tx_base = (struct i40e_tx_desc *)vaddrs[i * ntxqs]; txr->tx_paddr = paddrs[i * ntxqs]; txr->que = que; } return (0); fail: ixl_if_queues_free(ctx); return (error); } static int ixl_if_rx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs, int nrxqs, int nrxqsets) { struct ixl_pf *pf = iflib_get_softc(ctx); struct ixl_vsi *vsi = &pf->vsi; struct ixl_rx_queue *que; int i, error = 0; MPASS(vsi->num_rx_queues > 0); MPASS(nrxqs == 1); MPASS(vsi->num_rx_queues == nrxqsets); /* Allocate queue 
	    structure memory */
	if (!(vsi->rx_queues =
	    (struct ixl_rx_queue *) malloc(sizeof(struct ixl_rx_queue) *
	    nrxqsets, M_IXL, M_NOWAIT | M_ZERO))) {
		device_printf(iflib_get_dev(ctx), "Unable to allocate RX ring memory\n");
		error = ENOMEM;
		goto fail;
	}

	for (i = 0, que = vsi->rx_queues; i < nrxqsets; i++, que++) {
		struct rx_ring *rxr = &que->rxr;

		rxr->me = i;
		que->vsi = vsi;

		/* get the virtual and physical address of the hardware queues */
		rxr->tail = I40E_QRX_TAIL(rxr->me);
		rxr->rx_base = (union i40e_rx_desc *)vaddrs[i * nrxqs];
		rxr->rx_paddr = paddrs[i * nrxqs];
		rxr->que = que;
	}

	return (0);
fail:
	ixl_if_queues_free(ctx);
	return (error);
}

static void
ixl_if_queues_free(if_ctx_t ctx)
{
	struct ixl_pf *pf = iflib_get_softc(ctx);
	struct ixl_vsi *vsi = &pf->vsi;

	/* tx_rsq is only allocated when head write-back is disabled */
	if (!vsi->enable_head_writeback) {
		struct ixl_tx_queue *que;
		int i = 0;

		for (i = 0, que = vsi->tx_queues; i < vsi->num_tx_queues; i++, que++) {
			struct tx_ring *txr = &que->txr;
			if (txr->tx_rsq != NULL) {
				free(txr->tx_rsq, M_IXL);
				txr->tx_rsq = NULL;
			}
		}
	}

	if (vsi->tx_queues != NULL) {
		free(vsi->tx_queues, M_IXL);
		vsi->tx_queues = NULL;
	}
	if (vsi->rx_queues != NULL) {
		free(vsi->rx_queues, M_IXL);
		vsi->rx_queues = NULL;
	}
}

void
ixl_update_link_status(struct ixl_pf *pf)
{
	struct ixl_vsi *vsi = &pf->vsi;
	struct i40e_hw *hw = &pf->hw;
	u64 baudrate;

	if (pf->link_up) {
		if (vsi->link_active == FALSE) {
			vsi->link_active = TRUE;
			baudrate = ixl_max_aq_speed_to_value(hw->phy.link_info.link_speed);
			iflib_link_state_change(vsi->ctx, LINK_STATE_UP, baudrate);
			ixl_link_up_msg(pf);
#ifdef PCI_IOV
			ixl_broadcast_link_state(pf);
#endif
		}
	} else { /* Link down */
		if (vsi->link_active == TRUE) {
			vsi->link_active = FALSE;
			iflib_link_state_change(vsi->ctx, LINK_STATE_DOWN, 0);
#ifdef PCI_IOV
			ixl_broadcast_link_state(pf);
#endif
		}
	}
}

static int
ixl_process_adminq(struct ixl_pf *pf, u16 *pending)
{
	enum i40e_status_code status = I40E_SUCCESS;
	struct i40e_arq_event_info event;
	struct i40e_hw *hw = &pf->hw;
	device_t dev = pf->dev;
	u16 opcode;
	u32 loop = 0, reg;

	event.buf_len = IXL_AQ_BUF_SZ;
	event.msg_buf = malloc(event.buf_len, M_IXL, M_NOWAIT | M_ZERO);
	if (!event.msg_buf) {
		device_printf(dev, "%s: Unable to allocate memory for Admin"
		    " Queue event!\n", __func__);
		return (ENOMEM);
	}

	/* clean and process any events */
	do {
		status = i40e_clean_arq_element(hw, &event, pending);
		if (status)
			break;
		opcode = LE16_TO_CPU(event.desc.opcode);
		ixl_dbg(pf, IXL_DBG_AQ,
		    "Admin Queue event: %#06x\n", opcode);
		switch (opcode) {
		case i40e_aqc_opc_get_link_status:
			ixl_link_event(pf, &event);
			break;
		case i40e_aqc_opc_send_msg_to_pf:
#ifdef PCI_IOV
			ixl_handle_vf_msg(pf, &event);
#endif
			break;
		/*
		 * This should only occur on no-drop queues, which
		 * aren't currently configured.
*/ case i40e_aqc_opc_event_lan_overflow: device_printf(dev, "LAN overflow event\n"); break; default: break; } } while (*pending && (loop++ < IXL_ADM_LIMIT)); free(event.msg_buf, M_IXL); /* Re-enable admin queue interrupt cause */ reg = rd32(hw, I40E_PFINT_ICR0_ENA); reg |= I40E_PFINT_ICR0_ENA_ADMINQ_MASK; wr32(hw, I40E_PFINT_ICR0_ENA, reg); return (status); } static void ixl_if_update_admin_status(if_ctx_t ctx) { struct ixl_pf *pf = iflib_get_softc(ctx); struct i40e_hw *hw = &pf->hw; u16 pending; if (pf->state & IXL_PF_STATE_ADAPTER_RESETTING) ixl_handle_empr_reset(pf); if (pf->state & IXL_PF_STATE_MDD_PENDING) ixl_handle_mdd_event(pf); #ifdef PCI_IOV if (pf->state & IXL_PF_STATE_VF_RESET_REQ) iflib_iov_intr_deferred(ctx); #endif ixl_process_adminq(pf, &pending); ixl_update_link_status(pf); /* * If there are still messages to process, reschedule ourselves. * Otherwise, re-enable our interrupt and go to sleep. */ if (pending > 0) iflib_admin_intr_deferred(ctx); else ixl_enable_intr0(hw); } static void ixl_if_multi_set(if_ctx_t ctx) { struct ixl_pf *pf = iflib_get_softc(ctx); struct ixl_vsi *vsi = &pf->vsi; struct i40e_hw *hw = vsi->hw; int mcnt = 0, flags; IOCTL_DEBUGOUT("ixl_if_multi_set: begin"); mcnt = if_multiaddr_count(iflib_get_ifp(ctx), MAX_MULTICAST_ADDR); /* delete existing MC filters */ ixl_del_multi(vsi); if (__predict_false(mcnt == MAX_MULTICAST_ADDR)) { i40e_aq_set_vsi_multicast_promiscuous(hw, vsi->seid, TRUE, NULL); return; } /* (re-)install filters for all mcast addresses */ mcnt = if_multi_apply(iflib_get_ifp(ctx), ixl_mc_filter_apply, vsi); if (mcnt > 0) { flags = (IXL_FILTER_ADD | IXL_FILTER_USED | IXL_FILTER_MC); ixl_add_hw_filters(vsi, flags, mcnt); } IOCTL_DEBUGOUT("ixl_if_multi_set: end"); } static int ixl_if_mtu_set(if_ctx_t ctx, uint32_t mtu) { struct ixl_pf *pf = iflib_get_softc(ctx); struct ixl_vsi *vsi = &pf->vsi; IOCTL_DEBUGOUT("ioctl: SIOCSIFMTU (Set Interface MTU)"); if (mtu > IXL_MAX_FRAME - ETHER_HDR_LEN - ETHER_CRC_LEN - ETHER_VLAN_ENCAP_LEN) return (EINVAL); vsi->shared->isc_max_frame_size = mtu + ETHER_HDR_LEN + ETHER_CRC_LEN + ETHER_VLAN_ENCAP_LEN; return (0); } static void ixl_if_media_status(if_ctx_t ctx, struct ifmediareq *ifmr) { struct ixl_pf *pf = iflib_get_softc(ctx); struct i40e_hw *hw = &pf->hw; INIT_DEBUGOUT("ixl_media_status: begin"); ifmr->ifm_status = IFM_AVALID; ifmr->ifm_active = IFM_ETHER; if (!pf->link_up) { return; } ifmr->ifm_status |= IFM_ACTIVE; /* Hardware is always full-duplex */ ifmr->ifm_active |= IFM_FDX; switch (hw->phy.link_info.phy_type) { /* 100 M */ case I40E_PHY_TYPE_100BASE_TX: ifmr->ifm_active |= IFM_100_TX; break; /* 1 G */ case I40E_PHY_TYPE_1000BASE_T: ifmr->ifm_active |= IFM_1000_T; break; case I40E_PHY_TYPE_1000BASE_SX: ifmr->ifm_active |= IFM_1000_SX; break; case I40E_PHY_TYPE_1000BASE_LX: ifmr->ifm_active |= IFM_1000_LX; break; case I40E_PHY_TYPE_1000BASE_T_OPTICAL: ifmr->ifm_active |= IFM_1000_T; break; /* 10 G */ case I40E_PHY_TYPE_10GBASE_SFPP_CU: ifmr->ifm_active |= IFM_10G_TWINAX; break; case I40E_PHY_TYPE_10GBASE_SR: ifmr->ifm_active |= IFM_10G_SR; break; case I40E_PHY_TYPE_10GBASE_LR: ifmr->ifm_active |= IFM_10G_LR; break; case I40E_PHY_TYPE_10GBASE_T: ifmr->ifm_active |= IFM_10G_T; break; case I40E_PHY_TYPE_XAUI: case I40E_PHY_TYPE_XFI: ifmr->ifm_active |= IFM_10G_TWINAX; break; case I40E_PHY_TYPE_10GBASE_AOC: ifmr->ifm_active |= IFM_10G_AOC; break; /* 25 G */ case I40E_PHY_TYPE_25GBASE_KR: ifmr->ifm_active |= IFM_25G_KR; break; case I40E_PHY_TYPE_25GBASE_CR: ifmr->ifm_active |= IFM_25G_CR; break; case 
I40E_PHY_TYPE_25GBASE_SR: ifmr->ifm_active |= IFM_25G_SR; break; case I40E_PHY_TYPE_25GBASE_LR: ifmr->ifm_active |= IFM_25G_LR; break; case I40E_PHY_TYPE_25GBASE_AOC: ifmr->ifm_active |= IFM_25G_AOC; break; case I40E_PHY_TYPE_25GBASE_ACC: ifmr->ifm_active |= IFM_25G_ACC; break; /* 40 G */ case I40E_PHY_TYPE_40GBASE_CR4: case I40E_PHY_TYPE_40GBASE_CR4_CU: ifmr->ifm_active |= IFM_40G_CR4; break; case I40E_PHY_TYPE_40GBASE_SR4: ifmr->ifm_active |= IFM_40G_SR4; break; case I40E_PHY_TYPE_40GBASE_LR4: ifmr->ifm_active |= IFM_40G_LR4; break; case I40E_PHY_TYPE_XLAUI: ifmr->ifm_active |= IFM_OTHER; break; case I40E_PHY_TYPE_1000BASE_KX: ifmr->ifm_active |= IFM_1000_KX; break; case I40E_PHY_TYPE_SGMII: ifmr->ifm_active |= IFM_1000_SGMII; break; /* ERJ: What's the difference between these? */ case I40E_PHY_TYPE_10GBASE_CR1_CU: case I40E_PHY_TYPE_10GBASE_CR1: ifmr->ifm_active |= IFM_10G_CR1; break; case I40E_PHY_TYPE_10GBASE_KX4: ifmr->ifm_active |= IFM_10G_KX4; break; case I40E_PHY_TYPE_10GBASE_KR: ifmr->ifm_active |= IFM_10G_KR; break; case I40E_PHY_TYPE_SFI: ifmr->ifm_active |= IFM_10G_SFI; break; /* Our single 20G media type */ case I40E_PHY_TYPE_20GBASE_KR2: ifmr->ifm_active |= IFM_20G_KR2; break; case I40E_PHY_TYPE_40GBASE_KR4: ifmr->ifm_active |= IFM_40G_KR4; break; case I40E_PHY_TYPE_XLPPI: case I40E_PHY_TYPE_40GBASE_AOC: ifmr->ifm_active |= IFM_40G_XLPPI; break; /* Unknown to driver */ default: ifmr->ifm_active |= IFM_UNKNOWN; break; } /* Report flow control status as well */ if (hw->phy.link_info.an_info & I40E_AQ_LINK_PAUSE_TX) ifmr->ifm_active |= IFM_ETH_TXPAUSE; if (hw->phy.link_info.an_info & I40E_AQ_LINK_PAUSE_RX) ifmr->ifm_active |= IFM_ETH_RXPAUSE; } static int ixl_if_media_change(if_ctx_t ctx) { struct ifmedia *ifm = iflib_get_media(ctx); INIT_DEBUGOUT("ixl_media_change: begin"); if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER) return (EINVAL); if_printf(iflib_get_ifp(ctx), "Media change is not supported.\n"); return (ENODEV); } static int ixl_if_promisc_set(if_ctx_t ctx, int flags) { struct ixl_pf *pf = iflib_get_softc(ctx); struct ixl_vsi *vsi = &pf->vsi; struct ifnet *ifp = iflib_get_ifp(ctx); struct i40e_hw *hw = vsi->hw; int err; bool uni = FALSE, multi = FALSE; if (flags & IFF_PROMISC) uni = multi = TRUE; else if (flags & IFF_ALLMULTI || if_multiaddr_count(ifp, MAX_MULTICAST_ADDR) == MAX_MULTICAST_ADDR) multi = TRUE; err = i40e_aq_set_vsi_unicast_promiscuous(hw, vsi->seid, uni, NULL, true); if (err) return (err); err = i40e_aq_set_vsi_multicast_promiscuous(hw, vsi->seid, multi, NULL); return (err); } static void ixl_if_timer(if_ctx_t ctx, uint16_t qid) { struct ixl_pf *pf = iflib_get_softc(ctx); //struct i40e_hw *hw = &pf->hw; //struct ixl_tx_queue *que = &vsi->tx_queues[qid]; #if 0 u32 mask; /* ** Check status of the queues */ mask = (I40E_PFINT_DYN_CTLN_INTENA_MASK | I40E_PFINT_DYN_CTLN_SWINT_TRIG_MASK); /* If queue param has outstanding work, trigger sw irq */ // TODO: TX queues in iflib don't use HW interrupts; does this do anything? 
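	/*
	 * Note that the live part of this timer handler (after the #endif
	 * below) acts only when qid is 0: it defers the admin queue task and
	 * refreshes the hardware statistics once per tick, so no per-queue
	 * work is done here.
	 */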
if (que->busy) wr32(hw, I40E_PFINT_DYN_CTLN(que->txr.me), mask); #endif if (qid != 0) return; /* Fire off the adminq task */ iflib_admin_intr_deferred(ctx); /* Update stats */ ixl_update_stats_counters(pf); } static void ixl_if_vlan_register(if_ctx_t ctx, u16 vtag) { struct ixl_pf *pf = iflib_get_softc(ctx); struct ixl_vsi *vsi = &pf->vsi; struct i40e_hw *hw = vsi->hw; if ((vtag == 0) || (vtag > 4095)) /* Invalid */ return; ++vsi->num_vlans; ixl_add_filter(vsi, hw->mac.addr, vtag); } static void ixl_if_vlan_unregister(if_ctx_t ctx, u16 vtag) { struct ixl_pf *pf = iflib_get_softc(ctx); struct ixl_vsi *vsi = &pf->vsi; struct i40e_hw *hw = vsi->hw; if ((vtag == 0) || (vtag > 4095)) /* Invalid */ return; --vsi->num_vlans; ixl_del_filter(vsi, hw->mac.addr, vtag); } static uint64_t ixl_if_get_counter(if_ctx_t ctx, ift_counter cnt) { struct ixl_pf *pf = iflib_get_softc(ctx); struct ixl_vsi *vsi = &pf->vsi; if_t ifp = iflib_get_ifp(ctx); switch (cnt) { case IFCOUNTER_IPACKETS: return (vsi->ipackets); case IFCOUNTER_IERRORS: return (vsi->ierrors); case IFCOUNTER_OPACKETS: return (vsi->opackets); case IFCOUNTER_OERRORS: return (vsi->oerrors); case IFCOUNTER_COLLISIONS: /* Collisions are by standard impossible in 40G/10G Ethernet */ return (0); case IFCOUNTER_IBYTES: return (vsi->ibytes); case IFCOUNTER_OBYTES: return (vsi->obytes); case IFCOUNTER_IMCASTS: return (vsi->imcasts); case IFCOUNTER_OMCASTS: return (vsi->omcasts); case IFCOUNTER_IQDROPS: return (vsi->iqdrops); case IFCOUNTER_OQDROPS: return (vsi->oqdrops); case IFCOUNTER_NOPROTO: return (vsi->noproto); default: return (if_get_counter_default(ifp, cnt)); } } static void ixl_if_vflr_handle(if_ctx_t ctx) { IXL_DEV_ERR(iflib_get_dev(ctx), ""); // TODO: call ixl_handle_vflr() } static int ixl_if_i2c_req(if_ctx_t ctx, struct ifi2creq *req) { struct ixl_pf *pf = iflib_get_softc(ctx); if (pf->read_i2c_byte == NULL) return (EINVAL); for (int i = 0; i < req->len; i++) if (pf->read_i2c_byte(pf, req->offset + i, req->dev_addr, &req->data[i])) return (EIO); return (0); } static int ixl_if_priv_ioctl(if_ctx_t ctx, u_long command, caddr_t data) { struct ixl_pf *pf = iflib_get_softc(ctx); struct ifdrv *ifd = (struct ifdrv *)data; int error = 0; /* NVM update command */ if (ifd->ifd_cmd == I40E_NVM_ACCESS) error = ixl_handle_nvmupd_cmd(pf, ifd); else error = EINVAL; return (error); } static int ixl_mc_filter_apply(void *arg, struct ifmultiaddr *ifma, int count __unused) { struct ixl_vsi *vsi = arg; if (ifma->ifma_addr->sa_family != AF_LINK) return (0); ixl_add_mc_filter(vsi, (u8*)LLADDR((struct sockaddr_dl *) ifma->ifma_addr)); return (1); } /* * Sanity check and save off tunable values. 
*/ static void ixl_save_pf_tunables(struct ixl_pf *pf) { device_t dev = pf->dev; /* Save tunable information */ pf->enable_tx_fc_filter = ixl_enable_tx_fc_filter; pf->dbg_mask = ixl_core_debug_mask; pf->hw.debug_mask = ixl_shared_debug_mask; pf->vsi.enable_head_writeback = !!(ixl_enable_head_writeback); #if 0 pf->dynamic_rx_itr = ixl_dynamic_rx_itr; pf->dynamic_tx_itr = ixl_dynamic_tx_itr; #endif if (ixl_i2c_access_method > 3 || ixl_i2c_access_method < 0) pf->i2c_access_method = 0; else pf->i2c_access_method = ixl_i2c_access_method; if (ixl_tx_itr < 0 || ixl_tx_itr > IXL_MAX_ITR) { device_printf(dev, "Invalid tx_itr value of %d set!\n", ixl_tx_itr); device_printf(dev, "tx_itr must be between %d and %d, " "inclusive\n", 0, IXL_MAX_ITR); device_printf(dev, "Using default value of %d instead\n", IXL_ITR_4K); pf->tx_itr = IXL_ITR_4K; } else pf->tx_itr = ixl_tx_itr; if (ixl_rx_itr < 0 || ixl_rx_itr > IXL_MAX_ITR) { device_printf(dev, "Invalid rx_itr value of %d set!\n", ixl_rx_itr); device_printf(dev, "rx_itr must be between %d and %d, " "inclusive\n", 0, IXL_MAX_ITR); device_printf(dev, "Using default value of %d instead\n", IXL_ITR_8K); pf->rx_itr = IXL_ITR_8K; } else pf->rx_itr = ixl_rx_itr; } Index: head/sys/dev/ixl/if_ixlv.c =================================================================== --- head/sys/dev/ixl/if_ixlv.c (revision 338948) +++ head/sys/dev/ixl/if_ixlv.c (revision 338949) @@ -1,3347 +1,3349 @@ /****************************************************************************** Copyright (c) 2013-2018, Intel Corporation All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. 3. Neither the name of the Intel Corporation nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ******************************************************************************/ /*$FreeBSD$*/ #include "ixl.h" #include "ixlv.h" /********************************************************************* * Driver version *********************************************************************/ #define IXLV_DRIVER_VERSION_MAJOR 1 #define IXLV_DRIVER_VERSION_MINOR 5 #define IXLV_DRIVER_VERSION_BUILD 4 char ixlv_driver_version[] = __XSTRING(IXLV_DRIVER_VERSION_MAJOR) "." __XSTRING(IXLV_DRIVER_VERSION_MINOR) "." 
__XSTRING(IXLV_DRIVER_VERSION_BUILD) "-iflib-k"; /********************************************************************* * PCI Device ID Table * * Used by probe to select devices to load on * * ( Vendor ID, Device ID, Branding String ) *********************************************************************/ static pci_vendor_info_t ixlv_vendor_info_array[] = { {I40E_INTEL_VENDOR_ID, I40E_DEV_ID_VF, 0, 0, 0}, {I40E_INTEL_VENDOR_ID, I40E_DEV_ID_X722_VF, 0, 0, 0}, {I40E_INTEL_VENDOR_ID, I40E_DEV_ID_ADAPTIVE_VF, 0, 0, 0}, /* required last entry */ PVID_END }; /********************************************************************* * Function prototypes *********************************************************************/ static void *ixlv_register(device_t dev); static int ixlv_if_attach_pre(if_ctx_t ctx); static int ixlv_if_attach_post(if_ctx_t ctx); static int ixlv_if_detach(if_ctx_t ctx); static int ixlv_if_shutdown(if_ctx_t ctx); static int ixlv_if_suspend(if_ctx_t ctx); static int ixlv_if_resume(if_ctx_t ctx); static int ixlv_if_msix_intr_assign(if_ctx_t ctx, int msix); static void ixlv_if_enable_intr(if_ctx_t ctx); static void ixlv_if_disable_intr(if_ctx_t ctx); static int ixlv_if_queue_intr_enable(if_ctx_t ctx, uint16_t rxqid); static int ixlv_if_tx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs, int ntxqs, int ntxqsets); static int ixlv_if_rx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs, int nqs, int nqsets); static void ixlv_if_queues_free(if_ctx_t ctx); static void ixlv_if_update_admin_status(if_ctx_t ctx); static void ixlv_if_multi_set(if_ctx_t ctx); static int ixlv_if_mtu_set(if_ctx_t ctx, uint32_t mtu); static void ixlv_if_media_status(if_ctx_t ctx, struct ifmediareq *ifmr); static int ixlv_if_media_change(if_ctx_t ctx); static int ixlv_if_promisc_set(if_ctx_t ctx, int flags); static void ixlv_if_timer(if_ctx_t ctx, uint16_t qid); static void ixlv_if_vlan_register(if_ctx_t ctx, u16 vtag); static void ixlv_if_vlan_unregister(if_ctx_t ctx, u16 vtag); static uint64_t ixlv_if_get_counter(if_ctx_t ctx, ift_counter cnt); static void ixlv_if_stop(if_ctx_t ctx); static int ixlv_allocate_pci_resources(struct ixlv_sc *); static int ixlv_reset_complete(struct i40e_hw *); static int ixlv_setup_vc(struct ixlv_sc *); static int ixlv_reset(struct ixlv_sc *); static int ixlv_vf_config(struct ixlv_sc *); static void ixlv_init_filters(struct ixlv_sc *); static void ixlv_free_pci_resources(struct ixlv_sc *); static void ixlv_free_filters(struct ixlv_sc *); static void ixlv_setup_interface(device_t, struct ixl_vsi *); static void ixlv_add_sysctls(struct ixlv_sc *); static void ixlv_enable_adminq_irq(struct i40e_hw *); static void ixlv_disable_adminq_irq(struct i40e_hw *); static void ixlv_enable_queue_irq(struct i40e_hw *, int); static void ixlv_disable_queue_irq(struct i40e_hw *, int); static void ixlv_config_rss(struct ixlv_sc *); static void ixlv_stop(struct ixlv_sc *); static int ixlv_add_mac_filter(struct ixlv_sc *, u8 *, u16); static int ixlv_del_mac_filter(struct ixlv_sc *sc, u8 *macaddr); static int ixlv_msix_que(void *); static int ixlv_msix_adminq(void *); static void ixlv_do_adminq_locked(struct ixlv_sc *sc); static void ixl_init_cmd_complete(struct ixl_vc_cmd *, void *, enum i40e_status_code); static void ixlv_configure_itr(struct ixlv_sc *); static void ixlv_setup_vlan_filters(struct ixlv_sc *); static char *ixlv_vc_speed_to_string(enum virtchnl_link_speed link_speed); static int ixlv_sysctl_current_speed(SYSCTL_HANDLER_ARGS); // static void 
//	ixlv_add_sysctls(struct ixlv_sc *);
#ifdef IXL_DEBUG
static int	ixlv_sysctl_qtx_tail_handler(SYSCTL_HANDLER_ARGS);
static int	ixlv_sysctl_qrx_tail_handler(SYSCTL_HANDLER_ARGS);
#endif

/*********************************************************************
 *  FreeBSD Device Interface Entry Points
 *********************************************************************/

static device_method_t ixlv_methods[] = {
	/* Device interface */
	DEVMETHOD(device_register, ixlv_register),
	DEVMETHOD(device_probe, iflib_device_probe),
	DEVMETHOD(device_attach, iflib_device_attach),
	DEVMETHOD(device_detach, iflib_device_detach),
	DEVMETHOD(device_shutdown, iflib_device_shutdown),
	DEVMETHOD_END
};

static driver_t ixlv_driver = {
	"ixlv", ixlv_methods, sizeof(struct ixlv_sc),
};

devclass_t ixlv_devclass;
DRIVER_MODULE(ixlv, pci, ixlv_driver, ixlv_devclass, 0, 0);
-
+MODULE_PNP_INFO("U32:vendor;U32:device;U32:subvendor;U32:subdevice;U32:revision",
+    pci, ixlv, ixlv_vendor_info_array,
+    nitems(ixlv_vendor_info_array) - 1);
MODULE_DEPEND(ixlv, pci, 1, 1, 1);
MODULE_DEPEND(ixlv, ether, 1, 1, 1);
MODULE_DEPEND(ixlv, iflib, 1, 1, 1);

static device_method_t ixlv_if_methods[] = {
	DEVMETHOD(ifdi_attach_pre, ixlv_if_attach_pre),
	DEVMETHOD(ifdi_attach_post, ixlv_if_attach_post),
	DEVMETHOD(ifdi_detach, ixlv_if_detach),
	DEVMETHOD(ifdi_shutdown, ixlv_if_shutdown),
	DEVMETHOD(ifdi_suspend, ixlv_if_suspend),
	DEVMETHOD(ifdi_resume, ixlv_if_resume),
	DEVMETHOD(ifdi_init, ixlv_if_init),
	DEVMETHOD(ifdi_stop, ixlv_if_stop),
	DEVMETHOD(ifdi_msix_intr_assign, ixlv_if_msix_intr_assign),
	DEVMETHOD(ifdi_intr_enable, ixlv_if_enable_intr),
	DEVMETHOD(ifdi_intr_disable, ixlv_if_disable_intr),
	DEVMETHOD(ifdi_queue_intr_enable, ixlv_if_queue_intr_enable),
	DEVMETHOD(ifdi_tx_queues_alloc, ixlv_if_tx_queues_alloc),
	DEVMETHOD(ifdi_rx_queues_alloc, ixlv_if_rx_queues_alloc),
	DEVMETHOD(ifdi_queues_free, ixlv_if_queues_free),
	DEVMETHOD(ifdi_update_admin_status, ixlv_if_update_admin_status),
	DEVMETHOD(ifdi_multi_set, ixlv_if_multi_set),
	DEVMETHOD(ifdi_mtu_set, ixlv_if_mtu_set),
	// DEVMETHOD(ifdi_crcstrip_set, ixlv_if_crcstrip_set),
	DEVMETHOD(ifdi_media_status, ixlv_if_media_status),
	DEVMETHOD(ifdi_media_change, ixlv_if_media_change),
	DEVMETHOD(ifdi_promisc_set, ixlv_if_promisc_set),
	DEVMETHOD(ifdi_timer, ixlv_if_timer),
	DEVMETHOD(ifdi_vlan_register, ixlv_if_vlan_register),
	DEVMETHOD(ifdi_vlan_unregister, ixlv_if_vlan_unregister),
	DEVMETHOD(ifdi_get_counter, ixlv_if_get_counter),
	DEVMETHOD_END
};

static driver_t ixlv_if_driver = {
	"ixlv_if", ixlv_if_methods, sizeof(struct ixlv_sc)
};

/*
** TUNEABLE PARAMETERS:
*/

static SYSCTL_NODE(_hw, OID_AUTO, ixlv, CTLFLAG_RD, 0,
    "IXLV driver parameters");

/*
** Number of descriptors per ring:
** - TX and RX sizes are independently configurable
*/
static int ixlv_tx_ring_size = IXL_DEFAULT_RING;
TUNABLE_INT("hw.ixlv.tx_ring_size", &ixlv_tx_ring_size);
SYSCTL_INT(_hw_ixlv, OID_AUTO, tx_ring_size, CTLFLAG_RDTUN,
    &ixlv_tx_ring_size, 0, "TX Descriptor Ring Size");

static int ixlv_rx_ring_size = IXL_DEFAULT_RING;
TUNABLE_INT("hw.ixlv.rx_ring_size", &ixlv_rx_ring_size);
SYSCTL_INT(_hw_ixlv, OID_AUTO, rx_ring_size, CTLFLAG_RDTUN,
    &ixlv_rx_ring_size, 0, "RX Descriptor Ring Size");

/* Set to zero to auto calculate */
int ixlv_max_queues = 0;
TUNABLE_INT("hw.ixlv.max_queues", &ixlv_max_queues);
SYSCTL_INT(_hw_ixlv, OID_AUTO, max_queues, CTLFLAG_RDTUN,
    &ixlv_max_queues, 0, "Number of Queues");

/*
 * Different method for processing TX descriptor
 * completion.
*/ static int ixlv_enable_head_writeback = 0; TUNABLE_INT("hw.ixlv.enable_head_writeback", &ixlv_enable_head_writeback); SYSCTL_INT(_hw_ixlv, OID_AUTO, enable_head_writeback, CTLFLAG_RDTUN, &ixlv_enable_head_writeback, 0, "For detecting last completed TX descriptor by hardware, use value written by HW instead of checking descriptors"); /* ** Controls for Interrupt Throttling ** - true/false for dynamic adjustment ** - default values for static ITR */ int ixlv_dynamic_rx_itr = 0; TUNABLE_INT("hw.ixlv.dynamic_rx_itr", &ixlv_dynamic_rx_itr); SYSCTL_INT(_hw_ixlv, OID_AUTO, dynamic_rx_itr, CTLFLAG_RDTUN, &ixlv_dynamic_rx_itr, 0, "Dynamic RX Interrupt Rate"); int ixlv_dynamic_tx_itr = 0; TUNABLE_INT("hw.ixlv.dynamic_tx_itr", &ixlv_dynamic_tx_itr); SYSCTL_INT(_hw_ixlv, OID_AUTO, dynamic_tx_itr, CTLFLAG_RDTUN, &ixlv_dynamic_tx_itr, 0, "Dynamic TX Interrupt Rate"); int ixlv_rx_itr = IXL_ITR_8K; TUNABLE_INT("hw.ixlv.rx_itr", &ixlv_rx_itr); SYSCTL_INT(_hw_ixlv, OID_AUTO, rx_itr, CTLFLAG_RDTUN, &ixlv_rx_itr, 0, "RX Interrupt Rate"); int ixlv_tx_itr = IXL_ITR_4K; TUNABLE_INT("hw.ixlv.tx_itr", &ixlv_tx_itr); SYSCTL_INT(_hw_ixlv, OID_AUTO, tx_itr, CTLFLAG_RDTUN, &ixlv_tx_itr, 0, "TX Interrupt Rate"); extern struct if_txrx ixl_txrx; static struct if_shared_ctx ixlv_sctx_init = { .isc_magic = IFLIB_MAGIC, .isc_q_align = PAGE_SIZE,/* max(DBA_ALIGN, PAGE_SIZE) */ .isc_tx_maxsize = IXL_TSO_SIZE + sizeof(struct ether_vlan_header), .isc_tx_maxsegsize = PAGE_SIZE, .isc_tso_maxsize = IXL_TSO_SIZE + sizeof(struct ether_vlan_header), .isc_tso_maxsegsize = PAGE_SIZE, // TODO: Review the rx_maxsize and rx_maxsegsize params // Where are they used in iflib? .isc_rx_maxsize = 16384, .isc_rx_nsegments = 1, .isc_rx_maxsegsize = 16384, // TODO: What is isc_nfl for? .isc_nfl = 1, .isc_ntxqs = 1, .isc_nrxqs = 1, .isc_admin_intrcnt = 1, .isc_vendor_info = ixlv_vendor_info_array, .isc_driver_version = ixlv_driver_version, .isc_driver = &ixlv_if_driver, .isc_nrxd_min = {IXL_MIN_RING}, .isc_ntxd_min = {IXL_MIN_RING}, .isc_nrxd_max = {IXL_MAX_RING}, .isc_ntxd_max = {IXL_MAX_RING}, .isc_nrxd_default = {IXL_DEFAULT_RING}, .isc_ntxd_default = {IXL_DEFAULT_RING}, }; if_shared_ctx_t ixlv_sctx = &ixlv_sctx_init; /*** Functions ***/ static void * ixlv_register(device_t dev) { return (ixlv_sctx); } static int ixlv_if_attach_pre(if_ctx_t ctx) { device_t dev; struct ixlv_sc *sc; struct i40e_hw *hw; struct ixl_vsi *vsi; if_softc_ctx_t scctx; int error = 0; INIT_DBG_DEV(dev, "begin"); dev = iflib_get_dev(ctx); sc = iflib_get_softc(ctx); hw = &sc->hw; /* ** Note this assumes we have a single embedded VSI, ** this could be enhanced later to allocate multiple */ vsi = &sc->vsi; vsi->dev = dev; vsi->back = sc; vsi->hw = &sc->hw; // vsi->id = 0; vsi->num_vlans = 0; vsi->ctx = ctx; vsi->media = iflib_get_media(ctx); vsi->shared = scctx = iflib_get_softc_ctx(ctx); sc->dev = dev; /* Initialize hw struct */ ixlv_init_hw(sc); /* * These are the same across all current ixl models */ vsi->shared->isc_tx_nsegments = IXL_MAX_TX_SEGS; vsi->shared->isc_msix_bar = PCIR_BAR(IXL_MSIX_BAR); vsi->shared->isc_tx_tso_segments_max = IXL_MAX_TSO_SEGS; vsi->shared->isc_tx_tso_size_max = IXL_TSO_SIZE; vsi->shared->isc_tx_tso_segsize_max = PAGE_SIZE; /* Save this tunable */ vsi->enable_head_writeback = ixlv_enable_head_writeback; scctx->isc_txqsizes[0] = roundup2(scctx->isc_ntxd[0] * sizeof(struct i40e_tx_desc) + sizeof(u32), DBA_ALIGN); scctx->isc_rxqsizes[0] = roundup2(scctx->isc_nrxd[0] * sizeof(union i40e_32byte_rx_desc), DBA_ALIGN); /* XXX: No idea what this does 
*/ /* TODO: This value may depend on resources received */ scctx->isc_max_txqsets = scctx->isc_max_rxqsets = 16; /* Do PCI setup - map BAR0, etc */ if (ixlv_allocate_pci_resources(sc)) { device_printf(dev, "%s: Allocation of PCI resources failed\n", __func__); error = ENXIO; goto err_early; } INIT_DBG_DEV(dev, "Allocated PCI resources and MSIX vectors"); /* XXX: This is called by init_shared_code in the PF driver */ error = i40e_set_mac_type(hw); if (error) { device_printf(dev, "%s: set_mac_type failed: %d\n", __func__, error); goto err_pci_res; } error = ixlv_reset_complete(hw); if (error) { device_printf(dev, "%s: Device is still being reset\n", __func__); goto err_pci_res; } INIT_DBG_DEV(dev, "VF Device is ready for configuration"); /* Sets up Admin Queue */ error = ixlv_setup_vc(sc); if (error) { device_printf(dev, "%s: Error setting up PF comms, %d\n", __func__, error); goto err_pci_res; } INIT_DBG_DEV(dev, "PF API version verified"); /* Need API version before sending reset message */ error = ixlv_reset(sc); if (error) { device_printf(dev, "VF reset failed; reload the driver\n"); goto err_aq; } INIT_DBG_DEV(dev, "VF reset complete"); /* Ask for VF config from PF */ error = ixlv_vf_config(sc); if (error) { device_printf(dev, "Error getting configuration from PF: %d\n", error); goto err_aq; } device_printf(dev, "VSIs %d, QPs %d, MSIX %d, RSS sizes: key %d lut %d\n", sc->vf_res->num_vsis, sc->vf_res->num_queue_pairs, sc->vf_res->max_vectors, sc->vf_res->rss_key_size, sc->vf_res->rss_lut_size); #ifdef IXL_DEBUG device_printf(dev, "Offload flags: 0x%b\n", sc->vf_res->vf_offload_flags, IXLV_PRINTF_VF_OFFLOAD_FLAGS); #endif /* got VF config message back from PF, now we can parse it */ for (int i = 0; i < sc->vf_res->num_vsis; i++) { if (sc->vf_res->vsi_res[i].vsi_type == I40E_VSI_SRIOV) sc->vsi_res = &sc->vf_res->vsi_res[i]; } if (!sc->vsi_res) { device_printf(dev, "%s: no LAN VSI found\n", __func__); error = EIO; goto err_res_buf; } vsi->id = sc->vsi_res->vsi_id; INIT_DBG_DEV(dev, "Resource Acquisition complete"); /* If no mac address was assigned just make a random one */ if (!ixlv_check_ether_addr(hw->mac.addr)) { u8 addr[ETHER_ADDR_LEN]; arc4rand(&addr, sizeof(addr), 0); addr[0] &= 0xFE; addr[0] |= 0x02; bcopy(addr, hw->mac.addr, sizeof(addr)); } bcopy(hw->mac.addr, hw->mac.perm_addr, ETHER_ADDR_LEN); iflib_set_mac(ctx, hw->mac.addr); // TODO: Is this still safe to call? 
// ixl_vsi_setup_rings_size(vsi, ixlv_tx_ring_size, ixlv_rx_ring_size); /* Allocate filter lists */ ixlv_init_filters(sc); /* Fill out more iflib parameters */ scctx->isc_txrx = &ixl_txrx; // TODO: Probably needs changing vsi->shared->isc_rss_table_size = sc->hw.func_caps.rss_table_size; scctx->isc_tx_csum_flags = CSUM_OFFLOAD; scctx->isc_capabilities = scctx->isc_capenable = IXL_CAPS; INIT_DBG_DEV(dev, "end"); return (0); err_res_buf: free(sc->vf_res, M_DEVBUF); err_aq: i40e_shutdown_adminq(hw); err_pci_res: ixlv_free_pci_resources(sc); err_early: ixlv_free_filters(sc); INIT_DBG_DEV(dev, "end: error %d", error); return (error); } static int ixlv_if_attach_post(if_ctx_t ctx) { device_t dev; struct ixlv_sc *sc; struct i40e_hw *hw; struct ixl_vsi *vsi; int error = 0; INIT_DBG_DEV(dev, "begin"); dev = iflib_get_dev(ctx); vsi = iflib_get_softc(ctx); vsi->ifp = iflib_get_ifp(ctx); sc = (struct ixlv_sc *)vsi->back; hw = &sc->hw; /* Setup the stack interface */ if (ixlv_setup_interface(dev, sc) != 0) { device_printf(dev, "%s: setup interface failed!\n", __func__); error = EIO; goto out; } INIT_DBG_DEV(dev, "Interface setup complete"); /* Initialize statistics & add sysctls */ bzero(&sc->vsi.eth_stats, sizeof(struct i40e_eth_stats)); ixlv_add_sysctls(sc); /* We want AQ enabled early */ ixlv_enable_adminq_irq(hw); INIT_DBG_DEV(dev, "end"); return (error); // TODO: Check if any failures can happen above #if 0 out: free(sc->vf_res, M_DEVBUF); i40e_shutdown_adminq(hw); ixlv_free_pci_resources(sc); ixlv_free_filters(sc); INIT_DBG_DEV(dev, "end: error %d", error); return (error); #endif } static int ixlv_if_detach(if_ctx_t ctx) { struct ixl_vsi *vsi = iflib_get_softc(ctx); struct ixlv_sc *sc = vsi->back; struct i40e_hw *hw = &sc->hw; device_t dev = sc->dev; enum i40e_status_code status; INIT_DBG_DEV(dev, "begin"); /* Remove all the media and link information */ ifmedia_removeall(&sc->media); /* Drain VC mgr */ callout_drain(&sc->vc_mgr.callout); ixlv_disable_adminq_irq(hw); status = i40e_shutdown_adminq(&sc->hw); if (status != I40E_SUCCESS) { device_printf(dev, "i40e_shutdown_adminq() failed with status %s\n", i40e_stat_str(hw, status)); } free(sc->vf_res, M_DEVBUF); ixlv_free_pci_resources(sc); ixlv_free_filters(sc); INIT_DBG_DEV(dev, "end"); return (0); } /* TODO: Do shutdown-specific stuff here */ static int ixlv_if_shutdown(if_ctx_t ctx) { int error = 0; INIT_DBG_DEV(dev, "begin"); /* TODO: Call ixl_if_stop()? */ return (error); } /* TODO: What is a VF supposed to do in suspend/resume? */ static int ixlv_if_suspend(if_ctx_t ctx) { int error = 0; INIT_DBG_DEV(dev, "begin"); /* TODO: Call ixl_if_stop()? */ return (error); } static int ixlv_if_resume(if_ctx_t ctx) { struct ifnet *ifp = iflib_get_ifp(ctx); INIT_DBG_DEV(dev, "begin"); /* Read & clear wake-up registers */ /* Required after D3->D0 transition */ if (ifp->if_flags & IFF_UP) ixlv_if_init(ctx); return (0); } #if 0 static int ixlv_ioctl(struct ifnet *ifp, u_long command, caddr_t data) { struct ixl_vsi *vsi = ifp->if_softc; struct ixlv_sc *sc = vsi->back; struct ifreq *ifr = (struct ifreq *)data; #if defined(INET) || defined(INET6) struct ifaddr *ifa = (struct ifaddr *)data; bool avoid_reset = FALSE; #endif int error = 0; switch (command) { case SIOCSIFADDR: #ifdef INET if (ifa->ifa_addr->sa_family == AF_INET) avoid_reset = TRUE; #endif #ifdef INET6 if (ifa->ifa_addr->sa_family == AF_INET6) avoid_reset = TRUE; #endif #if defined(INET) || defined(INET6) /* ** Calling init results in link renegotiation, ** so we avoid doing it when possible. 
*/ if (avoid_reset) { ifp->if_flags |= IFF_UP; if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) ixlv_init(vsi); #ifdef INET if (!(ifp->if_flags & IFF_NOARP)) arp_ifinit(ifp, ifa); #endif } else error = ether_ioctl(ifp, command, data); break; #endif case SIOCSIFMTU: IOCTL_DBG_IF2(ifp, "SIOCSIFMTU (Set Interface MTU)"); mtx_lock(&sc->mtx); if (ifr->ifr_mtu > IXL_MAX_FRAME - ETHER_HDR_LEN - ETHER_CRC_LEN - ETHER_VLAN_ENCAP_LEN) { error = EINVAL; IOCTL_DBG_IF(ifp, "mtu too large"); } else { IOCTL_DBG_IF2(ifp, "mtu: %lu -> %d", (u_long)ifp->if_mtu, ifr->ifr_mtu); // ERJ: Interestingly enough, these types don't match ifp->if_mtu = (u_long)ifr->ifr_mtu; vsi->max_frame_size = ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN + ETHER_VLAN_ENCAP_LEN; if (ifp->if_drv_flags & IFF_DRV_RUNNING) ixlv_init_locked(sc); } mtx_unlock(&sc->mtx); break; case SIOCSIFFLAGS: IOCTL_DBG_IF2(ifp, "SIOCSIFFLAGS (Set Interface Flags)"); mtx_lock(&sc->mtx); if (ifp->if_flags & IFF_UP) { if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) ixlv_init_locked(sc); } else if (ifp->if_drv_flags & IFF_DRV_RUNNING) ixlv_stop(sc); sc->if_flags = ifp->if_flags; mtx_unlock(&sc->mtx); break; case SIOCADDMULTI: IOCTL_DBG_IF2(ifp, "SIOCADDMULTI"); if (ifp->if_drv_flags & IFF_DRV_RUNNING) { mtx_lock(&sc->mtx); ixlv_disable_intr(vsi); ixlv_add_multi(vsi); ixlv_enable_intr(vsi); mtx_unlock(&sc->mtx); } break; case SIOCDELMULTI: IOCTL_DBG_IF2(ifp, "SIOCDELMULTI"); if (sc->init_state == IXLV_RUNNING) { mtx_lock(&sc->mtx); ixlv_disable_intr(vsi); ixlv_del_multi(vsi); ixlv_enable_intr(vsi); mtx_unlock(&sc->mtx); } break; case SIOCSIFMEDIA: case SIOCGIFMEDIA: IOCTL_DBG_IF2(ifp, "SIOCxIFMEDIA (Get/Set Interface Media)"); error = ifmedia_ioctl(ifp, ifr, &sc->media, command); break; case SIOCSIFCAP: { int mask = ifr->ifr_reqcap ^ ifp->if_capenable; IOCTL_DBG_IF2(ifp, "SIOCSIFCAP (Set Capabilities)"); ixlv_cap_txcsum_tso(vsi, ifp, mask); if (mask & IFCAP_RXCSUM) ifp->if_capenable ^= IFCAP_RXCSUM; if (mask & IFCAP_RXCSUM_IPV6) ifp->if_capenable ^= IFCAP_RXCSUM_IPV6; if (mask & IFCAP_LRO) ifp->if_capenable ^= IFCAP_LRO; if (mask & IFCAP_VLAN_HWTAGGING) ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING; if (mask & IFCAP_VLAN_HWFILTER) ifp->if_capenable ^= IFCAP_VLAN_HWFILTER; if (mask & IFCAP_VLAN_HWTSO) ifp->if_capenable ^= IFCAP_VLAN_HWTSO; if (ifp->if_drv_flags & IFF_DRV_RUNNING) { ixlv_init(vsi); } VLAN_CAPABILITIES(ifp); break; } default: IOCTL_DBG_IF2(ifp, "UNKNOWN (0x%X)", (int)command); error = ether_ioctl(ifp, command, data); break; } return (error); } #endif /* ** To do a reinit on the VF is unfortunately more complicated ** than a physical device, we must have the PF more or less ** completely recreate our memory, so many things that were ** done only once at attach in traditional drivers now must be ** redone at each reinitialization. This function does that ** 'prelude' so we can then call the normal locked init code. */ int ixlv_reinit_locked(struct ixlv_sc *sc) { struct i40e_hw *hw = &sc->hw; struct ixl_vsi *vsi = &sc->vsi; struct ifnet *ifp = vsi->ifp; struct ixlv_mac_filter *mf, *mf_temp; struct ixlv_vlan_filter *vf; int error = 0; INIT_DBG_IF(ifp, "begin"); if (ifp->if_drv_flags & IFF_DRV_RUNNING) ixlv_stop(sc); error = ixlv_reset(sc); INIT_DBG_IF(ifp, "VF was reset"); /* set the state in case we went thru RESET */ sc->init_state = IXLV_RUNNING; /* ** Resetting the VF drops all filters from hardware; ** we need to mark them to be re-added in init. 
*/ SLIST_FOREACH_SAFE(mf, sc->mac_filters, next, mf_temp) { if (mf->flags & IXL_FILTER_DEL) { SLIST_REMOVE(sc->mac_filters, mf, ixlv_mac_filter, next); free(mf, M_DEVBUF); } else mf->flags |= IXL_FILTER_ADD; } if (vsi->num_vlans != 0) SLIST_FOREACH(vf, sc->vlan_filters, next) vf->flags = IXL_FILTER_ADD; else { /* clean any stale filters */ while (!SLIST_EMPTY(sc->vlan_filters)) { vf = SLIST_FIRST(sc->vlan_filters); SLIST_REMOVE_HEAD(sc->vlan_filters, next); free(vf, M_DEVBUF); } } ixlv_enable_adminq_irq(hw); ixl_vc_flush(&sc->vc_mgr); INIT_DBG_IF(ifp, "end"); return (error); } static void ixl_init_cmd_complete(struct ixl_vc_cmd *cmd, void *arg, enum i40e_status_code code) { struct ixlv_sc *sc; sc = arg; /* * Ignore "Adapter Stopped" message as that happens if an ifconfig down * happens while a command is in progress, so we don't print an error * in that case. */ if (code != I40E_SUCCESS && code != I40E_ERR_ADAPTER_STOPPED) { if_printf(sc->vsi.ifp, "Error %s waiting for PF to complete operation %d\n", i40e_stat_str(&sc->hw, code), cmd->request); } } void ixlv_if_init(if_ctx_t ctx) { struct ixl_vsi *vsi = iflib_get_softc(ctx); if_softc_ctx_t scctx = vsi->shared; struct ixlv_sc *sc = vsi->back; struct i40e_hw *hw = &sc->hw; struct ifnet *ifp = iflib_get_ifp(ctx); struct ixl_tx_queue *tx_que = vsi->tx_queues; struct ixl_rx_queue *rx_que = vsi->rx_queues; int error = 0; INIT_DBG_IF(ifp, "begin"); IXLV_CORE_LOCK_ASSERT(sc); /* Do a reinit first if an init has already been done */ if ((sc->init_state == IXLV_RUNNING) || (sc->init_state == IXLV_RESET_REQUIRED) || (sc->init_state == IXLV_RESET_PENDING)) error = ixlv_reinit_locked(sc); /* Don't bother with init if we failed reinit */ if (error) goto init_done; /* Remove existing MAC filter if new MAC addr is set */ if (bcmp(IF_LLADDR(ifp), hw->mac.addr, ETHER_ADDR_LEN) != 0) { error = ixlv_del_mac_filter(sc, hw->mac.addr); if (error == 0) ixl_vc_enqueue(&sc->vc_mgr, &sc->del_mac_cmd, IXLV_FLAG_AQ_DEL_MAC_FILTER, ixl_init_cmd_complete, sc); } /* Check for an LAA mac address... */ bcopy(IF_LLADDR(ifp), hw->mac.addr, ETHER_ADDR_LEN); /* Add mac filter for this VF to PF */ if (i40e_validate_mac_addr(hw->mac.addr) == I40E_SUCCESS) { error = ixlv_add_mac_filter(sc, hw->mac.addr, 0); if (!error || error == EEXIST) ixl_vc_enqueue(&sc->vc_mgr, &sc->add_mac_cmd, IXLV_FLAG_AQ_ADD_MAC_FILTER, ixl_init_cmd_complete, sc); } /* Setup vlan's if needed */ ixlv_setup_vlan_filters(sc); // TODO: Functionize /* Prepare the queues for operation */ for (int i = 0; i < vsi->num_tx_queues; i++, tx_que++) { // TODO: Necessary? Correct? 
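		/*
		 * Assumption: ixl_init_tx_ring() (shared with the PF driver in
		 * ixl_txrx.c) re-zeroes the descriptor ring and resets the
		 * driver's ring-state tracking, so a VF reinit does not
		 * inherit stale completion state.
		 */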
ixl_init_tx_ring(vsi, tx_que); } for (int i = 0; i < vsi->num_rx_queues; i++, rx_que++) { struct rx_ring *rxr = &rx_que->rxr; if (scctx->isc_max_frame_size <= MCLBYTES) rxr->mbuf_sz = MCLBYTES; else rxr->mbuf_sz = MJUMPAGESIZE; } /* Set initial ITR values */ ixlv_configure_itr(sc); /* Configure queues */ ixl_vc_enqueue(&sc->vc_mgr, &sc->config_queues_cmd, IXLV_FLAG_AQ_CONFIGURE_QUEUES, ixl_init_cmd_complete, sc); /* Set up RSS */ ixlv_config_rss(sc); /* Map vectors */ ixl_vc_enqueue(&sc->vc_mgr, &sc->map_vectors_cmd, IXLV_FLAG_AQ_MAP_VECTORS, ixl_init_cmd_complete, sc); /* Enable queues */ ixl_vc_enqueue(&sc->vc_mgr, &sc->enable_queues_cmd, IXLV_FLAG_AQ_ENABLE_QUEUES, ixl_init_cmd_complete, sc); sc->init_state = IXLV_RUNNING; init_done: INIT_DBG_IF(ifp, "end"); return; } #if 0 void ixlv_init(void *arg) { struct ixl_vsi *vsi = (struct ixl_vsi *)arg; struct ixlv_sc *sc = vsi->back; int retries = 0; /* Prevent init from running again while waiting for AQ calls * made in init_locked() to complete. */ mtx_lock(&sc->mtx); if (sc->init_in_progress) { mtx_unlock(&sc->mtx); return; } else sc->init_in_progress = true; ixlv_init_locked(sc); mtx_unlock(&sc->mtx); /* Wait for init_locked to finish */ while (!(vsi->ifp->if_drv_flags & IFF_DRV_RUNNING) && ++retries < IXLV_MAX_INIT_WAIT) { i40e_msec_pause(25); } if (retries >= IXLV_MAX_INIT_WAIT) { if_printf(vsi->ifp, "Init failed to complete in allotted time!\n"); } mtx_lock(&sc->mtx); sc->init_in_progress = false; mtx_unlock(&sc->mtx); } /* * ixlv_attach() helper function; gathers information about * the (virtual) hardware for use elsewhere in the driver. */ static void ixlv_init_hw(struct ixlv_sc *sc) { struct i40e_hw *hw = &sc->hw; device_t dev = sc->dev; /* Save off the information about this board */ hw->vendor_id = pci_get_vendor(dev); hw->device_id = pci_get_device(dev); hw->revision_id = pci_read_config(dev, PCIR_REVID, 1); hw->subsystem_vendor_id = pci_read_config(dev, PCIR_SUBVEND_0, 2); hw->subsystem_device_id = pci_read_config(dev, PCIR_SUBDEV_0, 2); hw->bus.device = pci_get_slot(dev); hw->bus.func = pci_get_function(dev); } #endif /* * ixlv_attach() helper function; initalizes the admin queue * and attempts to establish contact with the PF by * retrying the initial "API version" message several times * or until the PF responds. 
*/ static int ixlv_setup_vc(struct ixlv_sc *sc) { struct i40e_hw *hw = &sc->hw; device_t dev = sc->dev; int error = 0, ret_error = 0, asq_retries = 0; bool send_api_ver_retried = 0; /* Need to set these AQ paramters before initializing AQ */ hw->aq.num_arq_entries = IXL_AQ_LEN; hw->aq.num_asq_entries = IXL_AQ_LEN; hw->aq.arq_buf_size = IXL_AQ_BUF_SZ; hw->aq.asq_buf_size = IXL_AQ_BUF_SZ; for (int i = 0; i < IXLV_AQ_MAX_ERR; i++) { /* Initialize admin queue */ error = i40e_init_adminq(hw); if (error) { device_printf(dev, "%s: init_adminq failed: %d\n", __func__, error); ret_error = 1; continue; } INIT_DBG_DEV(dev, "Initialized Admin Queue; starting" " send_api_ver attempt %d", i+1); retry_send: /* Send VF's API version */ error = ixlv_send_api_ver(sc); if (error) { i40e_shutdown_adminq(hw); ret_error = 2; device_printf(dev, "%s: unable to send api" " version to PF on attempt %d, error %d\n", __func__, i+1, error); } asq_retries = 0; while (!i40e_asq_done(hw)) { if (++asq_retries > IXLV_AQ_MAX_ERR) { i40e_shutdown_adminq(hw); device_printf(dev, "Admin Queue timeout " "(waiting for send_api_ver), %d more tries...\n", IXLV_AQ_MAX_ERR - (i + 1)); ret_error = 3; break; } i40e_msec_pause(10); } if (asq_retries > IXLV_AQ_MAX_ERR) continue; INIT_DBG_DEV(dev, "Sent API version message to PF"); /* Verify that the VF accepts the PF's API version */ error = ixlv_verify_api_ver(sc); if (error == ETIMEDOUT) { if (!send_api_ver_retried) { /* Resend message, one more time */ send_api_ver_retried = true; device_printf(dev, "%s: Timeout while verifying API version on first" " try!\n", __func__); goto retry_send; } else { device_printf(dev, "%s: Timeout while verifying API version on second" " try!\n", __func__); ret_error = 4; break; } } if (error) { device_printf(dev, "%s: Unable to verify API version," " error %s\n", __func__, i40e_stat_str(hw, error)); ret_error = 5; } break; } if (ret_error >= 4) i40e_shutdown_adminq(hw); return (ret_error); } /* * ixlv_attach() helper function; asks the PF for this VF's * configuration, and saves the information if it receives it. 
*/ static int ixlv_vf_config(struct ixlv_sc *sc) { struct i40e_hw *hw = &sc->hw; device_t dev = sc->dev; int bufsz, error = 0, ret_error = 0; int asq_retries, retried = 0; retry_config: error = ixlv_send_vf_config_msg(sc); if (error) { device_printf(dev, "%s: Unable to send VF config request, attempt %d," " error %d\n", __func__, retried + 1, error); ret_error = 2; } asq_retries = 0; while (!i40e_asq_done(hw)) { if (++asq_retries > IXLV_AQ_MAX_ERR) { device_printf(dev, "%s: Admin Queue timeout " "(waiting for send_vf_config_msg), attempt %d\n", __func__, retried + 1); ret_error = 3; goto fail; } i40e_msec_pause(10); } INIT_DBG_DEV(dev, "Sent VF config message to PF, attempt %d", retried + 1); if (!sc->vf_res) { bufsz = sizeof(struct virtchnl_vf_resource) + (I40E_MAX_VF_VSI * sizeof(struct virtchnl_vsi_resource)); sc->vf_res = malloc(bufsz, M_DEVBUF, M_NOWAIT); if (!sc->vf_res) { device_printf(dev, "%s: Unable to allocate memory for VF configuration" " message from PF on attempt %d\n", __func__, retried + 1); ret_error = 1; goto fail; } } /* Check for VF config response */ error = ixlv_get_vf_config(sc); if (error == ETIMEDOUT) { /* The 1st time we timeout, send the configuration message again */ if (!retried) { retried++; goto retry_config; } device_printf(dev, "%s: ixlv_get_vf_config() timed out waiting for a response\n", __func__); } if (error) { device_printf(dev, "%s: Unable to get VF configuration from PF after %d tries!\n", __func__, retried + 1); ret_error = 4; } goto done; fail: free(sc->vf_res, M_DEVBUF); done: return (ret_error); } static int ixlv_if_msix_intr_assign(if_ctx_t ctx, int msix) { struct ixl_vsi *vsi = iflib_get_softc(ctx); struct ixlv_sc *sc = vsi->back; struct ixl_rx_queue *que = vsi->rx_queues; struct ixl_tx_queue *tx_que = vsi->tx_queues; int err, i, rid, vector = 0; char buf[16]; /* Admin Que is vector 0*/ rid = vector + 1; err = iflib_irq_alloc_generic(ctx, &vsi->irq, rid, IFLIB_INTR_ADMIN, ixlv_msix_adminq, sc, 0, "aq"); if (err) { iflib_irq_free(ctx, &vsi->irq); device_printf(iflib_get_dev(ctx), "Failed to register Admin que handler"); return (err); } sc->admvec = vector; ++vector; /* Now set up the stations */ for (i = 0; i < vsi->num_rx_queues; i++, vector++, que++) { rid = vector + 1; snprintf(buf, sizeof(buf), "rxq%d", i); err = iflib_irq_alloc_generic(ctx, &que->que_irq, rid, IFLIB_INTR_RX, ixlv_msix_que, que, que->rxr.me, buf); if (err) { device_printf(iflib_get_dev(ctx), "Failed to allocate q int %d err: %d", i, err); vsi->num_rx_queues = i + 1; goto fail; } que->msix = vector; } for (i = 0, tx_que = vsi->tx_queues; i < vsi->num_tx_queues; i++, tx_que++) { snprintf(buf, sizeof(buf), "txq%d", i); rid = que->msix + 1; iflib_softirq_alloc_generic(ctx, rid, IFLIB_INTR_TX, tx_que, tx_que->txr.me, buf); } return (0); fail: iflib_irq_free(ctx, &vsi->irq); que = vsi->rx_queues; for (int i = 0; i < vsi->num_rx_queues; i++, que++) iflib_irq_free(ctx, &que->que_irq); return (err); } /* Enable all interrupts */ static void ixlv_if_enable_intr(if_ctx_t ctx) { struct ixl_vsi *vsi = iflib_get_softc(ctx); ixlv_enable_intr(vsi); } /* Disable all interrupts */ static void ixlv_if_disable_intr(if_ctx_t ctx) { struct ixl_vsi *vsi = iflib_get_softc(ctx); ixlv_disable_intr(vsi); } /* Enable queue interrupt */ static int ixlv_if_queue_intr_enable(if_ctx_t ctx, uint16_t rxqid) { struct ixl_vsi *vsi = iflib_get_softc(ctx); struct i40e_hw *hw = vsi->hw; struct ixl_rx_queue *que = &vsi->rx_queues[rxqid]; ixlv_enable_queue_irq(hw, que->rxr.me); return (0); } static int 
ixlv_if_tx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs, int ntxqs, int ntxqsets) { struct ixl_vsi *vsi = iflib_get_softc(ctx); struct ixl_tx_queue *que; int i; MPASS(vsi->num_tx_queues > 0); MPASS(ntxqs == 1); MPASS(vsi->num_tx_queues == ntxqsets); /* Allocate queue structure memory */ if (!(vsi->tx_queues = (struct ixl_tx_queue *) malloc(sizeof(struct ixl_tx_queue) *ntxqsets, M_IXLV, M_NOWAIT | M_ZERO))) { device_printf(iflib_get_dev(ctx), "Unable to allocate TX ring memory\n"); return (ENOMEM); } for (i = 0, que = vsi->tx_queues; i < ntxqsets; i++, que++) { struct tx_ring *txr = &que->txr; txr->me = i; que->vsi = vsi; /* get the virtual and physical address of the hardware queues */ txr->tail = I40E_QTX_TAIL1(txr->me); txr->tx_base = (struct i40e_tx_desc *)vaddrs[i]; txr->tx_paddr = paddrs[i]; txr->que = que; } // TODO: Do a config_gtask_init for admin queue here? // iflib_config_gtask_init(ctx, &adapter->mod_task, ixgbe_handle_mod, "mod_task"); device_printf(iflib_get_dev(ctx), "%s: allocated for %d txqs\n", __func__, vsi->num_tx_queues); return (0); } static int ixlv_if_rx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs, int nrxqs, int nrxqsets) { struct ixl_vsi *vsi = iflib_get_softc(ctx); struct ixl_rx_queue *que; int i; MPASS(vsi->num_rx_queues > 0); MPASS(nrxqs == 1); MPASS(vsi->num_rx_queues == nrxqsets); /* Allocate queue structure memory */ if (!(vsi->rx_queues = (struct ixl_rx_queue *) malloc(sizeof(struct ixl_rx_queue) * nrxqsets, M_IXLV, M_NOWAIT | M_ZERO))) { device_printf(iflib_get_dev(ctx), "Unable to allocate RX ring memory\n"); return (ENOMEM); } for (i = 0, que = vsi->rx_queues; i < nrxqsets; i++, que++) { struct rx_ring *rxr = &que->rxr; rxr->me = i; que->vsi = vsi; /* get the virtual and physical address of the hardware queues */ rxr->tail = I40E_QRX_TAIL1(rxr->me); rxr->rx_base = (union i40e_rx_desc *)vaddrs[i]; rxr->rx_paddr = paddrs[i]; rxr->que = que; } device_printf(iflib_get_dev(ctx), "%s: allocated for %d rxqs\n", __func__, vsi->num_rx_queues); return (0); } static void ixlv_if_queues_free(if_ctx_t ctx) { struct ixl_vsi *vsi = iflib_get_softc(ctx); if (vsi->tx_queues != NULL) { free(vsi->tx_queues, M_IXLV); vsi->tx_queues = NULL; } if (vsi->rx_queues != NULL) { free(vsi->rx_queues, M_IXLV); vsi->rx_queues = NULL; } } // TODO: Implement static void ixlv_if_update_admin_status(if_ctx_t ctx) { struct ixl_vsi *vsi = iflib_get_softc(ctx); //struct ixlv_sc *sc = vsi->back; //struct i40e_hw *hw = &sc->hw; //struct i40e_arq_event_info event; //i40e_status ret; //u32 loop = 0; //u16 opcode u16 result = 0; //u64 baudrate; /* TODO: Split up * - Update admin queue stuff * - Update link status * - Enqueue aq task * - Re-enable admin intr */ /* TODO: Does VF reset need to be handled here? 
*/ #if 0 if (pf->state & IXL_PF_STATE_EMPR_RESETTING) { /* Flag cleared at end of this function */ ixl_handle_empr_reset(pf); return; } #endif #if 0 event.buf_len = IXL_AQ_BUF_SZ; event.msg_buf = malloc(event.buf_len, M_IXLV, M_NOWAIT | M_ZERO); if (!event.msg_buf) { device_printf(pf->dev, "%s: Unable to allocate memory for Admin" " Queue event!\n", __func__); return; } /* clean and process any events */ do { ret = i40e_clean_arq_element(hw, &event, &result); if (ret) break; opcode = LE16_TO_CPU(event.desc.opcode); ixl_dbg(pf, IXL_DBG_AQ, "Admin Queue event: %#06x\n", opcode); switch (opcode) { case i40e_aqc_opc_get_link_status: ixl_link_event(pf, &event); break; case i40e_aqc_opc_send_msg_to_pf: #ifdef PCI_IOV ixl_handle_vf_msg(pf, &event); #endif break; case i40e_aqc_opc_event_lan_overflow: break; default: #ifdef IXL_DEBUG printf("AdminQ unknown event %x\n", opcode); #endif break; } } while (result && (loop++ < IXL_ADM_LIMIT)); free(event.msg_buf, M_IXLV); #endif #if 0 /* XXX: This updates the link status */ if (pf->link_up) { if (vsi->link_active == FALSE) { vsi->link_active = TRUE; baudrate = ixl_max_aq_speed_to_value(pf->link_speed); iflib_link_state_change(ctx, LINK_STATE_UP, baudrate); ixl_link_up_msg(pf); // ixl_ping_all_vfs(adapter); } } else { /* Link down */ if (vsi->link_active == TRUE) { vsi->link_active = FALSE; iflib_link_state_change(ctx, LINK_STATE_DOWN, 0); // ixl_ping_all_vfs(adapter); } } #endif /* * If there are still messages to process, reschedule ourselves. * Otherwise, re-enable our interrupt and go to sleep. */ if (result > 0) iflib_admin_intr_deferred(ctx); else /* TODO: Link/adminq interrupt should be re-enabled in IFDI_LINK_INTR_ENABLE */ ixlv_enable_intr(vsi); } static void ixlv_if_multi_set(if_ctx_t ctx) { // struct ixl_vsi *vsi = iflib_get_softc(ctx); // struct i40e_hw *hw = vsi->hw; // struct ixlv_sc *sc = vsi->back; // int mcnt = 0, flags; IOCTL_DEBUGOUT("ixl_if_multi_set: begin"); // TODO: Implement #if 0 mcnt = if_multiaddr_count(iflib_get_ifp(ctx), MAX_MULTICAST_ADDR); /* delete existing MC filters */ ixlv_del_multi(vsi); if (__predict_false(mcnt == MAX_MULTICAST_ADDR)) { // Set promiscuous mode (multicast) // TODO: This needs to get handled somehow #if 0 ixl_vc_enqueue(&sc->vc_mgr, &sc->add_vlan_cmd, IXLV_FLAG_AQ_CONFIGURE_PROMISC, ixl_init_cmd_complete, sc); #endif return; } /* (re-)install filters for all mcast addresses */ mcnt = if_multi_apply(iflib_get_ifp(ctx), ixl_mc_filter_apply, vsi); if (mcnt > 0) { flags = (IXL_FILTER_ADD | IXL_FILTER_USED | IXL_FILTER_MC); ixlv_add_hw_filters(vsi, flags, mcnt); } #endif IOCTL_DEBUGOUT("ixl_if_multi_set: end"); } static void ixlv_if_media_status(if_ctx_t ctx, struct ifmediareq *ifmr) { struct ixl_vsi *vsi = iflib_get_softc(ctx); struct ixlv_sc *sc = (struct ixlv_sc *)vsi->back; struct i40e_hw *hw = &sc->hw; INIT_DEBUGOUT("ixl_media_status: begin"); hw->phy.get_link_info = TRUE; i40e_get_link_status(hw, &sc->link_up); ifmr->ifm_status = IFM_AVALID; ifmr->ifm_active = IFM_ETHER; if (!sc->link_up) { return; } ifmr->ifm_status |= IFM_ACTIVE; /* Hardware is always full-duplex */ ifmr->ifm_active |= IFM_FDX; // TODO: Check another variable to get link speed #if 0 switch (hw->phy.link_info.phy_type) { /* 100 M */ case I40E_PHY_TYPE_100BASE_TX: ifmr->ifm_active |= IFM_100_TX; break; /* 1 G */ case I40E_PHY_TYPE_1000BASE_T: ifmr->ifm_active |= IFM_1000_T; break; case I40E_PHY_TYPE_1000BASE_SX: ifmr->ifm_active |= IFM_1000_SX; break; case I40E_PHY_TYPE_1000BASE_LX: ifmr->ifm_active |= IFM_1000_LX; break; case 
I40E_PHY_TYPE_1000BASE_T_OPTICAL: ifmr->ifm_active |= IFM_OTHER; break; /* 10 G */ case I40E_PHY_TYPE_10GBASE_SFPP_CU: ifmr->ifm_active |= IFM_10G_TWINAX; break; case I40E_PHY_TYPE_10GBASE_SR: ifmr->ifm_active |= IFM_10G_SR; break; case I40E_PHY_TYPE_10GBASE_LR: ifmr->ifm_active |= IFM_10G_LR; break; case I40E_PHY_TYPE_10GBASE_T: ifmr->ifm_active |= IFM_10G_T; break; case I40E_PHY_TYPE_XAUI: case I40E_PHY_TYPE_XFI: case I40E_PHY_TYPE_10GBASE_AOC: ifmr->ifm_active |= IFM_OTHER; break; /* 25 G */ case I40E_PHY_TYPE_25GBASE_KR: ifmr->ifm_active |= IFM_25G_KR; break; case I40E_PHY_TYPE_25GBASE_CR: ifmr->ifm_active |= IFM_25G_CR; break; case I40E_PHY_TYPE_25GBASE_SR: ifmr->ifm_active |= IFM_25G_SR; break; case I40E_PHY_TYPE_25GBASE_LR: ifmr->ifm_active |= IFM_UNKNOWN; break; /* 40 G */ case I40E_PHY_TYPE_40GBASE_CR4: case I40E_PHY_TYPE_40GBASE_CR4_CU: ifmr->ifm_active |= IFM_40G_CR4; break; case I40E_PHY_TYPE_40GBASE_SR4: ifmr->ifm_active |= IFM_40G_SR4; break; case I40E_PHY_TYPE_40GBASE_LR4: ifmr->ifm_active |= IFM_40G_LR4; break; case I40E_PHY_TYPE_XLAUI: ifmr->ifm_active |= IFM_OTHER; break; case I40E_PHY_TYPE_1000BASE_KX: ifmr->ifm_active |= IFM_1000_KX; break; case I40E_PHY_TYPE_SGMII: ifmr->ifm_active |= IFM_1000_SGMII; break; /* ERJ: What's the difference between these? */ case I40E_PHY_TYPE_10GBASE_CR1_CU: case I40E_PHY_TYPE_10GBASE_CR1: ifmr->ifm_active |= IFM_10G_CR1; break; case I40E_PHY_TYPE_10GBASE_KX4: ifmr->ifm_active |= IFM_10G_KX4; break; case I40E_PHY_TYPE_10GBASE_KR: ifmr->ifm_active |= IFM_10G_KR; break; case I40E_PHY_TYPE_SFI: ifmr->ifm_active |= IFM_10G_SFI; break; /* Our single 20G media type */ case I40E_PHY_TYPE_20GBASE_KR2: ifmr->ifm_active |= IFM_20G_KR2; break; case I40E_PHY_TYPE_40GBASE_KR4: ifmr->ifm_active |= IFM_40G_KR4; break; case I40E_PHY_TYPE_XLPPI: case I40E_PHY_TYPE_40GBASE_AOC: ifmr->ifm_active |= IFM_40G_XLPPI; break; /* Unknown to driver */ default: ifmr->ifm_active |= IFM_UNKNOWN; break; } /* Report flow control status as well */ if (hw->phy.link_info.an_info & I40E_AQ_LINK_PAUSE_TX) ifmr->ifm_active |= IFM_ETH_TXPAUSE; if (hw->phy.link_info.an_info & I40E_AQ_LINK_PAUSE_RX) ifmr->ifm_active |= IFM_ETH_RXPAUSE; #endif } static int ixlv_if_media_change(if_ctx_t ctx) { struct ifmedia *ifm = iflib_get_media(ctx); INIT_DEBUGOUT("ixl_media_change: begin"); if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER) return (EINVAL); if_printf(iflib_get_ifp(ctx), "Media change is not supported.\n"); return (ENODEV); } // TODO: Rework static int ixlv_if_promisc_set(if_ctx_t ctx, int flags) { struct ixl_vsi *vsi = iflib_get_softc(ctx); struct ifnet *ifp = iflib_get_ifp(ctx); struct i40e_hw *hw = vsi->hw; int err; bool uni = FALSE, multi = FALSE; if (flags & IFF_ALLMULTI || if_multiaddr_count(ifp, MAX_MULTICAST_ADDR) == MAX_MULTICAST_ADDR) multi = TRUE; if (flags & IFF_PROMISC) uni = TRUE; err = i40e_aq_set_vsi_unicast_promiscuous(hw, vsi->seid, uni, NULL, false); if (err) return (err); err = i40e_aq_set_vsi_multicast_promiscuous(hw, vsi->seid, multi, NULL); return (err); } static void ixlv_if_timer(if_ctx_t ctx, uint16_t qid) { struct ixl_vsi *vsi = iflib_get_softc(ctx); struct ixlv_sc *sc = vsi->back; //struct i40e_hw *hw = &sc->hw; //struct ixl_tx_queue *que = &vsi->tx_queues[qid]; //u32 mask; #if 0 /* ** Check status of the queues */ mask = (I40E_PFINT_DYN_CTLN_INTENA_MASK | I40E_PFINT_DYN_CTLN_SWINT_TRIG_MASK); /* If queue param has outstanding work, trigger sw irq */ // TODO: TX queues in iflib don't use HW interrupts; does this do anything? 
if (que->busy) wr32(hw, I40E_PFINT_DYN_CTLN(que->txr.me), mask); #endif // XXX: Is this timer per-queue? if (qid != 0) return; /* Fire off the adminq task */ iflib_admin_intr_deferred(ctx); /* Update stats */ ixlv_request_stats(sc); } static void ixlv_if_vlan_register(if_ctx_t ctx, u16 vtag) { struct ixl_vsi *vsi = iflib_get_softc(ctx); //struct i40e_hw *hw = vsi->hw; if ((vtag == 0) || (vtag > 4095)) /* Invalid */ return; ++vsi->num_vlans; // TODO: Redo // ixlv_add_filter(vsi, hw->mac.addr, vtag); } static void ixlv_if_vlan_unregister(if_ctx_t ctx, u16 vtag) { struct ixl_vsi *vsi = iflib_get_softc(ctx); //struct i40e_hw *hw = vsi->hw; if ((vtag == 0) || (vtag > 4095)) /* Invalid */ return; --vsi->num_vlans; // TODO: Redo // ixlv_del_filter(vsi, hw->mac.addr, vtag); } static uint64_t ixlv_if_get_counter(if_ctx_t ctx, ift_counter cnt) { struct ixl_vsi *vsi = iflib_get_softc(ctx); if_t ifp = iflib_get_ifp(ctx); switch (cnt) { case IFCOUNTER_IPACKETS: return (vsi->ipackets); case IFCOUNTER_IERRORS: return (vsi->ierrors); case IFCOUNTER_OPACKETS: return (vsi->opackets); case IFCOUNTER_OERRORS: return (vsi->oerrors); case IFCOUNTER_COLLISIONS: /* Collisions are by standard impossible in 40G/10G Ethernet */ return (0); case IFCOUNTER_IBYTES: return (vsi->ibytes); case IFCOUNTER_OBYTES: return (vsi->obytes); case IFCOUNTER_IMCASTS: return (vsi->imcasts); case IFCOUNTER_OMCASTS: return (vsi->omcasts); case IFCOUNTER_IQDROPS: return (vsi->iqdrops); case IFCOUNTER_OQDROPS: return (vsi->oqdrops); case IFCOUNTER_NOPROTO: return (vsi->noproto); default: return (if_get_counter_default(ifp, cnt)); } } static int ixlv_allocate_pci_resources(struct ixlv_sc *sc) { struct i40e_hw *hw = &sc->hw; device_t dev = iflib_get_dev(sc->vsi.ctx); int rid; /* Map BAR0 */ rid = PCIR_BAR(0); sc->pci_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid, RF_ACTIVE); if (!(sc->pci_mem)) { device_printf(dev, "Unable to allocate bus resource: PCI memory\n"); return (ENXIO); } /* Save off the PCI information */ hw->vendor_id = pci_get_vendor(dev); hw->device_id = pci_get_device(dev); hw->revision_id = pci_read_config(dev, PCIR_REVID, 1); hw->subsystem_vendor_id = pci_read_config(dev, PCIR_SUBVEND_0, 2); hw->subsystem_device_id = pci_read_config(dev, PCIR_SUBDEV_0, 2); hw->bus.device = pci_get_slot(dev); hw->bus.func = pci_get_function(dev); /* Save off register access information */ sc->osdep.mem_bus_space_tag = rman_get_bustag(sc->pci_mem); sc->osdep.mem_bus_space_handle = rman_get_bushandle(sc->pci_mem); sc->osdep.mem_bus_space_size = rman_get_size(sc->pci_mem); sc->osdep.flush_reg = I40E_VFGEN_RSTAT; sc->osdep.dev = dev; sc->hw.hw_addr = (u8 *) &sc->osdep.mem_bus_space_handle; sc->hw.back = &sc->osdep; /* Disable adminq interrupts (just in case) */ /* TODO: Probably not necessary */ // ixlv_disable_adminq_irq(&sc->hw); return (0); } static void ixlv_free_pci_resources(struct ixlv_sc *sc) { struct ixl_vsi *vsi = &sc->vsi; struct ixl_rx_queue *rx_que = vsi->rx_queues; device_t dev = sc->dev; /* We may get here before stations are setup */ // TODO: Check if we can still check against sc->msix if ((sc->msix > 0) || (rx_que == NULL)) goto early; /* ** Release all msix VSI resources: */ iflib_irq_free(vsi->ctx, &vsi->irq); for (int i = 0; i < vsi->num_rx_queues; i++, rx_que++) iflib_irq_free(vsi->ctx, &rx_que->que_irq); early: if (sc->pci_mem != NULL) bus_release_resource(dev, SYS_RES_MEMORY, PCIR_BAR(0), sc->pci_mem); } /* ** Requests a VF reset from the PF. ** ** Requires the VF's Admin Queue to be initialized. 
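**
** Summary of the steps below: ask the PF for a reset (skipped when the
** PF already started one and we are in IXLV_RESET_PENDING), pause for
** about 100ms, poll I40E_VFGEN_RSTAT until the VFR state reports
** VIRTCHNL_VFR_VFACTIVE or VIRTCHNL_VFR_COMPLETED (up to ~10 seconds),
** then shut down and re-initialize the Admin Queue so it is usable
** again after the reset.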
*/ static int ixlv_reset(struct ixlv_sc *sc) { struct i40e_hw *hw = &sc->hw; device_t dev = sc->dev; int error = 0; /* Ask the PF to reset us if we are initiating */ if (sc->init_state != IXLV_RESET_PENDING) ixlv_request_reset(sc); i40e_msec_pause(100); error = ixlv_reset_complete(hw); if (error) { device_printf(dev, "%s: VF reset failed\n", __func__); return (error); } error = i40e_shutdown_adminq(hw); if (error) { device_printf(dev, "%s: shutdown_adminq failed: %d\n", __func__, error); return (error); } error = i40e_init_adminq(hw); if (error) { device_printf(dev, "%s: init_adminq failed: %d\n", __func__, error); return(error); } return (0); } static int ixlv_reset_complete(struct i40e_hw *hw) { u32 reg; /* Wait up to ~10 seconds */ for (int i = 0; i < 100; i++) { reg = rd32(hw, I40E_VFGEN_RSTAT) & I40E_VFGEN_RSTAT_VFR_STATE_MASK; if ((reg == VIRTCHNL_VFR_VFACTIVE) || (reg == VIRTCHNL_VFR_COMPLETED)) return (0); i40e_msec_pause(100); } return (EBUSY); } static void ixlv_setup_interface(device_t dev, struct ixl_vsi *vsi) { if_ctx_t ctx = vsi->ctx; struct ixlv_sc *sc = vsi->back; struct ifnet *ifp = iflib_get_ifp(ctx); uint64_t cap; //struct ixl_queue *que = vsi->queues; INIT_DBG_DEV(dev, "begin"); /* TODO: Remove VLAN_ENCAP_LEN? */ vsi->shared->isc_max_frame_size = ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN + ETHER_VLAN_ENCAP_LEN; #if __FreeBSD_version >= 1100000 if_setbaudrate(ifp, IF_Gbps(40)); #else if_initbaudrate(ifp, IF_Gbps(40)); #endif /* Media types based on reported link speed over AdminQ */ ifmedia_add(&sc->media, IFM_ETHER | IFM_100_TX, 0, NULL); ifmedia_add(&sc->media, IFM_ETHER | IFM_1000_T, 0, NULL); ifmedia_add(&sc->media, IFM_ETHER | IFM_10G_SR, 0, NULL); ifmedia_add(&sc->media, IFM_ETHER | IFM_25G_SR, 0, NULL); ifmedia_add(&sc->media, IFM_ETHER | IFM_40G_SR4, 0, NULL); ifmedia_add(&sc->media, IFM_ETHER | IFM_AUTO, 0, NULL); ifmedia_set(&sc->media, IFM_ETHER | IFM_AUTO); INIT_DBG_DEV(dev, "end"); return (0); } #if 0 /* ** Allocate and setup a single queue */ static int ixlv_setup_queue(struct ixlv_sc *sc, struct ixl_queue *que) { device_t dev = sc->dev; struct tx_ring *txr; struct rx_ring *rxr; int rsize, tsize; int error = I40E_SUCCESS; txr = &que->txr; txr->que = que; txr->tail = I40E_QTX_TAIL1(que->me); /* Initialize the TX lock */ snprintf(txr->mtx_name, sizeof(txr->mtx_name), "%s:tx(%d)", device_get_nameunit(dev), que->me); mtx_init(&txr->mtx, txr->mtx_name, NULL, MTX_DEF); /* * Create the TX descriptor ring * * In Head Writeback mode, the descriptor ring is one bigger * than the number of descriptors for space for the HW to * write back index of last completed descriptor. 
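 *
 * Worked example, assuming the usual 16-byte i40e_tx_desc and a
 * DBA_ALIGN of 128: with 1024 descriptors the plain ring takes
 * roundup2(1024 * 16, 128) = 16384 bytes, while head writeback needs
 * roundup2(1024 * 16 + sizeof(u32), 128) = 16512 bytes, i.e. one extra
 * aligned chunk to hold the completed-descriptor index.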
*/ if (sc->vsi.enable_head_writeback) { tsize = roundup2((que->num_tx_desc * sizeof(struct i40e_tx_desc)) + sizeof(u32), DBA_ALIGN); } else { tsize = roundup2((que->num_tx_desc * sizeof(struct i40e_tx_desc)), DBA_ALIGN); } if (i40e_allocate_dma_mem(&sc->hw, &txr->dma, i40e_mem_reserved, tsize, DBA_ALIGN)) { device_printf(dev, "Unable to allocate TX Descriptor memory\n"); error = ENOMEM; goto err_destroy_tx_mtx; } txr->base = (struct i40e_tx_desc *)txr->dma.va; bzero((void *)txr->base, tsize); /* Now allocate transmit soft structs for the ring */ if (ixl_allocate_tx_data(que)) { device_printf(dev, "Critical Failure setting up TX structures\n"); error = ENOMEM; goto err_free_tx_dma; } /* Allocate a buf ring */ txr->br = buf_ring_alloc(ixlv_txbrsz, M_DEVBUF, M_WAITOK, &txr->mtx); if (txr->br == NULL) { device_printf(dev, "Critical Failure setting up TX buf ring\n"); error = ENOMEM; goto err_free_tx_data; } /* * Next the RX queues... */ rsize = roundup2(que->num_rx_desc * sizeof(union i40e_rx_desc), DBA_ALIGN); rxr = &que->rxr; rxr->que = que; rxr->tail = I40E_QRX_TAIL1(que->me); /* Initialize the RX side lock */ snprintf(rxr->mtx_name, sizeof(rxr->mtx_name), "%s:rx(%d)", device_get_nameunit(dev), que->me); mtx_init(&rxr->mtx, rxr->mtx_name, NULL, MTX_DEF); if (i40e_allocate_dma_mem(&sc->hw, &rxr->dma, i40e_mem_reserved, rsize, 4096)) { //JFV - should this be DBA? device_printf(dev, "Unable to allocate RX Descriptor memory\n"); error = ENOMEM; goto err_destroy_rx_mtx; } rxr->base = (union i40e_rx_desc *)rxr->dma.va; bzero((void *)rxr->base, rsize); /* Allocate receive soft structs for the ring */ if (ixl_allocate_rx_data(que)) { device_printf(dev, "Critical Failure setting up receive structs\n"); error = ENOMEM; goto err_free_rx_dma; } return (0); err_free_rx_dma: i40e_free_dma_mem(&sc->hw, &rxr->dma); err_destroy_rx_mtx: mtx_destroy(&rxr->mtx); /* err_free_tx_buf_ring */ buf_ring_free(txr->br, M_DEVBUF); err_free_tx_data: ixl_free_que_tx(que); err_free_tx_dma: i40e_free_dma_mem(&sc->hw, &txr->dma); err_destroy_tx_mtx: mtx_destroy(&txr->mtx); return (error); } #endif /* ** Allocate and setup the interface queues */ static int ixlv_setup_queues(struct ixlv_sc *sc) { device_t dev = sc->dev; struct ixl_vsi *vsi; struct ixl_queue *que; int i; int error = I40E_SUCCESS; vsi = &sc->vsi; vsi->back = (void *)sc; vsi->hw = &sc->hw; vsi->num_vlans = 0; /* Get memory for the station queues */ if (!(vsi->queues = (struct ixl_queue *) malloc(sizeof(struct ixl_queue) * vsi->num_queues, M_DEVBUF, M_NOWAIT | M_ZERO))) { device_printf(dev, "Unable to allocate queue memory\n"); return ENOMEM; } for (i = 0; i < vsi->num_queues; i++) { que = &vsi->queues[i]; que->num_tx_desc = vsi->num_tx_desc; que->num_rx_desc = vsi->num_rx_desc; que->me = i; que->vsi = vsi; if (ixlv_setup_queue(sc, que)) { error = ENOMEM; goto err_free_queues; } } return (0); err_free_queues: while (i--) ixlv_free_queue(sc, &vsi->queues[i]); free(vsi->queues, M_DEVBUF); return (error); } #if 0 /* ** This routine is run via an vlan config EVENT, ** it enables us to use the HW Filter table since ** we can get the vlan id. This just creates the ** entry in the soft version of the VFTA, init will ** repopulate the real table. 
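**
** Flow, for reference: the tag is recorded in sc->vlan_filters with
** IXL_FILTER_ADD set, and IXLV_FLAG_AQ_ADD_VLAN_FILTER is queued so the
** Admin Queue task pushes the filter to the PF asynchronously.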
*/ static void ixlv_register_vlan(void *arg, struct ifnet *ifp, u16 vtag) { struct ixl_vsi *vsi = arg; struct ixlv_sc *sc = vsi->back; struct ixlv_vlan_filter *v; if (ifp->if_softc != arg) /* Not our event */ return; if ((vtag == 0) || (vtag > 4095)) /* Invalid */ return; /* Sanity check - make sure it doesn't already exist */ SLIST_FOREACH(v, sc->vlan_filters, next) { if (v->vlan == vtag) return; } mtx_lock(&sc->mtx); ++vsi->num_vlans; v = malloc(sizeof(struct ixlv_vlan_filter), M_DEVBUF, M_NOWAIT | M_ZERO); SLIST_INSERT_HEAD(sc->vlan_filters, v, next); v->vlan = vtag; v->flags = IXL_FILTER_ADD; ixl_vc_enqueue(&sc->vc_mgr, &sc->add_vlan_cmd, IXLV_FLAG_AQ_ADD_VLAN_FILTER, ixl_init_cmd_complete, sc); mtx_unlock(&sc->mtx); return; } /* ** This routine is run via an vlan ** unconfig EVENT, remove our entry ** in the soft vfta. */ static void ixlv_unregister_vlan(void *arg, struct ifnet *ifp, u16 vtag) { struct ixl_vsi *vsi = arg; struct ixlv_sc *sc = vsi->back; struct ixlv_vlan_filter *v; int i = 0; if (ifp->if_softc != arg) return; if ((vtag == 0) || (vtag > 4095)) /* Invalid */ return; mtx_lock(&sc->mtx); SLIST_FOREACH(v, sc->vlan_filters, next) { if (v->vlan == vtag) { v->flags = IXL_FILTER_DEL; ++i; --vsi->num_vlans; } } if (i) ixl_vc_enqueue(&sc->vc_mgr, &sc->del_vlan_cmd, IXLV_FLAG_AQ_DEL_VLAN_FILTER, ixl_init_cmd_complete, sc); mtx_unlock(&sc->mtx); return; } #endif /* ** Get a new filter and add it to the mac filter list. */ static struct ixlv_mac_filter * ixlv_get_mac_filter(struct ixlv_sc *sc) { struct ixlv_mac_filter *f; f = malloc(sizeof(struct ixlv_mac_filter), M_DEVBUF, M_NOWAIT | M_ZERO); if (f) SLIST_INSERT_HEAD(sc->mac_filters, f, next); return (f); } /* ** Find the filter with matching MAC address */ static struct ixlv_mac_filter * ixlv_find_mac_filter(struct ixlv_sc *sc, u8 *macaddr) { struct ixlv_mac_filter *f; bool match = FALSE; SLIST_FOREACH(f, sc->mac_filters, next) { if (cmp_etheraddr(f->macaddr, macaddr)) { match = TRUE; break; } } if (!match) f = NULL; return (f); } /* ** Admin Queue interrupt handler */ static int ixlv_msix_adminq(void *arg) { struct ixlv_sc *sc = arg; struct i40e_hw *hw = &sc->hw; // device_t dev = sc->dev; u32 reg; bool do_task = FALSE; ++sc->admin_irq; reg = rd32(hw, I40E_VFINT_ICR01); mask = rd32(hw, I40E_VFINT_ICR0_ENA1); reg = rd32(hw, I40E_VFINT_DYN_CTL01); reg |= I40E_VFINT_DYN_CTL01_CLEARPBA_MASK; wr32(hw, I40E_VFINT_DYN_CTL01, reg); /* Check on the cause */ if (reg & I40E_VFINT_ICR0_ADMINQ_MASK) do_task = TRUE; if (do_task) iflib_admin_intr_deferred(sc->vsi.ctx); else ixlv_enable_adminq_irq(hw); return (FILTER_HANDLED); } void ixlv_enable_intr(struct ixl_vsi *vsi) { struct i40e_hw *hw = vsi->hw; struct ixl_rx_queue *que = vsi->rx_queues; ixlv_enable_adminq_irq(hw); for (int i = 0; i < vsi->num_rx_queues; i++, que++) ixlv_enable_queue_irq(hw, que->rxr.me); } void ixlv_disable_intr(struct ixl_vsi *vsi) { struct i40e_hw *hw = vsi->hw; struct ixl_rx_queue *que = vsi->rx_queues; ixlv_disable_adminq_irq(hw); for (int i = 0; i < vsi->num_rx_queues; i++, que++) ixlv_disable_queue_irq(hw, que->rxr.me); } static void ixlv_disable_adminq_irq(struct i40e_hw *hw) { wr32(hw, I40E_VFINT_DYN_CTL01, 0); wr32(hw, I40E_VFINT_ICR0_ENA1, 0); /* flush */ rd32(hw, I40E_VFGEN_RSTAT); } static void ixlv_enable_adminq_irq(struct i40e_hw *hw) { wr32(hw, I40E_VFINT_DYN_CTL01, I40E_VFINT_DYN_CTL01_INTENA_MASK | I40E_VFINT_DYN_CTL01_ITR_INDX_MASK); wr32(hw, I40E_VFINT_ICR0_ENA1, I40E_VFINT_ICR0_ENA1_ADMINQ_MASK); /* flush */ rd32(hw, I40E_VFGEN_RSTAT); } static void 
ixlv_enable_queue_irq(struct i40e_hw *hw, int id) { u32 reg; reg = I40E_VFINT_DYN_CTLN1_INTENA_MASK | I40E_VFINT_DYN_CTLN1_CLEARPBA_MASK | I40E_VFINT_DYN_CTLN1_ITR_INDX_MASK; wr32(hw, I40E_VFINT_DYN_CTLN1(id), reg); } static void ixlv_disable_queue_irq(struct i40e_hw *hw, int id) { wr32(hw, I40E_VFINT_DYN_CTLN1(id), I40E_VFINT_DYN_CTLN1_ITR_INDX_MASK); rd32(hw, I40E_VFGEN_RSTAT); return; } /* * Get initial ITR values from tunable values. */ static void ixlv_configure_itr(struct ixlv_sc *sc) { struct i40e_hw *hw = &sc->hw; struct ixl_vsi *vsi = &sc->vsi; struct ixl_rx_queue *rx_que = vsi->rx_queues; vsi->rx_itr_setting = ixlv_rx_itr; //vsi->tx_itr_setting = ixlv_tx_itr; for (int i = 0; i < vsi->num_rx_queues; i++, rx_que++) { struct rx_ring *rxr = &rx_que->rxr; wr32(hw, I40E_VFINT_ITRN1(IXL_RX_ITR, i), vsi->rx_itr_setting); rxr->itr = vsi->rx_itr_setting; rxr->latency = IXL_AVE_LATENCY; #if 0 struct tx_ring *txr = &que->txr; wr32(hw, I40E_VFINT_ITRN1(IXL_TX_ITR, i), vsi->tx_itr_setting); txr->itr = vsi->tx_itr_setting; txr->latency = IXL_AVE_LATENCY; #endif } } /* ** Provide a update to the queue RX ** interrupt moderation value. */ static void ixlv_set_queue_rx_itr(struct ixl_rx_queue *que) { struct ixl_vsi *vsi = que->vsi; struct i40e_hw *hw = vsi->hw; struct rx_ring *rxr = &que->rxr; u16 rx_itr; u16 rx_latency = 0; int rx_bytes; /* Idle, do nothing */ if (rxr->bytes == 0) return; if (ixlv_dynamic_rx_itr) { rx_bytes = rxr->bytes/rxr->itr; rx_itr = rxr->itr; /* Adjust latency range */ switch (rxr->latency) { case IXL_LOW_LATENCY: if (rx_bytes > 10) { rx_latency = IXL_AVE_LATENCY; rx_itr = IXL_ITR_20K; } break; case IXL_AVE_LATENCY: if (rx_bytes > 20) { rx_latency = IXL_BULK_LATENCY; rx_itr = IXL_ITR_8K; } else if (rx_bytes <= 10) { rx_latency = IXL_LOW_LATENCY; rx_itr = IXL_ITR_100K; } break; case IXL_BULK_LATENCY: if (rx_bytes <= 20) { rx_latency = IXL_AVE_LATENCY; rx_itr = IXL_ITR_20K; } break; } rxr->latency = rx_latency; if (rx_itr != rxr->itr) { /* do an exponential smoothing */ rx_itr = (10 * rx_itr * rxr->itr) / ((9 * rx_itr) + rxr->itr); rxr->itr = min(rx_itr, IXL_MAX_ITR); wr32(hw, I40E_VFINT_ITRN1(IXL_RX_ITR, que->rxr.me), rxr->itr); } } else { /* We may have have toggled to non-dynamic */ if (vsi->rx_itr_setting & IXL_ITR_DYNAMIC) vsi->rx_itr_setting = ixlv_rx_itr; /* Update the hardware if needed */ if (rxr->itr != vsi->rx_itr_setting) { rxr->itr = vsi->rx_itr_setting; wr32(hw, I40E_VFINT_ITRN1(IXL_RX_ITR, que->rxr.me), rxr->itr); } } rxr->bytes = 0; rxr->packets = 0; return; } /* ** Provide a update to the queue TX ** interrupt moderation value. 
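**
** With ixlv_dynamic_tx_itr set this mirrors the RX path above: pick a
** latency bucket from bytes-per-interrupt, then blend the bucket's
** target ITR into the current one with
**   new = (10 * target * cur) / ((9 * target) + cur)
** so the value drifts toward the target instead of jumping. For example
** (illustrative register values only), cur = 62 and target = 25 gives
** (10 * 25 * 62) / ((9 * 25) + 62) = 15500 / 287 = 54.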
*/ static void ixlv_set_queue_tx_itr(struct ixl_tx_queue *que) { struct ixl_vsi *vsi = que->vsi; struct i40e_hw *hw = vsi->hw; struct tx_ring *txr = &que->txr; u16 tx_itr; u16 tx_latency = 0; int tx_bytes; /* Idle, do nothing */ if (txr->bytes == 0) return; if (ixlv_dynamic_tx_itr) { tx_bytes = txr->bytes/txr->itr; tx_itr = txr->itr; switch (txr->latency) { case IXL_LOW_LATENCY: if (tx_bytes > 10) { tx_latency = IXL_AVE_LATENCY; tx_itr = IXL_ITR_20K; } break; case IXL_AVE_LATENCY: if (tx_bytes > 20) { tx_latency = IXL_BULK_LATENCY; tx_itr = IXL_ITR_8K; } else if (tx_bytes <= 10) { tx_latency = IXL_LOW_LATENCY; tx_itr = IXL_ITR_100K; } break; case IXL_BULK_LATENCY: if (tx_bytes <= 20) { tx_latency = IXL_AVE_LATENCY; tx_itr = IXL_ITR_20K; } break; } txr->latency = tx_latency; if (tx_itr != txr->itr) { /* do an exponential smoothing */ tx_itr = (10 * tx_itr * txr->itr) / ((9 * tx_itr) + txr->itr); txr->itr = min(tx_itr, IXL_MAX_ITR); wr32(hw, I40E_VFINT_ITRN1(IXL_TX_ITR, que->txr.me), txr->itr); } } else { /* We may have have toggled to non-dynamic */ if (vsi->tx_itr_setting & IXL_ITR_DYNAMIC) vsi->tx_itr_setting = ixlv_tx_itr; /* Update the hardware if needed */ if (txr->itr != vsi->tx_itr_setting) { txr->itr = vsi->tx_itr_setting; wr32(hw, I40E_VFINT_ITRN1(IXL_TX_ITR, que->txr.me), txr->itr); } } txr->bytes = 0; txr->packets = 0; return; } #if 0 /* ** ** MSIX Interrupt Handlers and Tasklets ** */ static void ixlv_handle_que(void *context, int pending) { struct ixl_queue *que = context; struct ixl_vsi *vsi = que->vsi; struct i40e_hw *hw = vsi->hw; struct tx_ring *txr = &que->txr; struct ifnet *ifp = vsi->ifp; bool more; if (ifp->if_drv_flags & IFF_DRV_RUNNING) { more = ixl_rxeof(que, IXL_RX_LIMIT); mtx_lock(&txr->mtx); ixl_txeof(que); if (!drbr_empty(ifp, txr->br)) ixl_mq_start_locked(ifp, txr); mtx_unlock(&txr->mtx); if (more) { taskqueue_enqueue(que->tq, &que->task); return; } } /* Reenable this interrupt - hmmm */ ixlv_enable_queue_irq(hw, que->me); return; } #endif static int ixlv_msix_que(void *arg) { struct ixl_rx_queue *que = arg; ++que->irqs; ixlv_set_queue_rx_itr(que); ixlv_set_queue_tx_itr(que); return (FILTER_SCHEDULE_THREAD); } /********************************************************************* * * Media Ioctl callback * * This routine is called whenever the user queries the status of * the interface using ifconfig. * **********************************************************************/ static void ixlv_media_status(struct ifnet * ifp, struct ifmediareq * ifmr) { struct ixl_vsi *vsi = ifp->if_softc; struct ixlv_sc *sc = vsi->back; INIT_DBG_IF(ifp, "begin"); mtx_lock(&sc->mtx); ixlv_update_link_status(sc); ifmr->ifm_status = IFM_AVALID; ifmr->ifm_active = IFM_ETHER; if (!sc->link_up) { mtx_unlock(&sc->mtx); INIT_DBG_IF(ifp, "end: link not up"); return; } ifmr->ifm_status |= IFM_ACTIVE; /* Hardware is always full-duplex */ ifmr->ifm_active |= IFM_FDX; /* Based on the link speed reported by the PF over the AdminQ, choose a * PHY type to report. This isn't 100% correct since we don't really * know the underlying PHY type of the PF, but at least we can report * a valid link speed... 
*/ switch (sc->link_speed) { case VIRTCHNL_LINK_SPEED_100MB: ifmr->ifm_active |= IFM_100_TX; break; case VIRTCHNL_LINK_SPEED_1GB: ifmr->ifm_active |= IFM_1000_T; break; case VIRTCHNL_LINK_SPEED_10GB: ifmr->ifm_active |= IFM_10G_SR; break; case VIRTCHNL_LINK_SPEED_20GB: case VIRTCHNL_LINK_SPEED_25GB: ifmr->ifm_active |= IFM_25G_SR; break; case VIRTCHNL_LINK_SPEED_40GB: ifmr->ifm_active |= IFM_40G_SR4; break; default: ifmr->ifm_active |= IFM_UNKNOWN; break; } mtx_unlock(&sc->mtx); INIT_DBG_IF(ifp, "end"); return; } /********************************************************************* * * Media Ioctl callback * * This routine is called when the user changes speed/duplex using * media/mediopt option with ifconfig. * **********************************************************************/ static int ixlv_media_change(struct ifnet * ifp) { struct ixl_vsi *vsi = ifp->if_softc; struct ifmedia *ifm = &vsi->media; INIT_DBG_IF(ifp, "begin"); if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER) return (EINVAL); if_printf(ifp, "Changing speed is not supported\n"); INIT_DBG_IF(ifp, "end"); return (ENODEV); } #if 0 /********************************************************************* * Multicast Initialization * * This routine is called by init to reset a fresh state. * **********************************************************************/ static void ixlv_init_multi(struct ixl_vsi *vsi) { struct ixlv_mac_filter *f; struct ixlv_sc *sc = vsi->back; int mcnt = 0; IOCTL_DBG_IF(vsi->ifp, "begin"); /* First clear any multicast filters */ SLIST_FOREACH(f, sc->mac_filters, next) { if ((f->flags & IXL_FILTER_USED) && (f->flags & IXL_FILTER_MC)) { f->flags |= IXL_FILTER_DEL; mcnt++; } } if (mcnt > 0) ixl_vc_enqueue(&sc->vc_mgr, &sc->del_multi_cmd, IXLV_FLAG_AQ_DEL_MAC_FILTER, ixl_init_cmd_complete, sc); IOCTL_DBG_IF(vsi->ifp, "end"); } static void ixlv_add_multi(struct ixl_vsi *vsi) { struct ifmultiaddr *ifma; struct ifnet *ifp = vsi->ifp; struct ixlv_sc *sc = vsi->back; int mcnt = 0; IOCTL_DBG_IF(ifp, "begin"); if_maddr_rlock(ifp); /* ** Get a count, to decide if we ** simply use multicast promiscuous. 
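**
** If the walk below finds MAX_MULTICAST_ADDR or more link-layer
** addresses, the per-address filters are abandoned and the PF is asked
** for multicast-promiscuous mode instead; otherwise each address is
** added as an IXL_FILTER_MC entry and one add-filter request is queued
** for the Admin Queue task.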
*/ CK_STAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) { if (ifma->ifma_addr->sa_family != AF_LINK) continue; mcnt++; } if_maddr_runlock(ifp); /* TODO: Remove -- cannot set promiscuous mode in a VF */ if (__predict_false(mcnt >= MAX_MULTICAST_ADDR)) { /* delete all multicast filters */ ixlv_init_multi(vsi); sc->promiscuous_flags |= FLAG_VF_MULTICAST_PROMISC; ixl_vc_enqueue(&sc->vc_mgr, &sc->add_multi_cmd, IXLV_FLAG_AQ_CONFIGURE_PROMISC, ixl_init_cmd_complete, sc); IOCTL_DEBUGOUT("%s: end: too many filters", __func__); return; } mcnt = 0; if_maddr_rlock(ifp); CK_STAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) { if (ifma->ifma_addr->sa_family != AF_LINK) continue; if (!ixlv_add_mac_filter(sc, (u8*)LLADDR((struct sockaddr_dl *) ifma->ifma_addr), IXL_FILTER_MC)) mcnt++; } if_maddr_runlock(ifp); /* ** Notify AQ task that sw filters need to be ** added to hw list */ if (mcnt > 0) ixl_vc_enqueue(&sc->vc_mgr, &sc->add_multi_cmd, IXLV_FLAG_AQ_ADD_MAC_FILTER, ixl_init_cmd_complete, sc); IOCTL_DBG_IF(ifp, "end"); } static void ixlv_del_multi(struct ixl_vsi *vsi) { struct ixlv_mac_filter *f; struct ifmultiaddr *ifma; struct ifnet *ifp = vsi->ifp; struct ixlv_sc *sc = vsi->back; int mcnt = 0; bool match = FALSE; IOCTL_DBG_IF(ifp, "begin"); /* Search for removed multicast addresses */ if_maddr_rlock(ifp); SLIST_FOREACH(f, sc->mac_filters, next) { if ((f->flags & IXL_FILTER_USED) && (f->flags & IXL_FILTER_MC)) { /* check if mac address in filter is in sc's list */ match = FALSE; CK_STAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) { if (ifma->ifma_addr->sa_family != AF_LINK) continue; u8 *mc_addr = (u8 *)LLADDR((struct sockaddr_dl *)ifma->ifma_addr); if (cmp_etheraddr(f->macaddr, mc_addr)) { match = TRUE; break; } } /* if this filter is not in the sc's list, remove it */ if (match == FALSE && !(f->flags & IXL_FILTER_DEL)) { f->flags |= IXL_FILTER_DEL; mcnt++; IOCTL_DBG_IF(ifp, "marked: " MAC_FORMAT, MAC_FORMAT_ARGS(f->macaddr)); } else if (match == FALSE) IOCTL_DBG_IF(ifp, "exists: " MAC_FORMAT, MAC_FORMAT_ARGS(f->macaddr)); } } if_maddr_runlock(ifp); if (mcnt > 0) ixl_vc_enqueue(&sc->vc_mgr, &sc->del_multi_cmd, IXLV_FLAG_AQ_DEL_MAC_FILTER, ixl_init_cmd_complete, sc); IOCTL_DBG_IF(ifp, "end"); } static void ixlv_local_timer(void *arg) { struct ixlv_sc *sc = arg; struct i40e_hw *hw = &sc->hw; struct ixl_vsi *vsi = &sc->vsi; u32 val; IXLV_CORE_LOCK_ASSERT(sc); /* If Reset is in progress just bail */ if (sc->init_state == IXLV_RESET_PENDING) return; /* Check for when PF triggers a VF reset */ val = rd32(hw, I40E_VFGEN_RSTAT) & I40E_VFGEN_RSTAT_VFR_STATE_MASK; if (val != VIRTCHNL_VFR_VFACTIVE && val != VIRTCHNL_VFR_COMPLETED) { DDPRINTF(sc->dev, "reset in progress! (%d)", val); return; } ixlv_request_stats(sc); /* clean and process any events */ taskqueue_enqueue(sc->tq, &sc->aq_irq); /* Increment stat when a queue shows hung */ if (ixl_queue_hang_check(vsi)) sc->watchdog_events++; callout_reset(&sc->timer, hz, ixlv_local_timer, sc); } /* ** Note: this routine updates the OS on the link state ** the real check of the hardware only happens with ** a link interrupt. 
*/ void ixlv_update_link_status(struct ixlv_sc *sc) { struct ixl_vsi *vsi = &sc->vsi; struct ifnet *ifp = vsi->ifp; if (sc->link_up){ if (vsi->link_active == FALSE) { if (bootverbose) if_printf(ifp,"Link is Up, %s\n", ixlv_vc_speed_to_string(sc->link_speed)); vsi->link_active = TRUE; if_link_state_change(ifp, LINK_STATE_UP); } } else { /* Link down */ if (vsi->link_active == TRUE) { if (bootverbose) if_printf(ifp,"Link is Down\n"); if_link_state_change(ifp, LINK_STATE_DOWN); vsi->link_active = FALSE; } } return; } #endif /********************************************************************* * * This routine disables all traffic on the adapter by issuing a * global reset on the MAC and deallocates TX/RX buffers. * **********************************************************************/ static void ixlv_stop(struct ixlv_sc *sc) { struct ifnet *ifp; int start; ifp = sc->vsi.ifp; INIT_DBG_IF(ifp, "begin"); ixl_vc_flush(&sc->vc_mgr); ixlv_disable_queues(sc); start = ticks; while ((ifp->if_drv_flags & IFF_DRV_RUNNING) && ((ticks - start) < hz/10)) ixlv_do_adminq_locked(sc); /* Stop the local timer */ callout_stop(&sc->timer); INIT_DBG_IF(ifp, "end"); } static void ixlv_if_stop(if_ctx_t ctx) { struct ixl_vsi *vsi = iflib_get_softc(ctx); ixlv_stop(sc); } static void ixlv_config_rss_reg(struct ixlv_sc *sc) { struct i40e_hw *hw = &sc->hw; struct ixl_vsi *vsi = &sc->vsi; u32 lut = 0; u64 set_hena = 0, hena; int i, j, que_id; u32 rss_seed[IXL_RSS_KEY_SIZE_REG]; #ifdef RSS u32 rss_hash_config; #endif /* Don't set up RSS if using a single queue */ if (vsi->num_rx_queues == 1) { wr32(hw, I40E_VFQF_HENA(0), 0); wr32(hw, I40E_VFQF_HENA(1), 0); ixl_flush(hw); return; } #ifdef RSS /* Fetch the configured RSS key */ rss_getkey((uint8_t *) &rss_seed); #else ixl_get_default_rss_key(rss_seed); #endif /* Fill out hash function seed */ for (i = 0; i < IXL_RSS_KEY_SIZE_REG; i++) wr32(hw, I40E_VFQF_HKEY(i), rss_seed[i]); /* Enable PCTYPES for RSS: */ #ifdef RSS rss_hash_config = rss_gethashconfig(); if (rss_hash_config & RSS_HASHTYPE_RSS_IPV4) set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_OTHER); if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV4) set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_TCP); if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV4) set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_UDP); if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6) set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_OTHER); if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6_EX) set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV6); if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6) set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_TCP); if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6) set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_UDP); #else set_hena = IXL_DEFAULT_RSS_HENA_XL710; #endif hena = (u64)rd32(hw, I40E_VFQF_HENA(0)) | ((u64)rd32(hw, I40E_VFQF_HENA(1)) << 32); hena |= set_hena; wr32(hw, I40E_VFQF_HENA(0), (u32)hena); wr32(hw, I40E_VFQF_HENA(1), (u32)(hena >> 32)); /* Populate the LUT with max no. of queues in round robin fashion */ for (i = 0, j = 0; i < IXL_RSS_VSI_LUT_SIZE; i++, j++) { if (j == vsi->num_rx_queues) j = 0; #ifdef RSS /* * Fetch the RSS bucket id for the given indirection entry. * Cap it at the number of configured buckets (which is * num_queues.) 
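 *
 * Whichever que_id is used (the RSS bucket here, or the round-robin j
 * without RSS), the code below packs four LUT entries per register:
 * lut = (lut << 8) | que_id, flushed to I40E_VFQF_HLUT(i >> 2) on every
 * fourth entry. With four RX queues, for instance, the first register
 * is written as 0x00010203, entry 0 landing in the most significant
 * byte.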
*/ que_id = rss_get_indirection_to_bucket(i); que_id = que_id % vsi->num_queues; #else que_id = j; #endif /* lut = 4-byte sliding window of 4 lut entries */ lut = (lut << 8) | (que_id & IXL_RSS_VF_LUT_ENTRY_MASK); /* On i = 3, we have 4 entries in lut; write to the register */ if ((i & 3) == 3) { wr32(hw, I40E_VFQF_HLUT(i >> 2), lut); DDPRINTF(sc->dev, "HLUT(%2d): %#010x", i, lut); } } ixl_flush(hw); } static void ixlv_config_rss_pf(struct ixlv_sc *sc) { ixl_vc_enqueue(&sc->vc_mgr, &sc->config_rss_key_cmd, IXLV_FLAG_AQ_CONFIG_RSS_KEY, ixl_init_cmd_complete, sc); ixl_vc_enqueue(&sc->vc_mgr, &sc->set_rss_hena_cmd, IXLV_FLAG_AQ_SET_RSS_HENA, ixl_init_cmd_complete, sc); ixl_vc_enqueue(&sc->vc_mgr, &sc->config_rss_lut_cmd, IXLV_FLAG_AQ_CONFIG_RSS_LUT, ixl_init_cmd_complete, sc); } /* ** ixlv_config_rss - setup RSS ** ** RSS keys and table are cleared on VF reset. */ static void ixlv_config_rss(struct ixlv_sc *sc) { if (sc->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RSS_REG) { DDPRINTF(sc->dev, "Setting up RSS using VF registers..."); ixlv_config_rss_reg(sc); } else if (sc->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RSS_PF) { DDPRINTF(sc->dev, "Setting up RSS using messages to PF..."); ixlv_config_rss_pf(sc); } else device_printf(sc->dev, "VF does not support RSS capability sent by PF.\n"); } /* ** This routine refreshes vlan filters, called by init ** it scans the filter table and then updates the AQ */ static void ixlv_setup_vlan_filters(struct ixlv_sc *sc) { struct ixl_vsi *vsi = &sc->vsi; struct ixlv_vlan_filter *f; int cnt = 0; if (vsi->num_vlans == 0) return; /* ** Scan the filter table for vlan entries, ** and if found call for the AQ update. */ SLIST_FOREACH(f, sc->vlan_filters, next) if (f->flags & IXL_FILTER_ADD) cnt++; if (cnt > 0) ixl_vc_enqueue(&sc->vc_mgr, &sc->add_vlan_cmd, IXLV_FLAG_AQ_ADD_VLAN_FILTER, ixl_init_cmd_complete, sc); } /* ** This routine adds new MAC filters to the sc's list; ** these are later added in hardware by sending a virtual ** channel message. */ static int ixlv_add_mac_filter(struct ixlv_sc *sc, u8 *macaddr, u16 flags) { struct ixlv_mac_filter *f; /* Does one already exist? */ f = ixlv_find_mac_filter(sc, macaddr); if (f != NULL) { IDPRINTF(sc->vsi.ifp, "exists: " MAC_FORMAT, MAC_FORMAT_ARGS(macaddr)); return (EEXIST); } /* If not, get a new empty filter */ f = ixlv_get_mac_filter(sc); if (f == NULL) { if_printf(sc->vsi.ifp, "%s: no filters available!!\n", __func__); return (ENOMEM); } IDPRINTF(sc->vsi.ifp, "marked: " MAC_FORMAT, MAC_FORMAT_ARGS(macaddr)); bcopy(macaddr, f->macaddr, ETHER_ADDR_LEN); f->flags |= (IXL_FILTER_ADD | IXL_FILTER_USED); f->flags |= flags; return (0); } /* ** Marks a MAC filter for deletion. 
*/ static int ixlv_del_mac_filter(struct ixlv_sc *sc, u8 *macaddr) { struct ixlv_mac_filter *f; f = ixlv_find_mac_filter(sc, macaddr); if (f == NULL) return (ENOENT); f->flags |= IXL_FILTER_DEL; return (0); } static void ixlv_do_adminq_locked(struct ixlv_sc *sc) { struct i40e_hw *hw = &sc->hw; struct i40e_arq_event_info event; struct virtchnl_msg *v_msg; device_t dev = sc->dev; u16 result = 0; u32 reg, oldreg; i40e_status ret; bool aq_error = false; event.buf_len = IXL_AQ_BUF_SZ; event.msg_buf = sc->aq_buffer; v_msg = (struct virtchnl_msg *)&event.desc; do { ret = i40e_clean_arq_element(hw, &event, &result); if (ret) break; ixlv_vc_completion(sc, v_msg->v_opcode, v_msg->v_retval, event.msg_buf, event.msg_len); if (result != 0) bzero(event.msg_buf, IXL_AQ_BUF_SZ); } while (result); /* check for Admin queue errors */ oldreg = reg = rd32(hw, hw->aq.arq.len); if (reg & I40E_VF_ARQLEN1_ARQVFE_MASK) { device_printf(dev, "ARQ VF Error detected\n"); reg &= ~I40E_VF_ARQLEN1_ARQVFE_MASK; aq_error = true; } if (reg & I40E_VF_ARQLEN1_ARQOVFL_MASK) { device_printf(dev, "ARQ Overflow Error detected\n"); reg &= ~I40E_VF_ARQLEN1_ARQOVFL_MASK; aq_error = true; } if (reg & I40E_VF_ARQLEN1_ARQCRIT_MASK) { device_printf(dev, "ARQ Critical Error detected\n"); reg &= ~I40E_VF_ARQLEN1_ARQCRIT_MASK; aq_error = true; } if (oldreg != reg) wr32(hw, hw->aq.arq.len, reg); oldreg = reg = rd32(hw, hw->aq.asq.len); if (reg & I40E_VF_ATQLEN1_ATQVFE_MASK) { device_printf(dev, "ASQ VF Error detected\n"); reg &= ~I40E_VF_ATQLEN1_ATQVFE_MASK; aq_error = true; } if (reg & I40E_VF_ATQLEN1_ATQOVFL_MASK) { device_printf(dev, "ASQ Overflow Error detected\n"); reg &= ~I40E_VF_ATQLEN1_ATQOVFL_MASK; aq_error = true; } if (reg & I40E_VF_ATQLEN1_ATQCRIT_MASK) { device_printf(dev, "ASQ Critical Error detected\n"); reg &= ~I40E_VF_ATQLEN1_ATQCRIT_MASK; aq_error = true; } if (oldreg != reg) wr32(hw, hw->aq.asq.len, reg); if (aq_error) { /* Need to reset adapter */ device_printf(dev, "WARNING: Resetting!\n"); sc->init_state = IXLV_RESET_REQUIRED; ixlv_stop(sc); // TODO: Make stop/init calls match ixlv_if_init(sc->vsi.ctx); } ixlv_enable_adminq_irq(hw); } static void ixlv_add_sysctls(struct ixlv_sc *sc) { device_t dev = sc->dev; struct ixl_vsi *vsi = &sc->vsi; struct i40e_eth_stats *es = &vsi->eth_stats; struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(dev); struct sysctl_oid *tree = device_get_sysctl_tree(dev); struct sysctl_oid_list *child = SYSCTL_CHILDREN(tree); struct sysctl_oid *vsi_node; // *queue_node; struct sysctl_oid_list *vsi_list; // *queue_list; #define QUEUE_NAME_LEN 32 //char queue_namebuf[QUEUE_NAME_LEN]; #if 0 struct ixl_queue *queues = vsi->queues; struct tX_ring *txr; struct rx_ring *rxr; #endif /* Driver statistics sysctls */ SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "watchdog_events", CTLFLAG_RD, &sc->watchdog_events, "Watchdog timeouts"); SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "admin_irq", CTLFLAG_RD, &sc->admin_irq, "Admin Queue IRQ Handled"); SYSCTL_ADD_INT(ctx, child, OID_AUTO, "tx_ring_size", CTLFLAG_RD, &vsi->num_tx_desc, 0, "TX ring size"); SYSCTL_ADD_INT(ctx, child, OID_AUTO, "rx_ring_size", CTLFLAG_RD, &vsi->num_rx_desc, 0, "RX ring size"); SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "current_speed", CTLTYPE_STRING | CTLFLAG_RD, sc, 0, ixlv_sysctl_current_speed, "A", "Current Port Speed"); /* VSI statistics sysctls */ vsi_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "vsi", CTLFLAG_RD, NULL, "VSI-specific statistics"); vsi_list = SYSCTL_CHILDREN(vsi_node); struct ixl_sysctl_info ctls[] = { {&es->rx_bytes, 
"good_octets_rcvd", "Good Octets Received"}, {&es->rx_unicast, "ucast_pkts_rcvd", "Unicast Packets Received"}, {&es->rx_multicast, "mcast_pkts_rcvd", "Multicast Packets Received"}, {&es->rx_broadcast, "bcast_pkts_rcvd", "Broadcast Packets Received"}, {&es->rx_discards, "rx_discards", "Discarded RX packets"}, {&es->rx_unknown_protocol, "rx_unknown_proto", "RX unknown protocol packets"}, {&es->tx_bytes, "good_octets_txd", "Good Octets Transmitted"}, {&es->tx_unicast, "ucast_pkts_txd", "Unicast Packets Transmitted"}, {&es->tx_multicast, "mcast_pkts_txd", "Multicast Packets Transmitted"}, {&es->tx_broadcast, "bcast_pkts_txd", "Broadcast Packets Transmitted"}, {&es->tx_errors, "tx_errors", "TX packet errors"}, // end {0,0,0} }; struct ixl_sysctl_info *entry = ctls; while (entry->stat != NULL) { SYSCTL_ADD_QUAD(ctx, child, OID_AUTO, entry->name, CTLFLAG_RD, entry->stat, entry->description); entry++; } #if 0 /* Queue sysctls */ for (int q = 0; q < vsi->num_queues; q++) { snprintf(queue_namebuf, QUEUE_NAME_LEN, "que%d", q); queue_node = SYSCTL_ADD_NODE(ctx, vsi_list, OID_AUTO, queue_namebuf, CTLFLAG_RD, NULL, "Queue Name"); queue_list = SYSCTL_CHILDREN(queue_node); txr = &(queues[q].txr); rxr = &(queues[q].rxr); SYSCTL_ADD_QUAD(ctx, queue_list, OID_AUTO, "mbuf_defrag_failed", CTLFLAG_RD, &(queues[q].mbuf_defrag_failed), "m_defrag() failed"); SYSCTL_ADD_QUAD(ctx, queue_list, OID_AUTO, "dropped", CTLFLAG_RD, &(queues[q].dropped_pkts), "Driver dropped packets"); SYSCTL_ADD_QUAD(ctx, queue_list, OID_AUTO, "irqs", CTLFLAG_RD, &(queues[q].irqs), "irqs on this queue"); SYSCTL_ADD_QUAD(ctx, queue_list, OID_AUTO, "tso_tx", CTLFLAG_RD, &(queues[q].tso), "TSO"); SYSCTL_ADD_QUAD(ctx, queue_list, OID_AUTO, "tx_dmamap_failed", CTLFLAG_RD, &(queues[q].tx_dmamap_failed), "Driver tx dma failure in xmit"); SYSCTL_ADD_QUAD(ctx, queue_list, OID_AUTO, "no_desc_avail", CTLFLAG_RD, &(txr->no_desc), "Queue No Descriptor Available"); SYSCTL_ADD_QUAD(ctx, queue_list, OID_AUTO, "tx_packets", CTLFLAG_RD, &(txr->total_packets), "Queue Packets Transmitted"); SYSCTL_ADD_QUAD(ctx, queue_list, OID_AUTO, "tx_bytes", CTLFLAG_RD, &(txr->tx_bytes), "Queue Bytes Transmitted"); SYSCTL_ADD_QUAD(ctx, queue_list, OID_AUTO, "rx_packets", CTLFLAG_RD, &(rxr->rx_packets), "Queue Packets Received"); SYSCTL_ADD_QUAD(ctx, queue_list, OID_AUTO, "rx_bytes", CTLFLAG_RD, &(rxr->rx_bytes), "Queue Bytes Received"); SYSCTL_ADD_UINT(ctx, queue_list, OID_AUTO, "rx_itr", CTLFLAG_RD, &(rxr->itr), 0, "Queue Rx ITR Interval"); SYSCTL_ADD_UINT(ctx, queue_list, OID_AUTO, "tx_itr", CTLFLAG_RD, &(txr->itr), 0, "Queue Tx ITR Interval"); #ifdef IXL_DEBUG /* Examine queue state */ SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "qtx_head", CTLTYPE_UINT | CTLFLAG_RD, &queues[q], sizeof(struct ixl_queue), ixlv_sysctl_qtx_tail_handler, "IU", "Queue Transmit Descriptor Tail"); SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "qrx_head", CTLTYPE_UINT | CTLFLAG_RD, &queues[q], sizeof(struct ixl_queue), ixlv_sysctl_qrx_tail_handler, "IU", "Queue Receive Descriptor Tail"); SYSCTL_ADD_INT(ctx, queue_list, OID_AUTO, "watchdog_timer", CTLFLAG_RD, &(txr.watchdog_timer), 0, "Ticks before watchdog event is triggered"); #endif } #endif } static void ixlv_init_filters(struct ixlv_sc *sc) { sc->mac_filters = malloc(sizeof(struct ixlv_mac_filter), M_DEVBUF, M_NOWAIT | M_ZERO); SLIST_INIT(sc->mac_filters); sc->vlan_filters = malloc(sizeof(struct ixlv_vlan_filter), M_DEVBUF, M_NOWAIT | M_ZERO); SLIST_INIT(sc->vlan_filters); } static void ixlv_free_filters(struct ixlv_sc *sc) { struct 
ixlv_mac_filter *f; struct ixlv_vlan_filter *v; while (!SLIST_EMPTY(sc->mac_filters)) { f = SLIST_FIRST(sc->mac_filters); SLIST_REMOVE_HEAD(sc->mac_filters, next); free(f, M_DEVBUF); } free(sc->mac_filters, M_DEVBUF); while (!SLIST_EMPTY(sc->vlan_filters)) { v = SLIST_FIRST(sc->vlan_filters); SLIST_REMOVE_HEAD(sc->vlan_filters, next); free(v, M_DEVBUF); } free(sc->vlan_filters, M_DEVBUF); } static char * ixlv_vc_speed_to_string(enum virtchnl_link_speed link_speed) { int index; char *speeds[] = { "Unknown", "100 Mbps", "1 Gbps", "10 Gbps", "40 Gbps", "20 Gbps", "25 Gbps", }; switch (link_speed) { case VIRTCHNL_LINK_SPEED_100MB: index = 1; break; case VIRTCHNL_LINK_SPEED_1GB: index = 2; break; case VIRTCHNL_LINK_SPEED_10GB: index = 3; break; case VIRTCHNL_LINK_SPEED_40GB: index = 4; break; case VIRTCHNL_LINK_SPEED_20GB: index = 5; break; case VIRTCHNL_LINK_SPEED_25GB: index = 6; break; case VIRTCHNL_LINK_SPEED_UNKNOWN: default: index = 0; break; } return speeds[index]; } static int ixlv_sysctl_current_speed(SYSCTL_HANDLER_ARGS) { struct ixlv_sc *sc = (struct ixlv_sc *)arg1; int error = 0; error = sysctl_handle_string(oidp, ixlv_vc_speed_to_string(sc->link_speed), 8, req); return (error); } #ifdef IXL_DEBUG /** * ixlv_sysctl_qtx_tail_handler * Retrieves I40E_QTX_TAIL1 value from hardware * for a sysctl. */ static int ixlv_sysctl_qtx_tail_handler(SYSCTL_HANDLER_ARGS) { struct ixl_queue *que; int error; u32 val; que = ((struct ixl_queue *)oidp->oid_arg1); if (!que) return 0; val = rd32(que->vsi->hw, que->txr.tail); error = sysctl_handle_int(oidp, &val, 0, req); if (error || !req->newptr) return error; return (0); } /** * ixlv_sysctl_qrx_tail_handler * Retrieves I40E_QRX_TAIL1 value from hardware * for a sysctl. */ static int ixlv_sysctl_qrx_tail_handler(SYSCTL_HANDLER_ARGS) { struct ixl_queue *que; int error; u32 val; que = ((struct ixl_queue *)oidp->oid_arg1); if (!que) return 0; val = rd32(que->vsi->hw, que->rxr.tail); error = sysctl_handle_int(oidp, &val, 0, req); if (error || !req->newptr) return error; return (0); } #endif Index: head/sys/dev/mfi/mfi_pci.c =================================================================== --- head/sys/dev/mfi/mfi_pci.c (revision 338948) +++ head/sys/dev/mfi/mfi_pci.c (revision 338949) @@ -1,337 +1,340 @@ /*- * SPDX-License-Identifier: BSD-2-Clause-FreeBSD AND BSD-2-Clause * * Copyright (c) 2006 IronPort Systems * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ /*- * Copyright (c) 2007 LSI Corp. * Copyright (c) 2007 Rajesh Prabhakaran. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
*/ #include __FBSDID("$FreeBSD$"); /* PCI/PCI-X/PCIe bus interface for the LSI MegaSAS controllers */ #include "opt_mfi.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include static int mfi_pci_probe(device_t); static int mfi_pci_attach(device_t); static int mfi_pci_detach(device_t); static int mfi_pci_suspend(device_t); static int mfi_pci_resume(device_t); static void mfi_pci_free(struct mfi_softc *); static device_method_t mfi_methods[] = { DEVMETHOD(device_probe, mfi_pci_probe), DEVMETHOD(device_attach, mfi_pci_attach), DEVMETHOD(device_detach, mfi_pci_detach), DEVMETHOD(device_suspend, mfi_pci_suspend), DEVMETHOD(device_resume, mfi_pci_resume), DEVMETHOD_END }; static driver_t mfi_pci_driver = { "mfi", mfi_methods, sizeof(struct mfi_softc) }; static devclass_t mfi_devclass; -DRIVER_MODULE(mfi, pci, mfi_pci_driver, mfi_devclass, 0, 0); -MODULE_VERSION(mfi, 1); static int mfi_msi = 1; SYSCTL_INT(_hw_mfi, OID_AUTO, msi, CTLFLAG_RDTUN, &mfi_msi, 0, "Enable use of MSI interrupts"); static int mfi_mrsas_enable; SYSCTL_INT(_hw_mfi, OID_AUTO, mrsas_enable, CTLFLAG_RDTUN, &mfi_mrsas_enable, 0, "Allow mrasas to take newer cards"); struct mfi_ident { uint16_t vendor; uint16_t device; uint16_t subvendor; uint16_t subdevice; int flags; const char *desc; } mfi_identifiers[] = { {0x1000, 0x005b, 0x1028, 0x1f2d, MFI_FLAGS_SKINNY| MFI_FLAGS_TBOLT| MFI_FLAGS_MRSAS, "Dell PERC H810 Adapter"}, {0x1000, 0x005b, 0x1028, 0x1f30, MFI_FLAGS_SKINNY| MFI_FLAGS_TBOLT| MFI_FLAGS_MRSAS, "Dell PERC H710 Embedded"}, {0x1000, 0x005b, 0x1028, 0x1f31, MFI_FLAGS_SKINNY| MFI_FLAGS_TBOLT| MFI_FLAGS_MRSAS, "Dell PERC H710P Adapter"}, {0x1000, 0x005b, 0x1028, 0x1f33, MFI_FLAGS_SKINNY| MFI_FLAGS_TBOLT| MFI_FLAGS_MRSAS, "Dell PERC H710P Mini (blades)"}, {0x1000, 0x005b, 0x1028, 0x1f34, MFI_FLAGS_SKINNY| MFI_FLAGS_TBOLT| MFI_FLAGS_MRSAS, "Dell PERC H710P Mini (monolithics)"}, {0x1000, 0x005b, 0x1028, 0x1f35, MFI_FLAGS_SKINNY| MFI_FLAGS_TBOLT| MFI_FLAGS_MRSAS, "Dell PERC H710 Adapter"}, {0x1000, 0x005b, 0x1028, 0x1f37, MFI_FLAGS_SKINNY| MFI_FLAGS_TBOLT| MFI_FLAGS_MRSAS, "Dell PERC H710 Mini (blades)"}, {0x1000, 0x005b, 0x1028, 0x1f38, MFI_FLAGS_SKINNY| MFI_FLAGS_TBOLT| MFI_FLAGS_MRSAS, "Dell PERC H710 Mini (monolithics)"}, {0x1000, 0x005b, 0x8086, 0x9265, MFI_FLAGS_SKINNY| MFI_FLAGS_TBOLT| MFI_FLAGS_MRSAS, "Intel (R) RAID Controller RS25DB080"}, {0x1000, 0x005b, 0x8086, 0x9285, MFI_FLAGS_SKINNY| MFI_FLAGS_TBOLT| MFI_FLAGS_MRSAS, "Intel (R) RAID Controller RS25NB008"}, {0x1000, 0x005b, 0xffff, 0xffff, MFI_FLAGS_SKINNY| MFI_FLAGS_TBOLT| MFI_FLAGS_MRSAS, "ThunderBolt"}, {0x1000, 0x005d, 0xffff, 0xffff, MFI_FLAGS_SKINNY| MFI_FLAGS_TBOLT| MFI_FLAGS_MRSAS| MFI_FLAGS_INVADER, "Invader"}, {0x1000, 0x005f, 0xffff, 0xffff, MFI_FLAGS_SKINNY| MFI_FLAGS_TBOLT| MFI_FLAGS_MRSAS| MFI_FLAGS_FURY, "Fury"}, {0x1000, 0x0060, 0x1028, 0xffff, MFI_FLAGS_1078, "Dell PERC 6"}, {0x1000, 0x0060, 0xffff, 0xffff, MFI_FLAGS_1078, "LSI MegaSAS 1078"}, {0x1000, 0x0071, 0xffff, 0xffff, MFI_FLAGS_SKINNY, "Drake Skinny"}, {0x1000, 0x0073, 0xffff, 0xffff, MFI_FLAGS_SKINNY, "Drake Skinny"}, {0x1000, 0x0078, 0xffff, 0xffff, MFI_FLAGS_GEN2, "LSI MegaSAS Gen2"}, {0x1000, 0x0079, 0x1028, 0x1f15, MFI_FLAGS_GEN2, "Dell PERC H800 Adapter"}, {0x1000, 0x0079, 0x1028, 0x1f16, MFI_FLAGS_GEN2, "Dell PERC H700 Adapter"}, {0x1000, 0x0079, 0x1028, 0x1f17, MFI_FLAGS_GEN2, "Dell PERC H700 Integrated"}, {0x1000, 0x0079, 0x1028, 0x1f18, MFI_FLAGS_GEN2, "Dell PERC 
H700 Modular"}, {0x1000, 0x0079, 0x1028, 0x1f19, MFI_FLAGS_GEN2, "Dell PERC H700"}, {0x1000, 0x0079, 0x1028, 0x1f1a, MFI_FLAGS_GEN2, "Dell PERC H800 Proto Adapter"}, {0x1000, 0x0079, 0x1028, 0x1f1b, MFI_FLAGS_GEN2, "Dell PERC H800"}, {0x1000, 0x0079, 0x1028, 0xffff, MFI_FLAGS_GEN2, "Dell PERC Gen2"}, {0x1000, 0x0079, 0xffff, 0xffff, MFI_FLAGS_GEN2, "LSI MegaSAS Gen2"}, {0x1000, 0x007c, 0xffff, 0xffff, MFI_FLAGS_1078, "LSI MegaSAS 1078"}, {0x1000, 0x0411, 0xffff, 0xffff, MFI_FLAGS_1064R, "LSI MegaSAS 1064R"}, /* Brocton IOP */ {0x1000, 0x0413, 0xffff, 0xffff, MFI_FLAGS_1064R, "LSI MegaSAS 1064R"}, /* Verde ZCR */ {0x1028, 0x0015, 0xffff, 0xffff, MFI_FLAGS_1064R, "Dell PERC 5/i"}, {0, 0, 0, 0, 0, NULL} }; + +DRIVER_MODULE(mfi, pci, mfi_pci_driver, mfi_devclass, 0, 0); +MODULE_PNP_INFO("U16:vendor;U16:device;U16:subvendor;U16:subdevice", pci, mfi, + mfi_identifiers, nitems(mfi_identifiers) - 1); +MODULE_VERSION(mfi, 1); static struct mfi_ident * mfi_find_ident(device_t dev) { struct mfi_ident *m; for (m = mfi_identifiers; m->vendor != 0; m++) { if ((m->vendor == pci_get_vendor(dev)) && (m->device == pci_get_device(dev)) && ((m->subvendor == pci_get_subvendor(dev)) || (m->subvendor == 0xffff)) && ((m->subdevice == pci_get_subdevice(dev)) || (m->subdevice == 0xffff))) return (m); } return (NULL); } static int mfi_pci_probe(device_t dev) { struct mfi_ident *id; if ((id = mfi_find_ident(dev)) != NULL) { device_set_desc(dev, id->desc); /* give priority to mrsas if tunable set */ if ((id->flags & MFI_FLAGS_MRSAS) && mfi_mrsas_enable) return (BUS_PROBE_LOW_PRIORITY); else return (BUS_PROBE_DEFAULT); } return (ENXIO); } static int mfi_pci_attach(device_t dev) { struct mfi_softc *sc; struct mfi_ident *m; int count, error; sc = device_get_softc(dev); bzero(sc, sizeof(*sc)); sc->mfi_dev = dev; m = mfi_find_ident(dev); sc->mfi_flags = m->flags; /* Ensure busmastering is enabled */ pci_enable_busmaster(dev); /* Allocate PCI registers */ if ((sc->mfi_flags & MFI_FLAGS_1064R) || (sc->mfi_flags & MFI_FLAGS_1078)) { /* 1068/1078: Memory mapped BAR is at offset 0x10 */ sc->mfi_regs_rid = PCIR_BAR(0); } else if ((sc->mfi_flags & MFI_FLAGS_GEN2) || (sc->mfi_flags & MFI_FLAGS_SKINNY) || (sc->mfi_flags & MFI_FLAGS_TBOLT)) { /* Gen2/Skinny: Memory mapped BAR is at offset 0x14 */ sc->mfi_regs_rid = PCIR_BAR(1); } if ((sc->mfi_regs_resource = bus_alloc_resource_any(sc->mfi_dev, SYS_RES_MEMORY, &sc->mfi_regs_rid, RF_ACTIVE)) == NULL) { device_printf(dev, "Cannot allocate PCI registers\n"); return (ENXIO); } sc->mfi_btag = rman_get_bustag(sc->mfi_regs_resource); sc->mfi_bhandle = rman_get_bushandle(sc->mfi_regs_resource); error = ENOMEM; /* Allocate parent DMA tag */ if (bus_dma_tag_create( bus_get_dma_tag(dev), /* PCI parent */ 1, 0, /* algnmnt, boundary */ BUS_SPACE_MAXADDR, /* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filter, filterarg */ BUS_SPACE_MAXSIZE_32BIT,/* maxsize */ BUS_SPACE_UNRESTRICTED, /* nsegments */ BUS_SPACE_MAXSIZE_32BIT,/* maxsegsize */ 0, /* flags */ NULL, NULL, /* lockfunc, lockarg */ &sc->mfi_parent_dmat)) { device_printf(dev, "Cannot allocate parent DMA tag\n"); goto out; } /* Allocate IRQ resource. 
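 *
 * rid 0 is the legacy INTx line; when MSI is enabled through the
 * hw.mfi.msi tunable and pci_alloc_msi() grants a vector, rid 1
 * selects that MSI vector instead. Either way the interrupt is
 * requested RF_SHAREABLE | RF_ACTIVE below.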
*/ sc->mfi_irq_rid = 0; count = 1; if (mfi_msi && pci_alloc_msi(sc->mfi_dev, &count) == 0) { device_printf(sc->mfi_dev, "Using MSI\n"); sc->mfi_irq_rid = 1; } if ((sc->mfi_irq = bus_alloc_resource_any(sc->mfi_dev, SYS_RES_IRQ, &sc->mfi_irq_rid, RF_SHAREABLE | RF_ACTIVE)) == NULL) { device_printf(sc->mfi_dev, "Cannot allocate interrupt\n"); error = EINVAL; goto out; } error = mfi_attach(sc); out: if (error) { mfi_free(sc); mfi_pci_free(sc); } return (error); } static int mfi_pci_detach(device_t dev) { struct mfi_softc *sc; int error, devcount, i; device_t *devlist; sc = device_get_softc(dev); sx_xlock(&sc->mfi_config_lock); mtx_lock(&sc->mfi_io_lock); if ((sc->mfi_flags & MFI_FLAGS_OPEN) != 0) { mtx_unlock(&sc->mfi_io_lock); sx_xunlock(&sc->mfi_config_lock); return (EBUSY); } sc->mfi_detaching = 1; mtx_unlock(&sc->mfi_io_lock); if ((error = device_get_children(sc->mfi_dev, &devlist, &devcount)) != 0) { sx_xunlock(&sc->mfi_config_lock); return error; } for (i = 0; i < devcount; i++) device_delete_child(sc->mfi_dev, devlist[i]); free(devlist, M_TEMP); sx_xunlock(&sc->mfi_config_lock); EVENTHANDLER_DEREGISTER(shutdown_final, sc->mfi_eh); mfi_shutdown(sc); mfi_free(sc); mfi_pci_free(sc); return (0); } static void mfi_pci_free(struct mfi_softc *sc) { if (sc->mfi_regs_resource != NULL) { bus_release_resource(sc->mfi_dev, SYS_RES_MEMORY, sc->mfi_regs_rid, sc->mfi_regs_resource); } if (sc->mfi_irq_rid != 0) pci_release_msi(sc->mfi_dev); return; } static int mfi_pci_suspend(device_t dev) { return (EINVAL); } static int mfi_pci_resume(device_t dev) { return (EINVAL); } Index: head/sys/dev/mpr/mpr_pci.c =================================================================== --- head/sys/dev/mpr/mpr_pci.c (revision 338948) +++ head/sys/dev/mpr/mpr_pci.c (revision 338949) @@ -1,456 +1,461 @@ /*- * Copyright (c) 2009 Yahoo! Inc. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
*/ #include __FBSDID("$FreeBSD$"); /* PCI/PCI-X/PCIe bus interface for the Avago Tech (LSI) MPT3 controllers */ /* TODO Move headers to mprvar */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include static int mpr_pci_probe(device_t); static int mpr_pci_attach(device_t); static int mpr_pci_detach(device_t); static int mpr_pci_suspend(device_t); static int mpr_pci_resume(device_t); static void mpr_pci_free(struct mpr_softc *); static int mpr_alloc_msix(struct mpr_softc *sc, int msgs); static int mpr_alloc_msi(struct mpr_softc *sc, int msgs); static int mpr_pci_alloc_interrupts(struct mpr_softc *sc); static device_method_t mpr_methods[] = { DEVMETHOD(device_probe, mpr_pci_probe), DEVMETHOD(device_attach, mpr_pci_attach), DEVMETHOD(device_detach, mpr_pci_detach), DEVMETHOD(device_suspend, mpr_pci_suspend), DEVMETHOD(device_resume, mpr_pci_resume), DEVMETHOD(bus_print_child, bus_generic_print_child), DEVMETHOD(bus_driver_added, bus_generic_driver_added), { 0, 0 } }; static driver_t mpr_pci_driver = { "mpr", mpr_methods, sizeof(struct mpr_softc) }; -static devclass_t mpr_devclass; -DRIVER_MODULE(mpr, pci, mpr_pci_driver, mpr_devclass, 0, 0); -MODULE_DEPEND(mpr, cam, 1, 1, 1); struct mpr_ident { uint16_t vendor; uint16_t device; uint16_t subvendor; uint16_t subdevice; u_int flags; const char *desc; } mpr_identifiers[] = { { MPI2_MFGPAGE_VENDORID_LSI, MPI25_MFGPAGE_DEVID_SAS3004, 0xffff, 0xffff, 0, "Avago Technologies (LSI) SAS3004" }, { MPI2_MFGPAGE_VENDORID_LSI, MPI25_MFGPAGE_DEVID_SAS3008, 0xffff, 0xffff, 0, "Avago Technologies (LSI) SAS3008" }, { MPI2_MFGPAGE_VENDORID_LSI, MPI25_MFGPAGE_DEVID_SAS3108_1, 0xffff, 0xffff, 0, "Avago Technologies (LSI) SAS3108_1" }, { MPI2_MFGPAGE_VENDORID_LSI, MPI25_MFGPAGE_DEVID_SAS3108_2, 0xffff, 0xffff, 0, "Avago Technologies (LSI) SAS3108_2" }, { MPI2_MFGPAGE_VENDORID_LSI, MPI25_MFGPAGE_DEVID_SAS3108_5, 0xffff, 0xffff, 0, "Avago Technologies (LSI) SAS3108_5" }, { MPI2_MFGPAGE_VENDORID_LSI, MPI25_MFGPAGE_DEVID_SAS3108_6, 0xffff, 0xffff, 0, "Avago Technologies (LSI) SAS3108_6" }, { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3216, 0xffff, 0xffff, 0, "Avago Technologies (LSI) SAS3216" }, { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3224, 0xffff, 0xffff, 0, "Avago Technologies (LSI) SAS3224" }, { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3316_1, 0xffff, 0xffff, 0, "Avago Technologies (LSI) SAS3316_1" }, { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3316_2, 0xffff, 0xffff, 0, "Avago Technologies (LSI) SAS3316_2" }, { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3324_1, 0xffff, 0xffff, 0, "Avago Technologies (LSI) SAS3324_1" }, { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3324_2, 0xffff, 0xffff, 0, "Avago Technologies (LSI) SAS3324_2" }, { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3408, 0xffff, 0xffff, MPR_FLAGS_GEN35_IOC, "Avago Technologies (LSI) SAS3408" }, { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3416, 0xffff, 0xffff, MPR_FLAGS_GEN35_IOC, "Avago Technologies (LSI) SAS3416" }, { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3508, 0xffff, 0xffff, MPR_FLAGS_GEN35_IOC, "Avago Technologies (LSI) SAS3508" }, { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3508_1, 0xffff, 0xffff, MPR_FLAGS_GEN35_IOC, "Avago Technologies (LSI) SAS3508_1" }, { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3516, 0xffff, 0xffff, 
MPR_FLAGS_GEN35_IOC, "Avago Technologies (LSI) SAS3516" }, { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3516_1, 0xffff, 0xffff, MPR_FLAGS_GEN35_IOC, "Avago Technologies (LSI) SAS3516_1" }, { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3616, 0xffff, 0xffff, MPR_FLAGS_GEN35_IOC, "Avago Technologies (LSI) SAS3616" }, { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3708, 0xffff, 0xffff, MPR_FLAGS_GEN35_IOC, "Avago Technologies (LSI) SAS3708" }, { MPI2_MFGPAGE_VENDORID_LSI, MPI26_MFGPAGE_DEVID_SAS3716, 0xffff, 0xffff, MPR_FLAGS_GEN35_IOC, "Avago Technologies (LSI) SAS3716" }, { 0, 0, 0, 0, 0, NULL } }; + + +static devclass_t mpr_devclass; +DRIVER_MODULE(mpr, pci, mpr_pci_driver, mpr_devclass, 0, 0); +MODULE_PNP_INFO("U16:vendor;U16:device;U16:subvendor;U16:subdevice;D:#", pci, + mpr, mpr_identifiers, nitems(mpr_identifiers) - 1); + +MODULE_DEPEND(mpr, cam, 1, 1, 1); static struct mpr_ident * mpr_find_ident(device_t dev) { struct mpr_ident *m; for (m = mpr_identifiers; m->vendor != 0; m++) { if (m->vendor != pci_get_vendor(dev)) continue; if (m->device != pci_get_device(dev)) continue; if ((m->subvendor != 0xffff) && (m->subvendor != pci_get_subvendor(dev))) continue; if ((m->subdevice != 0xffff) && (m->subdevice != pci_get_subdevice(dev))) continue; return (m); } return (NULL); } static int mpr_pci_probe(device_t dev) { struct mpr_ident *id; if ((id = mpr_find_ident(dev)) != NULL) { device_set_desc(dev, id->desc); return (BUS_PROBE_DEFAULT); } return (ENXIO); } static int mpr_pci_attach(device_t dev) { struct mpr_softc *sc; struct mpr_ident *m; int error, i; sc = device_get_softc(dev); bzero(sc, sizeof(*sc)); sc->mpr_dev = dev; m = mpr_find_ident(dev); sc->mpr_flags = m->flags; mpr_get_tunables(sc); /* Twiddle basic PCI config bits for a sanity check */ pci_enable_busmaster(dev); for (i = 0; i < PCI_MAXMAPS_0; i++) { sc->mpr_regs_rid = PCIR_BAR(i); if ((sc->mpr_regs_resource = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &sc->mpr_regs_rid, RF_ACTIVE)) != NULL) break; } if (sc->mpr_regs_resource == NULL) { mpr_printf(sc, "Cannot allocate PCI registers\n"); return (ENXIO); } sc->mpr_btag = rman_get_bustag(sc->mpr_regs_resource); sc->mpr_bhandle = rman_get_bushandle(sc->mpr_regs_resource); /* Allocate the parent DMA tag */ if (bus_dma_tag_create( bus_get_dma_tag(dev), /* parent */ 1, 0, /* algnmnt, boundary */ BUS_SPACE_MAXADDR, /* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filter, filterarg */ BUS_SPACE_MAXSIZE_32BIT,/* maxsize */ BUS_SPACE_UNRESTRICTED, /* nsegments */ BUS_SPACE_MAXSIZE_32BIT,/* maxsegsize */ 0, /* flags */ NULL, NULL, /* lockfunc, lockarg */ &sc->mpr_parent_dmat)) { mpr_printf(sc, "Cannot allocate parent DMA tag\n"); mpr_pci_free(sc); return (ENOMEM); } if (((error = mpr_pci_alloc_interrupts(sc)) != 0) || ((error = mpr_attach(sc)) != 0)) mpr_pci_free(sc); return (error); } /* * Allocate, but don't assign interrupts early. Doing it before requesting * the IOCFacts message informs the firmware that we want to do MSI-X * multiqueue. We might not use all of the available messages, but there's * no reason to re-alloc if we don't. 
*/ int mpr_pci_alloc_interrupts(struct mpr_softc *sc) { device_t dev; int error, msgs; dev = sc->mpr_dev; error = 0; msgs = 0; if (sc->disable_msix == 0) { msgs = pci_msix_count(dev); mpr_dprint(sc, MPR_INIT, "Counted %d MSI-X messages\n", msgs); msgs = min(msgs, sc->max_msix); msgs = min(msgs, MPR_MSIX_MAX); msgs = min(msgs, 1); /* XXX */ if (msgs != 0) { mpr_dprint(sc, MPR_INIT, "Attempting to allocate %d " "MSI-X messages\n", msgs); error = mpr_alloc_msix(sc, msgs); } } if (((error != 0) || (msgs == 0)) && (sc->disable_msi == 0)) { msgs = pci_msi_count(dev); mpr_dprint(sc, MPR_INIT, "Counted %d MSI messages\n", msgs); msgs = min(msgs, MPR_MSI_MAX); if (msgs != 0) { mpr_dprint(sc, MPR_INIT, "Attempting to allocated %d " "MSI messages\n", MPR_MSI_MAX); error = mpr_alloc_msi(sc, MPR_MSI_MAX); } } if ((error != 0) || (msgs == 0)) { /* * If neither MSI or MSI-X are available, assume legacy INTx. * This also implies that there will be only 1 queue. */ mpr_dprint(sc, MPR_INIT, "Falling back to legacy INTx\n"); sc->mpr_flags |= MPR_FLAGS_INTX; msgs = 1; } else sc->mpr_flags |= MPR_FLAGS_MSI; sc->msi_msgs = msgs; mpr_dprint(sc, MPR_INIT, "Allocated %d interrupts\n", msgs); return (error); } int mpr_pci_setup_interrupts(struct mpr_softc *sc) { device_t dev; struct mpr_queue *q; void *ihandler; int i, error, rid, initial_rid; dev = sc->mpr_dev; error = ENXIO; if (sc->mpr_flags & MPR_FLAGS_INTX) { initial_rid = 0; ihandler = mpr_intr; } else if (sc->mpr_flags & MPR_FLAGS_MSI) { initial_rid = 1; ihandler = mpr_intr_msi; } else { mpr_dprint(sc, MPR_ERROR|MPR_INIT, "Unable to set up interrupts\n"); return (EINVAL); } for (i = 0; i < sc->msi_msgs; i++) { q = &sc->queues[i]; rid = i + initial_rid; q->irq_rid = rid; q->irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &q->irq_rid, RF_ACTIVE); if (q->irq == NULL) { mpr_dprint(sc, MPR_ERROR|MPR_INIT, "Cannot allocate interrupt RID %d\n", rid); sc->msi_msgs = i; break; } error = bus_setup_intr(dev, q->irq, INTR_TYPE_BIO | INTR_MPSAFE, NULL, ihandler, sc, &q->intrhand); if (error) { mpr_dprint(sc, MPR_ERROR|MPR_INIT, "Cannot setup interrupt RID %d\n", rid); sc->msi_msgs = i; break; } } mpr_dprint(sc, MPR_INIT, "Set up %d interrupts\n", sc->msi_msgs); return (error); } static int mpr_pci_detach(device_t dev) { struct mpr_softc *sc; int error; sc = device_get_softc(dev); if ((error = mpr_free(sc)) != 0) return (error); mpr_pci_free(sc); return (0); } void mpr_pci_free_interrupts(struct mpr_softc *sc) { struct mpr_queue *q; int i; if (sc->queues == NULL) return; for (i = 0; i < sc->msi_msgs; i++) { q = &sc->queues[i]; if (q->irq != NULL) { bus_teardown_intr(sc->mpr_dev, q->irq, q->intrhand); bus_release_resource(sc->mpr_dev, SYS_RES_IRQ, q->irq_rid, q->irq); } } } static void mpr_pci_free(struct mpr_softc *sc) { if (sc->mpr_parent_dmat != NULL) { bus_dma_tag_destroy(sc->mpr_parent_dmat); } mpr_pci_free_interrupts(sc); if (sc->mpr_flags & MPR_FLAGS_MSI) pci_release_msi(sc->mpr_dev); if (sc->mpr_regs_resource != NULL) { bus_release_resource(sc->mpr_dev, SYS_RES_MEMORY, sc->mpr_regs_rid, sc->mpr_regs_resource); } return; } static int mpr_pci_suspend(device_t dev) { return (EINVAL); } static int mpr_pci_resume(device_t dev) { return (EINVAL); } static int mpr_alloc_msix(struct mpr_softc *sc, int msgs) { int error; error = pci_alloc_msix(sc->mpr_dev, &msgs); return (error); } static int mpr_alloc_msi(struct mpr_softc *sc, int msgs) { int error; error = pci_alloc_msi(sc->mpr_dev, &msgs); return (error); } int mpr_pci_restore(struct mpr_softc *sc) { struct pci_devinfo 
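mpr_pci_alloc_interrupts narrows the advertised MSI-X count through several min() steps (hardware count, the max_msix tunable, the driver cap, and currently a hard clamp to one message), then falls back to MSI and finally to legacy INTx with a single queue. The sketch below models that clamping and fallback order only; the cap values and names are illustrative stand-ins for MPR_MSIX_MAX/MPR_MSI_MAX.

#include <stdio.h>

#define DRV_MSIX_MAX	16	/* illustrative, stands in for MPR_MSIX_MAX */
#define DRV_MSI_MAX	1	/* stands in for MPR_MSI_MAX */

static int min_int(int a, int b) { return (a < b ? a : b); }

/*
 * Model of the message-count selection: clamp the MSI-X count, fall back to
 * MSI, and finally to a single legacy INTx vector (one queue).
 */
static int
pick_msg_count(int hw_msix, int hw_msi, int max_msix_tunable)
{
	int msgs;

	msgs = min_int(hw_msix, max_msix_tunable);
	msgs = min_int(msgs, DRV_MSIX_MAX);
	msgs = min_int(msgs, 1);		/* mirrors the XXX clamp */
	if (msgs > 0)
		return (msgs);			/* MSI-X path */

	msgs = min_int(hw_msi, DRV_MSI_MAX);
	if (msgs > 0)
		return (msgs);			/* MSI path */

	return (1);				/* legacy INTx */
}

int
main(void)
{
	printf("msix=8 msi=1 -> %d vector(s)\n", pick_msg_count(8, 1, 16));
	printf("msix=0 msi=0 -> %d vector(s)\n", pick_msg_count(0, 0, 16));
	return (0);
}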
*dinfo; mpr_dprint(sc, MPR_TRACE, "%s\n", __func__); dinfo = device_get_ivars(sc->mpr_dev); if (dinfo == NULL) { mpr_dprint(sc, MPR_FAULT, "%s: NULL dinfo\n", __func__); return (EINVAL); } pci_cfg_restore(sc->mpr_dev, dinfo); return (0); } Index: head/sys/dev/mps/mps_pci.c =================================================================== --- head/sys/dev/mps/mps_pci.c (revision 338948) +++ head/sys/dev/mps/mps_pci.c (revision 338949) @@ -1,444 +1,444 @@ /*- * SPDX-License-Identifier: BSD-2-Clause-FreeBSD * * Copyright (c) 2009 Yahoo! Inc. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
*/ #include __FBSDID("$FreeBSD$"); /* PCI/PCI-X/PCIe bus interface for the Avago Tech (LSI) MPT2 controllers */ /* TODO Move headers to mpsvar */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include static int mps_pci_probe(device_t); static int mps_pci_attach(device_t); static int mps_pci_detach(device_t); static int mps_pci_suspend(device_t); static int mps_pci_resume(device_t); static void mps_pci_free(struct mps_softc *); static int mps_alloc_msix(struct mps_softc *sc, int msgs); static int mps_alloc_msi(struct mps_softc *sc, int msgs); static int mps_pci_alloc_interrupts(struct mps_softc *sc); static device_method_t mps_methods[] = { DEVMETHOD(device_probe, mps_pci_probe), DEVMETHOD(device_attach, mps_pci_attach), DEVMETHOD(device_detach, mps_pci_detach), DEVMETHOD(device_suspend, mps_pci_suspend), DEVMETHOD(device_resume, mps_pci_resume), DEVMETHOD_END }; static driver_t mps_pci_driver = { "mps", mps_methods, sizeof(struct mps_softc) }; -static devclass_t mps_devclass; -DRIVER_MODULE(mps, pci, mps_pci_driver, mps_devclass, 0, 0); -MODULE_DEPEND(mps, cam, 1, 1, 1); - struct mps_ident { uint16_t vendor; uint16_t device; uint16_t subvendor; uint16_t subdevice; u_int flags; const char *desc; } mps_identifiers[] = { { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2004, 0xffff, 0xffff, 0, "Avago Technologies (LSI) SAS2004" }, { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2008, 0xffff, 0xffff, 0, "Avago Technologies (LSI) SAS2008" }, { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2108_1, 0xffff, 0xffff, 0, "Avago Technologies (LSI) SAS2108" }, { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2108_2, 0xffff, 0xffff, 0, "Avago Technologies (LSI) SAS2108" }, { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2108_3, 0xffff, 0xffff, 0, "Avago Technologies (LSI) SAS2108" }, { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2116_1, 0xffff, 0xffff, 0, "Avago Technologies (LSI) SAS2116" }, { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2116_2, 0xffff, 0xffff, 0, "Avago Technologies (LSI) SAS2116" }, { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2208_1, 0xffff, 0xffff, 0, "Avago Technologies (LSI) SAS2208" }, { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2208_2, 0xffff, 0xffff, 0, "Avago Technologies (LSI) SAS2208" }, { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2208_3, 0xffff, 0xffff, 0, "Avago Technologies (LSI) SAS2208" }, { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2208_4, 0xffff, 0xffff, 0, "Avago Technologies (LSI) SAS2208" }, { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2208_5, 0xffff, 0xffff, 0, "Avago Technologies (LSI) SAS2208" }, { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2208_6, 0xffff, 0xffff, 0, "Avago Technologies (LSI) SAS2208" }, { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2308_1, 0xffff, 0xffff, 0, "Avago Technologies (LSI) SAS2308" }, // Add Customer specific vender/subdevice id before generic // (0xffff) vender/subdevice id. 
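The comment above the SAS2308_2 rows states the important invariant: mps_find_ident returns the first matching row, so the Intel subvendor/subdevice entries must precede the generic 0xffff entry for the same device ID or they would never be selected. A tiny standalone demonstration of why ordering matters in such a first-match table (rows abbreviated and illustrative).

#include <stdint.h>
#include <stdio.h>

struct row {
	uint16_t subvendor, subdevice;	/* 0xffff acts as a wildcard */
	const char *desc;
};

/* Specific subvendor/subdevice rows listed before the generic wildcard row. */
static const struct row rows[] = {
	{ 0x8086, 0x3516, "Intel RMS25JB080 (specific)" },
	{ 0xffff, 0xffff, "Generic SAS2308 (wildcard)" },
};

static const char *
first_match(uint16_t sven, uint16_t sdev)
{
	for (size_t i = 0; i < sizeof(rows) / sizeof(rows[0]); i++) {
		if ((rows[i].subvendor == 0xffff || rows[i].subvendor == sven) &&
		    (rows[i].subdevice == 0xffff || rows[i].subdevice == sdev))
			return (rows[i].desc);
	}
	return ("no match");
}

int
main(void)
{
	/*
	 * The Intel board gets its specific description only because its row
	 * comes first; the wildcard row would also have matched it.
	 */
	printf("%s\n", first_match(0x8086, 0x3516));
	printf("%s\n", first_match(0x1028, 0x0001));
	return (0);
}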
{ MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2308_2, 0x8086, 0x3516, 0, "Intel(R) Integrated RAID Module RMS25JB080" }, { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2308_2, 0x8086, 0x3517, 0, "Intel(R) Integrated RAID Module RMS25JB040" }, { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2308_2, 0x8086, 0x3518, 0, "Intel(R) Integrated RAID Module RMS25KB080" }, { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2308_2, 0x8086, 0x3519, 0, "Intel(R) Integrated RAID Module RMS25KB040" }, { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2308_2, 0xffff, 0xffff, 0, "Avago Technologies (LSI) SAS2308" }, { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SAS2308_3, 0xffff, 0xffff, 0, "Avago Technologies (LSI) SAS2308" }, { MPI2_MFGPAGE_VENDORID_LSI, MPI2_MFGPAGE_DEVID_SSS6200, 0xffff, 0xffff, 0, "Avago Technologies (LSI) SSS6200" }, { 0, 0, 0, 0, 0, NULL } }; +static devclass_t mps_devclass; +DRIVER_MODULE(mps, pci, mps_pci_driver, mps_devclass, 0, 0); +MODULE_PNP_INFO("U16:vendor;U16:device;U16:subvendor;U16:subdevice", pci, mps, + mps_identifiers, nitems(mps_identifiers) - 1); static struct mps_ident * mps_find_ident(device_t dev) { struct mps_ident *m; for (m = mps_identifiers; m->vendor != 0; m++) { if (m->vendor != pci_get_vendor(dev)) continue; if (m->device != pci_get_device(dev)) continue; if ((m->subvendor != 0xffff) && (m->subvendor != pci_get_subvendor(dev))) continue; if ((m->subdevice != 0xffff) && (m->subdevice != pci_get_subdevice(dev))) continue; return (m); } return (NULL); } static int mps_pci_probe(device_t dev) { struct mps_ident *id; if ((id = mps_find_ident(dev)) != NULL) { device_set_desc(dev, id->desc); return (BUS_PROBE_DEFAULT); } return (ENXIO); } static int mps_pci_attach(device_t dev) { struct mps_softc *sc; struct mps_ident *m; int error; sc = device_get_softc(dev); bzero(sc, sizeof(*sc)); sc->mps_dev = dev; m = mps_find_ident(dev); sc->mps_flags = m->flags; mps_get_tunables(sc); /* Twiddle basic PCI config bits for a sanity check */ pci_enable_busmaster(dev); /* Allocate the System Interface Register Set */ sc->mps_regs_rid = PCIR_BAR(1); if ((sc->mps_regs_resource = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &sc->mps_regs_rid, RF_ACTIVE)) == NULL) { mps_printf(sc, "Cannot allocate PCI registers\n"); return (ENXIO); } sc->mps_btag = rman_get_bustag(sc->mps_regs_resource); sc->mps_bhandle = rman_get_bushandle(sc->mps_regs_resource); /* Allocate the parent DMA tag */ if (bus_dma_tag_create( bus_get_dma_tag(dev), /* parent */ 1, 0, /* algnmnt, boundary */ BUS_SPACE_MAXADDR, /* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filter, filterarg */ BUS_SPACE_MAXSIZE_32BIT,/* maxsize */ BUS_SPACE_UNRESTRICTED, /* nsegments */ BUS_SPACE_MAXSIZE_32BIT,/* maxsegsize */ 0, /* flags */ NULL, NULL, /* lockfunc, lockarg */ &sc->mps_parent_dmat)) { mps_printf(sc, "Cannot allocate parent DMA tag\n"); mps_pci_free(sc); return (ENOMEM); } if (((error = mps_pci_alloc_interrupts(sc)) != 0) || ((error = mps_attach(sc)) != 0)) mps_pci_free(sc); return (error); } /* * Allocate, but don't assign interrupts early. Doing it before requesting * the IOCFacts message informs the firmware that we want to do MSI-X * multiqueue. We might not use all of the available messages, but there's * no reason to re-alloc if we don't. 
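Each MODULE_PNP_INFO call in this change passes nitems(table) - 1, presumably so the all-zero sentinel row that terminates the ident array is not exported as a bogus PNP entry. A minimal illustration of that count, with nitems() written out as the usual sizeof ratio; the table contents are illustrative.

#include <stdint.h>
#include <stdio.h>

struct ident {
	uint16_t vendor, device, subvendor, subdevice;
	const char *desc;
};

static const struct ident idents[] = {
	{ 0x1000, 0x0072, 0xffff, 0xffff, "SAS2008" },
	{ 0x1000, 0x0086, 0xffff, 0xffff, "SAS2308" },
	{ 0, 0, 0, 0, NULL }		/* sentinel, not a real device */
};

#define NITEMS(x)	(sizeof(x) / sizeof((x)[0]))

int
main(void)
{
	/* The PNP table length excludes the terminating sentinel entry. */
	printf("array entries: %zu, PNP entries: %zu\n",
	    NITEMS(idents), NITEMS(idents) - 1);
	return (0);
}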
*/ static int mps_pci_alloc_interrupts(struct mps_softc *sc) { device_t dev; int error, msgs; dev = sc->mps_dev; error = 0; msgs = 0; if (sc->disable_msix == 0) { msgs = pci_msix_count(dev); mps_dprint(sc, MPS_INIT, "Counted %d MSI-X messages\n", msgs); msgs = min(msgs, sc->max_msix); msgs = min(msgs, MPS_MSIX_MAX); msgs = min(msgs, 1); /* XXX */ if (msgs != 0) { mps_dprint(sc, MPS_INIT, "Attempting to allocate %d " "MSI-X messages\n", msgs); error = mps_alloc_msix(sc, msgs); } } if (((error != 0) || (msgs == 0)) && (sc->disable_msi == 0)) { msgs = pci_msi_count(dev); mps_dprint(sc, MPS_INIT, "Counted %d MSI messages\n", msgs); msgs = min(msgs, MPS_MSI_MAX); if (msgs != 0) { mps_dprint(sc, MPS_INIT, "Attempting to allocate %d " "MSI messages\n", MPS_MSI_MAX); error = mps_alloc_msi(sc, MPS_MSI_MAX); } } if ((error != 0) || (msgs == 0)) { /* * If neither MSI or MSI-X are avaiable, assume legacy INTx. * This also implies that there will be only 1 queue. */ mps_dprint(sc, MPS_INIT, "Falling back to legacy INTx\n"); sc->mps_flags |= MPS_FLAGS_INTX; msgs = 1; } else sc->mps_flags |= MPS_FLAGS_MSI; sc->msi_msgs = msgs; mps_dprint(sc, MPS_INIT, "Allocated %d interrupts\n", msgs); return (error); } int mps_pci_setup_interrupts(struct mps_softc *sc) { device_t dev; struct mps_queue *q; void *ihandler; int i, error, rid, initial_rid; dev = sc->mps_dev; error = ENXIO; if (sc->mps_flags & MPS_FLAGS_INTX) { initial_rid = 0; ihandler = mps_intr; } else if (sc->mps_flags & MPS_FLAGS_MSI) { initial_rid = 1; ihandler = mps_intr_msi; } else { mps_dprint(sc, MPS_ERROR|MPS_INIT, "Unable to set up interrupts\n"); return (EINVAL); } for (i = 0; i < sc->msi_msgs; i++) { q = &sc->queues[i]; rid = i + initial_rid; q->irq_rid = rid; q->irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &q->irq_rid, RF_ACTIVE); if (q->irq == NULL) { mps_dprint(sc, MPS_ERROR|MPS_INIT, "Cannot allocate interrupt RID %d\n", rid); sc->msi_msgs = i; break; } error = bus_setup_intr(dev, q->irq, INTR_TYPE_BIO | INTR_MPSAFE, NULL, ihandler, sc, &q->intrhand); if (error) { mps_dprint(sc, MPS_ERROR|MPS_INIT, "Cannot setup interrupt RID %d\n", rid); sc->msi_msgs = i; break; } } mps_dprint(sc, MPS_INIT, "Set up %d interrupts\n", sc->msi_msgs); return (error); } static int mps_pci_detach(device_t dev) { struct mps_softc *sc; int error; sc = device_get_softc(dev); if ((error = mps_free(sc)) != 0) return (error); mps_pci_free(sc); return (0); } void mps_pci_free_interrupts(struct mps_softc *sc) { struct mps_queue *q; int i; if (sc->queues == NULL) return; for (i = 0; i < sc->msi_msgs; i++) { q = &sc->queues[i]; if (q->irq != NULL) { bus_teardown_intr(sc->mps_dev, q->irq, q->intrhand); bus_release_resource(sc->mps_dev, SYS_RES_IRQ, q->irq_rid, q->irq); } } } static void mps_pci_free(struct mps_softc *sc) { if (sc->mps_parent_dmat != NULL) { bus_dma_tag_destroy(sc->mps_parent_dmat); } mps_pci_free_interrupts(sc); if (sc->mps_flags & MPS_FLAGS_MSI) pci_release_msi(sc->mps_dev); if (sc->mps_regs_resource != NULL) { bus_release_resource(sc->mps_dev, SYS_RES_MEMORY, sc->mps_regs_rid, sc->mps_regs_resource); } return; } static int mps_pci_suspend(device_t dev) { return (EINVAL); } static int mps_pci_resume(device_t dev) { return (EINVAL); } static int mps_alloc_msix(struct mps_softc *sc, int msgs) { int error; error = pci_alloc_msix(sc->mps_dev, &msgs); return (error); } static int mps_alloc_msi(struct mps_softc *sc, int msgs) { int error; error = pci_alloc_msi(sc->mps_dev, &msgs); return (error); } int mps_pci_restore(struct mps_softc *sc) { struct 
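mps_pci_setup_interrupts above derives each queue's resource ID as i + initial_rid, where initial_rid is 0 for legacy INTx and 1 for message-signalled interrupts, and trims msi_msgs if an allocation fails part way so only the queues that actually got a vector are used. A userland model of that rid assignment and early-stop behaviour; the allocation itself is faked.

#include <stdio.h>

#define NQUEUES	4

/* Pretend rid 3 cannot be allocated, to exercise the early-stop path. */
static int fake_alloc_irq(int rid) { return (rid == 3 ? -1 : 0); }

/*
 * Assign one IRQ rid per queue: rids start at 1 for MSI/MSI-X and at 0 for
 * legacy INTx.  On failure, remember how many queues actually got a vector.
 */
static int
setup_queue_irqs(int use_msi, int nqueues)
{
	int initial_rid = use_msi ? 1 : 0;
	int i, rid;

	for (i = 0; i < nqueues; i++) {
		rid = i + initial_rid;
		if (fake_alloc_irq(rid) != 0) {
			printf("queue %d: rid %d failed, stopping\n", i, rid);
			break;
		}
		printf("queue %d: rid %d\n", i, rid);
	}
	return (i);	/* number of queues with a working interrupt */
}

int
main(void)
{
	printf("usable queues: %d\n", setup_queue_irqs(1, NQUEUES));
	return (0);
}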
pci_devinfo *dinfo; mps_dprint(sc, MPS_TRACE, "%s\n", __func__); dinfo = device_get_ivars(sc->mps_dev); if (dinfo == NULL) { mps_dprint(sc, MPS_FAULT, "%s: NULL dinfo\n", __func__); return (EINVAL); } pci_cfg_restore(sc->mps_dev, dinfo); return (0); } Index: head/sys/dev/mvs/mvs_pci.c =================================================================== --- head/sys/dev/mvs/mvs_pci.c (revision 338948) +++ head/sys/dev/mvs/mvs_pci.c (revision 338949) @@ -1,526 +1,528 @@ /*- * SPDX-License-Identifier: BSD-2-Clause-FreeBSD * * Copyright (c) 2010 Alexander Motin * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer, * without modification, immediately at the beginning of the file. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "mvs.h" /* local prototypes */ static int mvs_setup_interrupt(device_t dev); static void mvs_intr(void *data); static int mvs_suspend(device_t dev); static int mvs_resume(device_t dev); static int mvs_ctlr_setup(device_t dev); static struct { uint32_t id; uint8_t rev; const char *name; int ports; int quirks; } mvs_ids[] = { {0x504011ab, 0x00, "Marvell 88SX5040", 4, MVS_Q_GENI}, {0x504111ab, 0x00, "Marvell 88SX5041", 4, MVS_Q_GENI}, {0x508011ab, 0x00, "Marvell 88SX5080", 8, MVS_Q_GENI}, {0x508111ab, 0x00, "Marvell 88SX5081", 8, MVS_Q_GENI}, {0x604011ab, 0x00, "Marvell 88SX6040", 4, MVS_Q_GENII}, {0x604111ab, 0x00, "Marvell 88SX6041", 4, MVS_Q_GENII}, {0x604211ab, 0x00, "Marvell 88SX6042", 4, MVS_Q_GENIIE}, {0x608011ab, 0x00, "Marvell 88SX6080", 8, MVS_Q_GENII}, {0x608111ab, 0x00, "Marvell 88SX6081", 8, MVS_Q_GENII}, {0x704211ab, 0x00, "Marvell 88SX7042", 4, MVS_Q_GENIIE|MVS_Q_CT}, {0x02419005, 0x00, "Adaptec 1420SA", 4, MVS_Q_GENII}, {0x02439005, 0x00, "Adaptec 1430SA", 4, MVS_Q_GENIIE|MVS_Q_CT}, {0x00000000, 0x00, NULL, 0, 0} }; static int mvs_probe(device_t dev) { char buf[64]; int i; uint32_t devid = pci_get_devid(dev); uint8_t revid = pci_get_revid(dev); for (i = 0; mvs_ids[i].id != 0; i++) { if (mvs_ids[i].id == devid && mvs_ids[i].rev <= revid) { snprintf(buf, sizeof(buf), "%s SATA controller", mvs_ids[i].name); device_set_desc_copy(dev, buf); return (BUS_PROBE_DEFAULT); } } return (ENXIO); } static int mvs_attach(device_t dev) { struct mvs_controller *ctlr = device_get_softc(dev); device_t child; int error, unit, i; uint32_t devid = pci_get_devid(dev); uint8_t revid = pci_get_revid(dev); ctlr->dev = dev; i = 0; while (mvs_ids[i].id != 0 && (mvs_ids[i].id != devid || mvs_ids[i].rev > revid)) i++; ctlr->channels = mvs_ids[i].ports; ctlr->quirks = mvs_ids[i].quirks; ctlr->ccc = 0; resource_int_value(device_get_name(dev), device_get_unit(dev), "ccc", &ctlr->ccc); ctlr->cccc = 8; resource_int_value(device_get_name(dev), device_get_unit(dev), "cccc", &ctlr->cccc); if (ctlr->ccc == 0 || ctlr->cccc == 0) { ctlr->ccc = 0; ctlr->cccc = 0; } if (ctlr->ccc > 100000) ctlr->ccc = 100000; device_printf(dev, "Gen-%s, %d %sGbps ports, Port Multiplier %s%s\n", ((ctlr->quirks & MVS_Q_GENI) ? "I" : ((ctlr->quirks & MVS_Q_GENII) ? "II" : "IIe")), ctlr->channels, ((ctlr->quirks & MVS_Q_GENI) ? "1.5" : "3"), ((ctlr->quirks & MVS_Q_GENI) ? "not supported" : "supported"), ((ctlr->quirks & MVS_Q_GENIIE) ? " with FBS" : "")); mtx_init(&ctlr->mtx, "MVS controller lock", NULL, MTX_DEF); /* We should have a memory BAR(0). */ ctlr->r_rid = PCIR_BAR(0); if (!(ctlr->r_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &ctlr->r_rid, RF_ACTIVE))) return ENXIO; /* Setup our own memory management for channels. */ ctlr->sc_iomem.rm_start = rman_get_start(ctlr->r_mem); ctlr->sc_iomem.rm_end = rman_get_end(ctlr->r_mem); ctlr->sc_iomem.rm_type = RMAN_ARRAY; ctlr->sc_iomem.rm_descr = "I/O memory addresses"; if ((error = rman_init(&ctlr->sc_iomem)) != 0) { bus_release_resource(dev, SYS_RES_MEMORY, ctlr->r_rid, ctlr->r_mem); return (error); } if ((error = rman_manage_region(&ctlr->sc_iomem, rman_get_start(ctlr->r_mem), rman_get_end(ctlr->r_mem))) != 0) { bus_release_resource(dev, SYS_RES_MEMORY, ctlr->r_rid, ctlr->r_mem); rman_fini(&ctlr->sc_iomem); return (error); } pci_enable_busmaster(dev); mvs_ctlr_setup(dev); /* Setup interrupts. 
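mvs_probe above matches on the combined 32-bit vendor/device word and additionally requires the table's minimum revision (mvs_ids[i].rev <= revid), which lines up with the W32:vendor/device form used in the new PNP string. A standalone model of that match; the IDs and names are an abbreviated, illustrative subset of mvs_ids.

#include <stdint.h>
#include <stdio.h>

struct chip {
	uint32_t id;		/* combined device/vendor word */
	uint8_t rev;		/* minimum supported revision */
	const char *name;
};

static const struct chip chips[] = {
	{ 0x704211ab, 0x00, "Marvell 88SX7042" },
	{ 0x02439005, 0x00, "Adaptec 1430SA" },
	{ 0x00000000, 0x00, NULL }
};

/* Match the devid word and accept only revisions at or above the table's. */
static const char *
probe(uint32_t devid, uint8_t revid)
{
	for (int i = 0; chips[i].id != 0; i++) {
		if (chips[i].id == devid && chips[i].rev <= revid)
			return (chips[i].name);
	}
	return (NULL);
}

int
main(void)
{
	const char *name = probe(0x704211ab, 0x01);

	printf("%s\n", name != NULL ? name : "not supported");
	return (0);
}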
*/ if (mvs_setup_interrupt(dev)) { bus_release_resource(dev, SYS_RES_MEMORY, ctlr->r_rid, ctlr->r_mem); rman_fini(&ctlr->sc_iomem); return ENXIO; } /* Attach all channels on this controller */ for (unit = 0; unit < ctlr->channels; unit++) { child = device_add_child(dev, "mvsch", -1); if (child == NULL) device_printf(dev, "failed to add channel device\n"); else device_set_ivars(child, (void *)(intptr_t)unit); } bus_generic_attach(dev); return 0; } static int mvs_detach(device_t dev) { struct mvs_controller *ctlr = device_get_softc(dev); /* Detach & delete all children */ device_delete_children(dev); /* Free interrupt. */ if (ctlr->irq.r_irq) { bus_teardown_intr(dev, ctlr->irq.r_irq, ctlr->irq.handle); bus_release_resource(dev, SYS_RES_IRQ, ctlr->irq.r_irq_rid, ctlr->irq.r_irq); } pci_release_msi(dev); /* Free memory. */ rman_fini(&ctlr->sc_iomem); if (ctlr->r_mem) bus_release_resource(dev, SYS_RES_MEMORY, ctlr->r_rid, ctlr->r_mem); mtx_destroy(&ctlr->mtx); return (0); } static int mvs_ctlr_setup(device_t dev) { struct mvs_controller *ctlr = device_get_softc(dev); int i, ccc = ctlr->ccc, cccc = ctlr->cccc, ccim = 0; /* Mask chip interrupts */ ATA_OUTL(ctlr->r_mem, CHIP_MIM, 0x00000000); /* Mask PCI interrupts */ ATA_OUTL(ctlr->r_mem, CHIP_PCIIM, 0x00000000); /* Clear PCI interrupts */ ATA_OUTL(ctlr->r_mem, CHIP_PCIIC, 0x00000000); if (ccc && bootverbose) { device_printf(dev, "CCC with %dus/%dcmd enabled\n", ctlr->ccc, ctlr->cccc); } ccc *= 150; /* Configure chip-global CCC */ if (ctlr->channels > 4 && (ctlr->quirks & MVS_Q_GENI) == 0) { ATA_OUTL(ctlr->r_mem, CHIP_ICT, cccc); ATA_OUTL(ctlr->r_mem, CHIP_ITT, ccc); ATA_OUTL(ctlr->r_mem, CHIP_ICC, ~CHIP_ICC_ALL_PORTS); if (ccc) ccim |= IC_ALL_PORTS_COAL_DONE; ccc = 0; cccc = 0; } for (i = 0; i < ctlr->channels / 4; i++) { /* Configure per-HC CCC */ ATA_OUTL(ctlr->r_mem, HC_BASE(i) + HC_ICT, cccc); ATA_OUTL(ctlr->r_mem, HC_BASE(i) + HC_ITT, ccc); if (ccc) ccim |= (IC_HC0_COAL_DONE << (i * IC_HC_SHIFT)); /* Clear HC interrupts */ ATA_OUTL(ctlr->r_mem, HC_BASE(i) + HC_IC, 0x00000000); } /* Enable chip interrupts */ ctlr->gmim = (ccim ? ccim : (IC_DONE_HC0 | IC_DONE_HC1)) | IC_ERR_HC0 | IC_ERR_HC1; ctlr->mim = ctlr->gmim | ctlr->pmim; ATA_OUTL(ctlr->r_mem, CHIP_MIM, ctlr->mim); /* Enable PCI interrupts */ ATA_OUTL(ctlr->r_mem, CHIP_PCIIM, 0x007fffff); return (0); } static void mvs_edma(device_t dev, device_t child, int mode) { struct mvs_controller *ctlr = device_get_softc(dev); int unit = ((struct mvs_channel *)device_get_softc(child))->unit; int bit = IC_DONE_IRQ << (unit * 2 + unit / 4) ; if (ctlr->ccc == 0) return; /* CCC is not working for non-EDMA mode. Unmask device interrupts. */ mtx_lock(&ctlr->mtx); if (mode == MVS_EDMA_OFF) ctlr->pmim |= bit; else ctlr->pmim &= ~bit; ctlr->mim = ctlr->gmim | ctlr->pmim; if (!ctlr->msia) ATA_OUTL(ctlr->r_mem, CHIP_MIM, ctlr->mim); mtx_unlock(&ctlr->mtx); } static int mvs_suspend(device_t dev) { struct mvs_controller *ctlr = device_get_softc(dev); bus_generic_suspend(dev); /* Mask chip interrupts */ ATA_OUTL(ctlr->r_mem, CHIP_MIM, 0x00000000); /* Mask PCI interrupts */ ATA_OUTL(ctlr->r_mem, CHIP_PCIIM, 0x00000000); return 0; } static int mvs_resume(device_t dev) { mvs_ctlr_setup(dev); return (bus_generic_resume(dev)); } static int mvs_setup_interrupt(device_t dev) { struct mvs_controller *ctlr = device_get_softc(dev); int msi = 0; /* Process hints. 
*/ resource_int_value(device_get_name(dev), device_get_unit(dev), "msi", &msi); if (msi < 0) msi = 0; else if (msi > 0) msi = min(1, pci_msi_count(dev)); /* Allocate MSI if needed/present. */ if (msi && pci_alloc_msi(dev, &msi) != 0) msi = 0; ctlr->msi = msi; /* Allocate all IRQs. */ ctlr->irq.r_irq_rid = msi ? 1 : 0; if (!(ctlr->irq.r_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &ctlr->irq.r_irq_rid, RF_SHAREABLE | RF_ACTIVE))) { device_printf(dev, "unable to map interrupt\n"); return (ENXIO); } if ((bus_setup_intr(dev, ctlr->irq.r_irq, ATA_INTR_FLAGS, NULL, mvs_intr, ctlr, &ctlr->irq.handle))) { device_printf(dev, "unable to setup interrupt\n"); bus_release_resource(dev, SYS_RES_IRQ, ctlr->irq.r_irq_rid, ctlr->irq.r_irq); ctlr->irq.r_irq = NULL; return (ENXIO); } return (0); } /* * Common case interrupt handler. */ static void mvs_intr(void *data) { struct mvs_controller *ctlr = data; struct mvs_intr_arg arg; void (*function)(void *); int p; u_int32_t ic, aic; ic = ATA_INL(ctlr->r_mem, CHIP_MIC); if (ctlr->msi) { /* We have to mask MSI during processing. */ mtx_lock(&ctlr->mtx); ATA_OUTL(ctlr->r_mem, CHIP_MIM, 0); ctlr->msia = 1; /* Deny MIM update during processing. */ mtx_unlock(&ctlr->mtx); } else if (ic == 0) return; /* Acknowledge all-ports CCC interrupt. */ if (ic & IC_ALL_PORTS_COAL_DONE) ATA_OUTL(ctlr->r_mem, CHIP_ICC, ~CHIP_ICC_ALL_PORTS); for (p = 0; p < ctlr->channels; p++) { if ((p & 3) == 0) { if (p != 0) ic >>= 1; if ((ic & IC_HC0) == 0) { p += 3; ic >>= 8; continue; } /* Acknowledge interrupts of this HC. */ aic = 0; if (ic & (IC_DONE_IRQ << 0)) aic |= HC_IC_DONE(0) | HC_IC_DEV(0); if (ic & (IC_DONE_IRQ << 2)) aic |= HC_IC_DONE(1) | HC_IC_DEV(1); if (ic & (IC_DONE_IRQ << 4)) aic |= HC_IC_DONE(2) | HC_IC_DEV(2); if (ic & (IC_DONE_IRQ << 6)) aic |= HC_IC_DONE(3) | HC_IC_DEV(3); if (ic & IC_HC0_COAL_DONE) aic |= HC_IC_COAL; ATA_OUTL(ctlr->r_mem, HC_BASE(p == 4) + HC_IC, ~aic); } /* Call per-port interrupt handler. */ arg.cause = ic & (IC_ERR_IRQ|IC_DONE_IRQ); if ((arg.cause != 0) && (function = ctlr->interrupt[p].function)) { arg.arg = ctlr->interrupt[p].argument; function(&arg); } ic >>= 2; } if (ctlr->msi) { /* Unmasking MSI triggers next interrupt, if needed. */ mtx_lock(&ctlr->mtx); ctlr->msia = 0; /* Allow MIM update. 
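mvs_intr above walks the chip-level cause word two bits per port (done and error), with an extra one-bit shift at each four-port host-controller boundary so the second HC's ports line up. The sketch below models only that walk, with the register read replaced by a constant; the bit names and the meaning of the skipped per-HC bit are assumptions for illustration, not the driver's exact register layout.

#include <stdint.h>
#include <stdio.h>

#define IC_DONE_IRQ	0x01	/* illustrative: 2 cause bits per port */
#define IC_ERR_IRQ	0x02
#define NCHANNELS	8

/*
 * Walk a merged cause word: two bits per port, plus one extra skipped bit at
 * each 4-port host-controller boundary (assumed to be a per-HC summary bit).
 */
static void
dispatch(uint32_t ic)
{
	for (int p = 0; p < NCHANNELS; p++) {
		if ((p & 3) == 0 && p != 0)
			ic >>= 1;	/* skip the per-HC bit */
		if (ic & (IC_DONE_IRQ | IC_ERR_IRQ))
			printf("port %d: cause %#x\n", p,
			    ic & (IC_DONE_IRQ | IC_ERR_IRQ));
		ic >>= 2;
	}
}

int
main(void)
{
	dispatch(0x00000009);	/* done on port 0, error on port 1 */
	return (0);
}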
*/ ATA_OUTL(ctlr->r_mem, CHIP_MIM, ctlr->mim); mtx_unlock(&ctlr->mtx); } } static struct resource * mvs_alloc_resource(device_t dev, device_t child, int type, int *rid, rman_res_t start, rman_res_t end, rman_res_t count, u_int flags) { struct mvs_controller *ctlr = device_get_softc(dev); int unit = ((struct mvs_channel *)device_get_softc(child))->unit; struct resource *res = NULL; int offset = HC_BASE(unit >> 2) + PORT_BASE(unit & 0x03); rman_res_t st; switch (type) { case SYS_RES_MEMORY: st = rman_get_start(ctlr->r_mem); res = rman_reserve_resource(&ctlr->sc_iomem, st + offset, st + offset + PORT_SIZE - 1, PORT_SIZE, RF_ACTIVE, child); if (res) { bus_space_handle_t bsh; bus_space_tag_t bst; bsh = rman_get_bushandle(ctlr->r_mem); bst = rman_get_bustag(ctlr->r_mem); bus_space_subregion(bst, bsh, offset, PORT_SIZE, &bsh); rman_set_bushandle(res, bsh); rman_set_bustag(res, bst); } break; case SYS_RES_IRQ: if (*rid == ATA_IRQ_RID) res = ctlr->irq.r_irq; break; } return (res); } static int mvs_release_resource(device_t dev, device_t child, int type, int rid, struct resource *r) { switch (type) { case SYS_RES_MEMORY: rman_release_resource(r); return (0); case SYS_RES_IRQ: if (rid != ATA_IRQ_RID) return ENOENT; return (0); } return (EINVAL); } static int mvs_setup_intr(device_t dev, device_t child, struct resource *irq, int flags, driver_filter_t *filter, driver_intr_t *function, void *argument, void **cookiep) { struct mvs_controller *ctlr = device_get_softc(dev); int unit = (intptr_t)device_get_ivars(child); if (filter != NULL) { printf("mvs.c: we cannot use a filter here\n"); return (EINVAL); } ctlr->interrupt[unit].function = function; ctlr->interrupt[unit].argument = argument; return (0); } static int mvs_teardown_intr(device_t dev, device_t child, struct resource *irq, void *cookie) { struct mvs_controller *ctlr = device_get_softc(dev); int unit = (intptr_t)device_get_ivars(child); ctlr->interrupt[unit].function = NULL; ctlr->interrupt[unit].argument = NULL; return (0); } static int mvs_print_child(device_t dev, device_t child) { int retval; retval = bus_print_child_header(dev, child); retval += printf(" at channel %d", (int)(intptr_t)device_get_ivars(child)); retval += bus_print_child_footer(dev, child); return (retval); } static int mvs_child_location_str(device_t dev, device_t child, char *buf, size_t buflen) { snprintf(buf, buflen, "channel=%d", (int)(intptr_t)device_get_ivars(child)); return (0); } static bus_dma_tag_t mvs_get_dma_tag(device_t bus, device_t child) { return (bus_get_dma_tag(bus)); } static device_method_t mvs_methods[] = { DEVMETHOD(device_probe, mvs_probe), DEVMETHOD(device_attach, mvs_attach), DEVMETHOD(device_detach, mvs_detach), DEVMETHOD(device_suspend, mvs_suspend), DEVMETHOD(device_resume, mvs_resume), DEVMETHOD(bus_print_child, mvs_print_child), DEVMETHOD(bus_alloc_resource, mvs_alloc_resource), DEVMETHOD(bus_release_resource, mvs_release_resource), DEVMETHOD(bus_setup_intr, mvs_setup_intr), DEVMETHOD(bus_teardown_intr,mvs_teardown_intr), DEVMETHOD(bus_child_location_str, mvs_child_location_str), DEVMETHOD(bus_get_dma_tag, mvs_get_dma_tag), DEVMETHOD(mvs_edma, mvs_edma), { 0, 0 } }; static driver_t mvs_driver = { "mvs", mvs_methods, sizeof(struct mvs_controller) }; DRIVER_MODULE(mvs, pci, mvs_driver, mvs_devclass, 0, 0); +MODULE_PNP_INFO("W32:vendor/device", pci, mvs, mvs_ids, + nitems(mvs_ids) - 1); MODULE_VERSION(mvs, 1); MODULE_DEPEND(mvs, cam, 1, 1, 1); Index: head/sys/dev/my/if_my.c =================================================================== --- 
head/sys/dev/my/if_my.c (revision 338948) +++ head/sys/dev/my/if_my.c (revision 338949) @@ -1,1776 +1,1778 @@ /*- * SPDX-License-Identifier: BSD-2-Clause-FreeBSD * * Written by: yen_cw@myson.com.tw * Copyright (c) 2002 Myson Technology Inc. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions, and the following disclaimer, * without modification, immediately at the beginning of the file. * 2. The name of the author may not be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * Myson fast ethernet PCI NIC driver, available at: http://www.myson.com.tw/ */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #define NBPFILTER 1 #include #include #include #include #include #include #include #include #include /* for vtophys */ #include /* for vtophys */ #include #include #include #include #include #include /* * #define MY_USEIOSPACE */ static int MY_USEIOSPACE = 1; #ifdef MY_USEIOSPACE #define MY_RES SYS_RES_IOPORT #define MY_RID MY_PCI_LOIO #else #define MY_RES SYS_RES_MEMORY #define MY_RID MY_PCI_LOMEM #endif #include /* * Various supported device vendors/types and their names. */ struct my_type *my_info_tmp; static struct my_type my_devs[] = { {MYSONVENDORID, MTD800ID, "Myson MTD80X Based Fast Ethernet Card"}, {MYSONVENDORID, MTD803ID, "Myson MTD80X Based Fast Ethernet Card"}, {MYSONVENDORID, MTD891ID, "Myson MTD89X Based Giga Ethernet Card"}, {0, 0, NULL} }; /* * Various supported PHY vendors/types and their names. Note that this driver * will work with pretty much any MII-compliant PHY, so failure to positively * identify the chip is not a fatal error. 
*/ static struct my_type my_phys[] = { {MysonPHYID0, MysonPHYID0, ""}, {SeeqPHYID0, SeeqPHYID0, ""}, {AhdocPHYID0, AhdocPHYID0, ""}, {MarvellPHYID0, MarvellPHYID0, ""}, {LevelOnePHYID0, LevelOnePHYID0, ""}, {0, 0, ""} }; static int my_probe(device_t); static int my_attach(device_t); static int my_detach(device_t); static int my_newbuf(struct my_softc *, struct my_chain_onefrag *); static int my_encap(struct my_softc *, struct my_chain *, struct mbuf *); static void my_rxeof(struct my_softc *); static void my_txeof(struct my_softc *); static void my_txeoc(struct my_softc *); static void my_intr(void *); static void my_start(struct ifnet *); static void my_start_locked(struct ifnet *); static int my_ioctl(struct ifnet *, u_long, caddr_t); static void my_init(void *); static void my_init_locked(struct my_softc *); static void my_stop(struct my_softc *); static void my_autoneg_timeout(void *); static void my_watchdog(void *); static int my_shutdown(device_t); static int my_ifmedia_upd(struct ifnet *); static void my_ifmedia_sts(struct ifnet *, struct ifmediareq *); static u_int16_t my_phy_readreg(struct my_softc *, int); static void my_phy_writereg(struct my_softc *, int, int); static void my_autoneg_xmit(struct my_softc *); static void my_autoneg_mii(struct my_softc *, int, int); static void my_setmode_mii(struct my_softc *, int); static void my_getmode_mii(struct my_softc *); static void my_setcfg(struct my_softc *, int); static void my_setmulti(struct my_softc *); static void my_reset(struct my_softc *); static int my_list_rx_init(struct my_softc *); static int my_list_tx_init(struct my_softc *); static long my_send_cmd_to_phy(struct my_softc *, int, int); #define MY_SETBIT(sc, reg, x) CSR_WRITE_4(sc, reg, CSR_READ_4(sc, reg) | (x)) #define MY_CLRBIT(sc, reg, x) CSR_WRITE_4(sc, reg, CSR_READ_4(sc, reg) & ~(x)) static device_method_t my_methods[] = { /* Device interface */ DEVMETHOD(device_probe, my_probe), DEVMETHOD(device_attach, my_attach), DEVMETHOD(device_detach, my_detach), DEVMETHOD(device_shutdown, my_shutdown), DEVMETHOD_END }; static driver_t my_driver = { "my", my_methods, sizeof(struct my_softc) }; static devclass_t my_devclass; DRIVER_MODULE(my, pci, my_driver, my_devclass, 0, 0); +MODULE_PNP_INFO("U16:vendor;U16:device;D:#", pci, my, my_devs, + nitems(my_devs) - 1); MODULE_DEPEND(my, pci, 1, 1, 1); MODULE_DEPEND(my, ether, 1, 1, 1); static long my_send_cmd_to_phy(struct my_softc * sc, int opcode, int regad) { long miir; int i; int mask, data; MY_LOCK_ASSERT(sc); /* enable MII output */ miir = CSR_READ_4(sc, MY_MANAGEMENT); miir &= 0xfffffff0; miir |= MY_MASK_MIIR_MII_WRITE + MY_MASK_MIIR_MII_MDO; /* send 32 1's preamble */ for (i = 0; i < 32; i++) { /* low MDC; MDO is already high (miir) */ miir &= ~MY_MASK_MIIR_MII_MDC; CSR_WRITE_4(sc, MY_MANAGEMENT, miir); /* high MDC */ miir |= MY_MASK_MIIR_MII_MDC; CSR_WRITE_4(sc, MY_MANAGEMENT, miir); } /* calculate ST+OP+PHYAD+REGAD+TA */ data = opcode | (sc->my_phy_addr << 7) | (regad << 2); /* sent out */ mask = 0x8000; while (mask) { /* low MDC, prepare MDO */ miir &= ~(MY_MASK_MIIR_MII_MDC + MY_MASK_MIIR_MII_MDO); if (mask & data) miir |= MY_MASK_MIIR_MII_MDO; CSR_WRITE_4(sc, MY_MANAGEMENT, miir); /* high MDC */ miir |= MY_MASK_MIIR_MII_MDC; CSR_WRITE_4(sc, MY_MANAGEMENT, miir); DELAY(30); /* next */ mask >>= 1; if (mask == 0x2 && opcode == MY_OP_READ) miir &= ~MY_MASK_MIIR_MII_WRITE; } return miir; } static u_int16_t my_phy_readreg(struct my_softc * sc, int reg) { long miir; int mask, data; MY_LOCK_ASSERT(sc); if (sc->my_info->my_did 
== MTD803ID) data = CSR_READ_2(sc, MY_PHYBASE + reg * 2); else { miir = my_send_cmd_to_phy(sc, MY_OP_READ, reg); /* read data */ mask = 0x8000; data = 0; while (mask) { /* low MDC */ miir &= ~MY_MASK_MIIR_MII_MDC; CSR_WRITE_4(sc, MY_MANAGEMENT, miir); /* read MDI */ miir = CSR_READ_4(sc, MY_MANAGEMENT); if (miir & MY_MASK_MIIR_MII_MDI) data |= mask; /* high MDC, and wait */ miir |= MY_MASK_MIIR_MII_MDC; CSR_WRITE_4(sc, MY_MANAGEMENT, miir); DELAY(30); /* next */ mask >>= 1; } /* low MDC */ miir &= ~MY_MASK_MIIR_MII_MDC; CSR_WRITE_4(sc, MY_MANAGEMENT, miir); } return (u_int16_t) data; } static void my_phy_writereg(struct my_softc * sc, int reg, int data) { long miir; int mask; MY_LOCK_ASSERT(sc); if (sc->my_info->my_did == MTD803ID) CSR_WRITE_2(sc, MY_PHYBASE + reg * 2, data); else { miir = my_send_cmd_to_phy(sc, MY_OP_WRITE, reg); /* write data */ mask = 0x8000; while (mask) { /* low MDC, prepare MDO */ miir &= ~(MY_MASK_MIIR_MII_MDC + MY_MASK_MIIR_MII_MDO); if (mask & data) miir |= MY_MASK_MIIR_MII_MDO; CSR_WRITE_4(sc, MY_MANAGEMENT, miir); DELAY(1); /* high MDC */ miir |= MY_MASK_MIIR_MII_MDC; CSR_WRITE_4(sc, MY_MANAGEMENT, miir); DELAY(1); /* next */ mask >>= 1; } /* low MDC */ miir &= ~MY_MASK_MIIR_MII_MDC; CSR_WRITE_4(sc, MY_MANAGEMENT, miir); } return; } /* * Program the 64-bit multicast hash filter. */ static void my_setmulti(struct my_softc * sc) { struct ifnet *ifp; int h = 0; u_int32_t hashes[2] = {0, 0}; struct ifmultiaddr *ifma; u_int32_t rxfilt; int mcnt = 0; MY_LOCK_ASSERT(sc); ifp = sc->my_ifp; rxfilt = CSR_READ_4(sc, MY_TCRRCR); if (ifp->if_flags & IFF_ALLMULTI || ifp->if_flags & IFF_PROMISC) { rxfilt |= MY_AM; CSR_WRITE_4(sc, MY_TCRRCR, rxfilt); CSR_WRITE_4(sc, MY_MAR0, 0xFFFFFFFF); CSR_WRITE_4(sc, MY_MAR1, 0xFFFFFFFF); return; } /* first, zot all the existing hash bits */ CSR_WRITE_4(sc, MY_MAR0, 0); CSR_WRITE_4(sc, MY_MAR1, 0); /* now program new ones */ if_maddr_rlock(ifp); CK_STAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) { if (ifma->ifma_addr->sa_family != AF_LINK) continue; h = ~ether_crc32_be(LLADDR((struct sockaddr_dl *) ifma->ifma_addr), ETHER_ADDR_LEN) >> 26; if (h < 32) hashes[0] |= (1 << h); else hashes[1] |= (1 << (h - 32)); mcnt++; } if_maddr_runlock(ifp); if (mcnt) rxfilt |= MY_AM; else rxfilt &= ~MY_AM; CSR_WRITE_4(sc, MY_MAR0, hashes[0]); CSR_WRITE_4(sc, MY_MAR1, hashes[1]); CSR_WRITE_4(sc, MY_TCRRCR, rxfilt); return; } /* * Initiate an autonegotiation session. */ static void my_autoneg_xmit(struct my_softc * sc) { u_int16_t phy_sts = 0; MY_LOCK_ASSERT(sc); my_phy_writereg(sc, PHY_BMCR, PHY_BMCR_RESET); DELAY(500); while (my_phy_readreg(sc, PHY_BMCR) & PHY_BMCR_RESET); phy_sts = my_phy_readreg(sc, PHY_BMCR); phy_sts |= PHY_BMCR_AUTONEGENBL | PHY_BMCR_AUTONEGRSTR; my_phy_writereg(sc, PHY_BMCR, phy_sts); return; } static void my_autoneg_timeout(void *arg) { struct my_softc *sc; sc = arg; MY_LOCK_ASSERT(sc); my_autoneg_mii(sc, MY_FLAG_DELAYTIMEO, 1); } /* * Invoke autonegotiation on a PHY. */ static void my_autoneg_mii(struct my_softc * sc, int flag, int verbose) { u_int16_t phy_sts = 0, media, advert, ability; u_int16_t ability2 = 0; struct ifnet *ifp; struct ifmedia *ifm; MY_LOCK_ASSERT(sc); ifm = &sc->ifmedia; ifp = sc->my_ifp; ifm->ifm_media = IFM_ETHER | IFM_AUTO; #ifndef FORCE_AUTONEG_TFOUR /* * First, see if autoneg is supported. If not, there's no point in * continuing. 
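my_setmulti above programs a 64-bit hash filter split across the two 32-bit MAR registers: the hash index is the top six bits of the inverted big-endian CRC of the multicast address, indexes 0-31 set a bit in MAR0 and 32-63 a bit in MAR1. A sketch of just the bit placement; the CRC step is reduced to a caller-supplied index since only the indexing is of interest here.

#include <stdint.h>
#include <stdio.h>

/*
 * Place one multicast hash index into the pair of 32-bit filter words the way
 * my_setmulti fills MY_MAR0/MY_MAR1.  The index would normally come from
 * (~ether_crc32_be(addr, 6)) >> 26; here it is passed in directly.
 */
static void
set_hash_bit(uint32_t mar[2], unsigned int h)
{
	h &= 0x3f;			/* 6-bit index, 0..63 */
	if (h < 32)
		mar[0] |= 1u << h;
	else
		mar[1] |= 1u << (h - 32);
}

int
main(void)
{
	uint32_t mar[2] = { 0, 0 };

	set_hash_bit(mar, 5);		/* example index in the low word */
	set_hash_bit(mar, 40);		/* example index in the high word */
	printf("MAR0=%08x MAR1=%08x\n", mar[0], mar[1]);
	return (0);
}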
*/ phy_sts = my_phy_readreg(sc, PHY_BMSR); if (!(phy_sts & PHY_BMSR_CANAUTONEG)) { if (verbose) device_printf(sc->my_dev, "autonegotiation not supported\n"); ifm->ifm_media = IFM_ETHER | IFM_10_T | IFM_HDX; return; } #endif switch (flag) { case MY_FLAG_FORCEDELAY: /* * XXX Never use this option anywhere but in the probe * routine: making the kernel stop dead in its tracks for * three whole seconds after we've gone multi-user is really * bad manners. */ my_autoneg_xmit(sc); DELAY(5000000); break; case MY_FLAG_SCHEDDELAY: /* * Wait for the transmitter to go idle before starting an * autoneg session, otherwise my_start() may clobber our * timeout, and we don't want to allow transmission during an * autoneg session since that can screw it up. */ if (sc->my_cdata.my_tx_head != NULL) { sc->my_want_auto = 1; MY_UNLOCK(sc); return; } my_autoneg_xmit(sc); callout_reset(&sc->my_autoneg_timer, hz * 5, my_autoneg_timeout, sc); sc->my_autoneg = 1; sc->my_want_auto = 0; return; case MY_FLAG_DELAYTIMEO: callout_stop(&sc->my_autoneg_timer); sc->my_autoneg = 0; break; default: device_printf(sc->my_dev, "invalid autoneg flag: %d\n", flag); return; } if (my_phy_readreg(sc, PHY_BMSR) & PHY_BMSR_AUTONEGCOMP) { if (verbose) device_printf(sc->my_dev, "autoneg complete, "); phy_sts = my_phy_readreg(sc, PHY_BMSR); } else { if (verbose) device_printf(sc->my_dev, "autoneg not complete, "); } media = my_phy_readreg(sc, PHY_BMCR); /* Link is good. Report modes and set duplex mode. */ if (my_phy_readreg(sc, PHY_BMSR) & PHY_BMSR_LINKSTAT) { if (verbose) device_printf(sc->my_dev, "link status good. "); advert = my_phy_readreg(sc, PHY_ANAR); ability = my_phy_readreg(sc, PHY_LPAR); if ((sc->my_pinfo->my_vid == MarvellPHYID0) || (sc->my_pinfo->my_vid == LevelOnePHYID0)) { ability2 = my_phy_readreg(sc, PHY_1000SR); if (ability2 & PHY_1000SR_1000BTXFULL) { advert = 0; ability = 0; /* * this version did not support 1000M, * ifm->ifm_media = * IFM_ETHER|IFM_1000_T|IFM_FDX; */ ifm->ifm_media = IFM_ETHER | IFM_100_TX | IFM_FDX; media &= ~PHY_BMCR_SPEEDSEL; media |= PHY_BMCR_1000; media |= PHY_BMCR_DUPLEX; printf("(full-duplex, 1000Mbps)\n"); } else if (ability2 & PHY_1000SR_1000BTXHALF) { advert = 0; ability = 0; /* * this version did not support 1000M, * ifm->ifm_media = IFM_ETHER|IFM_1000_T; */ ifm->ifm_media = IFM_ETHER | IFM_100_TX; media &= ~PHY_BMCR_SPEEDSEL; media &= ~PHY_BMCR_DUPLEX; media |= PHY_BMCR_1000; printf("(half-duplex, 1000Mbps)\n"); } } if (advert & PHY_ANAR_100BT4 && ability & PHY_ANAR_100BT4) { ifm->ifm_media = IFM_ETHER | IFM_100_T4; media |= PHY_BMCR_SPEEDSEL; media &= ~PHY_BMCR_DUPLEX; printf("(100baseT4)\n"); } else if (advert & PHY_ANAR_100BTXFULL && ability & PHY_ANAR_100BTXFULL) { ifm->ifm_media = IFM_ETHER | IFM_100_TX | IFM_FDX; media |= PHY_BMCR_SPEEDSEL; media |= PHY_BMCR_DUPLEX; printf("(full-duplex, 100Mbps)\n"); } else if (advert & PHY_ANAR_100BTXHALF && ability & PHY_ANAR_100BTXHALF) { ifm->ifm_media = IFM_ETHER | IFM_100_TX | IFM_HDX; media |= PHY_BMCR_SPEEDSEL; media &= ~PHY_BMCR_DUPLEX; printf("(half-duplex, 100Mbps)\n"); } else if (advert & PHY_ANAR_10BTFULL && ability & PHY_ANAR_10BTFULL) { ifm->ifm_media = IFM_ETHER | IFM_10_T | IFM_FDX; media &= ~PHY_BMCR_SPEEDSEL; media |= PHY_BMCR_DUPLEX; printf("(full-duplex, 10Mbps)\n"); } else if (advert) { ifm->ifm_media = IFM_ETHER | IFM_10_T | IFM_HDX; media &= ~PHY_BMCR_SPEEDSEL; media &= ~PHY_BMCR_DUPLEX; printf("(half-duplex, 10Mbps)\n"); } media &= ~PHY_BMCR_AUTONEGENBL; /* Set ASIC's duplex mode to match the PHY. 
*/ my_phy_writereg(sc, PHY_BMCR, media); my_setcfg(sc, media); } else { if (verbose) device_printf(sc->my_dev, "no carrier\n"); } my_init_locked(sc); if (sc->my_tx_pend) { sc->my_autoneg = 0; sc->my_tx_pend = 0; my_start_locked(ifp); } return; } /* * To get PHY ability. */ static void my_getmode_mii(struct my_softc * sc) { u_int16_t bmsr; struct ifnet *ifp; MY_LOCK_ASSERT(sc); ifp = sc->my_ifp; bmsr = my_phy_readreg(sc, PHY_BMSR); if (bootverbose) device_printf(sc->my_dev, "PHY status word: %x\n", bmsr); /* fallback */ sc->ifmedia.ifm_media = IFM_ETHER | IFM_10_T | IFM_HDX; if (bmsr & PHY_BMSR_10BTHALF) { if (bootverbose) device_printf(sc->my_dev, "10Mbps half-duplex mode supported\n"); ifmedia_add(&sc->ifmedia, IFM_ETHER | IFM_10_T | IFM_HDX, 0, NULL); ifmedia_add(&sc->ifmedia, IFM_ETHER | IFM_10_T, 0, NULL); } if (bmsr & PHY_BMSR_10BTFULL) { if (bootverbose) device_printf(sc->my_dev, "10Mbps full-duplex mode supported\n"); ifmedia_add(&sc->ifmedia, IFM_ETHER | IFM_10_T | IFM_FDX, 0, NULL); sc->ifmedia.ifm_media = IFM_ETHER | IFM_10_T | IFM_FDX; } if (bmsr & PHY_BMSR_100BTXHALF) { if (bootverbose) device_printf(sc->my_dev, "100Mbps half-duplex mode supported\n"); ifp->if_baudrate = 100000000; ifmedia_add(&sc->ifmedia, IFM_ETHER | IFM_100_TX, 0, NULL); ifmedia_add(&sc->ifmedia, IFM_ETHER | IFM_100_TX | IFM_HDX, 0, NULL); sc->ifmedia.ifm_media = IFM_ETHER | IFM_100_TX | IFM_HDX; } if (bmsr & PHY_BMSR_100BTXFULL) { if (bootverbose) device_printf(sc->my_dev, "100Mbps full-duplex mode supported\n"); ifp->if_baudrate = 100000000; ifmedia_add(&sc->ifmedia, IFM_ETHER | IFM_100_TX | IFM_FDX, 0, NULL); sc->ifmedia.ifm_media = IFM_ETHER | IFM_100_TX | IFM_FDX; } /* Some also support 100BaseT4. */ if (bmsr & PHY_BMSR_100BT4) { if (bootverbose) device_printf(sc->my_dev, "100baseT4 mode supported\n"); ifp->if_baudrate = 100000000; ifmedia_add(&sc->ifmedia, IFM_ETHER | IFM_100_T4, 0, NULL); sc->ifmedia.ifm_media = IFM_ETHER | IFM_100_T4; #ifdef FORCE_AUTONEG_TFOUR if (bootverbose) device_printf(sc->my_dev, "forcing on autoneg support for BT4\n"); ifmedia_add(&sc->ifmedia, IFM_ETHER | IFM_AUTO, 0 NULL): sc->ifmedia.ifm_media = IFM_ETHER | IFM_AUTO; #endif } #if 0 /* this version did not support 1000M, */ if (sc->my_pinfo->my_vid == MarvellPHYID0) { if (bootverbose) device_printf(sc->my_dev, "1000Mbps half-duplex mode supported\n"); ifp->if_baudrate = 1000000000; ifmedia_add(&sc->ifmedia, IFM_ETHER | IFM_1000_T, 0, NULL); ifmedia_add(&sc->ifmedia, IFM_ETHER | IFM_1000_T | IFM_HDX, 0, NULL); if (bootverbose) device_printf(sc->my_dev, "1000Mbps full-duplex mode supported\n"); ifp->if_baudrate = 1000000000; ifmedia_add(&sc->ifmedia, IFM_ETHER | IFM_1000_T | IFM_FDX, 0, NULL); sc->ifmedia.ifm_media = IFM_ETHER | IFM_1000_T | IFM_FDX; } #endif if (bmsr & PHY_BMSR_CANAUTONEG) { if (bootverbose) device_printf(sc->my_dev, "autoneg supported\n"); ifmedia_add(&sc->ifmedia, IFM_ETHER | IFM_AUTO, 0, NULL); sc->ifmedia.ifm_media = IFM_ETHER | IFM_AUTO; } return; } /* * Set speed and duplex mode. */ static void my_setmode_mii(struct my_softc * sc, int media) { u_int16_t bmcr; MY_LOCK_ASSERT(sc); /* * If an autoneg session is in progress, stop it. 
*/ if (sc->my_autoneg) { device_printf(sc->my_dev, "canceling autoneg session\n"); callout_stop(&sc->my_autoneg_timer); sc->my_autoneg = sc->my_want_auto = 0; bmcr = my_phy_readreg(sc, PHY_BMCR); bmcr &= ~PHY_BMCR_AUTONEGENBL; my_phy_writereg(sc, PHY_BMCR, bmcr); } device_printf(sc->my_dev, "selecting MII, "); bmcr = my_phy_readreg(sc, PHY_BMCR); bmcr &= ~(PHY_BMCR_AUTONEGENBL | PHY_BMCR_SPEEDSEL | PHY_BMCR_1000 | PHY_BMCR_DUPLEX | PHY_BMCR_LOOPBK); #if 0 /* this version did not support 1000M, */ if (IFM_SUBTYPE(media) == IFM_1000_T) { printf("1000Mbps/T4, half-duplex\n"); bmcr &= ~PHY_BMCR_SPEEDSEL; bmcr &= ~PHY_BMCR_DUPLEX; bmcr |= PHY_BMCR_1000; } #endif if (IFM_SUBTYPE(media) == IFM_100_T4) { printf("100Mbps/T4, half-duplex\n"); bmcr |= PHY_BMCR_SPEEDSEL; bmcr &= ~PHY_BMCR_DUPLEX; } if (IFM_SUBTYPE(media) == IFM_100_TX) { printf("100Mbps, "); bmcr |= PHY_BMCR_SPEEDSEL; } if (IFM_SUBTYPE(media) == IFM_10_T) { printf("10Mbps, "); bmcr &= ~PHY_BMCR_SPEEDSEL; } if ((media & IFM_GMASK) == IFM_FDX) { printf("full duplex\n"); bmcr |= PHY_BMCR_DUPLEX; } else { printf("half duplex\n"); bmcr &= ~PHY_BMCR_DUPLEX; } my_phy_writereg(sc, PHY_BMCR, bmcr); my_setcfg(sc, bmcr); return; } /* * The Myson manual states that in order to fiddle with the 'full-duplex' and * '100Mbps' bits in the netconfig register, we first have to put the * transmit and/or receive logic in the idle state. */ static void my_setcfg(struct my_softc * sc, int bmcr) { int i, restart = 0; MY_LOCK_ASSERT(sc); if (CSR_READ_4(sc, MY_TCRRCR) & (MY_TE | MY_RE)) { restart = 1; MY_CLRBIT(sc, MY_TCRRCR, (MY_TE | MY_RE)); for (i = 0; i < MY_TIMEOUT; i++) { DELAY(10); if (!(CSR_READ_4(sc, MY_TCRRCR) & (MY_TXRUN | MY_RXRUN))) break; } if (i == MY_TIMEOUT) device_printf(sc->my_dev, "failed to force tx and rx to idle \n"); } MY_CLRBIT(sc, MY_TCRRCR, MY_PS1000); MY_CLRBIT(sc, MY_TCRRCR, MY_PS10); if (bmcr & PHY_BMCR_1000) MY_SETBIT(sc, MY_TCRRCR, MY_PS1000); else if (!(bmcr & PHY_BMCR_SPEEDSEL)) MY_SETBIT(sc, MY_TCRRCR, MY_PS10); if (bmcr & PHY_BMCR_DUPLEX) MY_SETBIT(sc, MY_TCRRCR, MY_FD); else MY_CLRBIT(sc, MY_TCRRCR, MY_FD); if (restart) MY_SETBIT(sc, MY_TCRRCR, MY_TE | MY_RE); return; } static void my_reset(struct my_softc * sc) { int i; MY_LOCK_ASSERT(sc); MY_SETBIT(sc, MY_BCR, MY_SWR); for (i = 0; i < MY_TIMEOUT; i++) { DELAY(10); if (!(CSR_READ_4(sc, MY_BCR) & MY_SWR)) break; } if (i == MY_TIMEOUT) device_printf(sc->my_dev, "reset never completed!\n"); /* Wait a little while for the chip to get its brains in order. */ DELAY(1000); return; } /* * Probe for a Myson chip. Check the PCI vendor and device IDs against our * list and return a device name if we find a match. */ static int my_probe(device_t dev) { struct my_type *t; t = my_devs; while (t->my_name != NULL) { if ((pci_get_vendor(dev) == t->my_vid) && (pci_get_device(dev) == t->my_did)) { device_set_desc(dev, t->my_name); my_info_tmp = t; return (BUS_PROBE_DEFAULT); } t++; } return (ENXIO); } /* * Attach the interface. Allocate softc structures, do ifmedia setup and * ethernet/BPF attach. 
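my_setmode_mii above translates the selected ifmedia word into BMCR speed and duplex bits before handing the same value to my_setcfg so the MAC side is configured to match. A small model of that translation with simplified media constants; the enum and BMCR bit names below are stand-ins chosen for illustration, not the kernel's definitions.

#include <stdint.h>
#include <stdio.h>

/* Simplified stand-ins for the media subtypes and BMCR control bits. */
enum media { MEDIA_10_T, MEDIA_100_TX };
#define BMCR_SPEED_100	0x2000
#define BMCR_FDX	0x0100

/* Map a media selection (subtype + duplex flag) onto BMCR control bits. */
static uint16_t
media_to_bmcr(enum media m, int full_duplex)
{
	uint16_t bmcr = 0;

	if (m == MEDIA_100_TX)
		bmcr |= BMCR_SPEED_100;	/* speed select: 100 Mb/s */
	if (full_duplex)
		bmcr |= BMCR_FDX;	/* duplex select */
	return (bmcr);
}

int
main(void)
{
	printf("100/full -> BMCR %#06x\n",
	    (unsigned)media_to_bmcr(MEDIA_100_TX, 1));
	printf("10/half  -> BMCR %#06x\n",
	    (unsigned)media_to_bmcr(MEDIA_10_T, 0));
	return (0);
}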
*/ static int my_attach(device_t dev) { int i; u_char eaddr[ETHER_ADDR_LEN]; u_int32_t iobase; struct my_softc *sc; struct ifnet *ifp; int media = IFM_ETHER | IFM_100_TX | IFM_FDX; unsigned int round; caddr_t roundptr; struct my_type *p; u_int16_t phy_vid, phy_did, phy_sts = 0; int rid, error = 0; sc = device_get_softc(dev); sc->my_dev = dev; mtx_init(&sc->my_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK, MTX_DEF); callout_init_mtx(&sc->my_autoneg_timer, &sc->my_mtx, 0); callout_init_mtx(&sc->my_watchdog, &sc->my_mtx, 0); /* * Map control/status registers. */ pci_enable_busmaster(dev); if (my_info_tmp->my_did == MTD800ID) { iobase = pci_read_config(dev, MY_PCI_LOIO, 4); if (iobase & 0x300) MY_USEIOSPACE = 0; } rid = MY_RID; sc->my_res = bus_alloc_resource_any(dev, MY_RES, &rid, RF_ACTIVE); if (sc->my_res == NULL) { device_printf(dev, "couldn't map ports/memory\n"); error = ENXIO; goto destroy_mutex; } sc->my_btag = rman_get_bustag(sc->my_res); sc->my_bhandle = rman_get_bushandle(sc->my_res); rid = 0; sc->my_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, RF_SHAREABLE | RF_ACTIVE); if (sc->my_irq == NULL) { device_printf(dev, "couldn't map interrupt\n"); error = ENXIO; goto release_io; } sc->my_info = my_info_tmp; /* Reset the adapter. */ MY_LOCK(sc); my_reset(sc); MY_UNLOCK(sc); /* * Get station address */ for (i = 0; i < ETHER_ADDR_LEN; ++i) eaddr[i] = CSR_READ_1(sc, MY_PAR0 + i); sc->my_ldata_ptr = malloc(sizeof(struct my_list_data) + 8, M_DEVBUF, M_NOWAIT); if (sc->my_ldata_ptr == NULL) { device_printf(dev, "no memory for list buffers!\n"); error = ENXIO; goto release_irq; } sc->my_ldata = (struct my_list_data *) sc->my_ldata_ptr; round = (uintptr_t)sc->my_ldata_ptr & 0xF; roundptr = sc->my_ldata_ptr; for (i = 0; i < 8; i++) { if (round % 8) { round++; roundptr++; } else break; } sc->my_ldata = (struct my_list_data *) roundptr; bzero(sc->my_ldata, sizeof(struct my_list_data)); ifp = sc->my_ifp = if_alloc(IFT_ETHER); if (ifp == NULL) { device_printf(dev, "can not if_alloc()\n"); error = ENOSPC; goto free_ldata; } ifp->if_softc = sc; if_initname(ifp, device_get_name(dev), device_get_unit(dev)); ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; ifp->if_ioctl = my_ioctl; ifp->if_start = my_start; ifp->if_init = my_init; ifp->if_baudrate = 10000000; IFQ_SET_MAXLEN(&ifp->if_snd, ifqmaxlen); ifp->if_snd.ifq_drv_maxlen = ifqmaxlen; IFQ_SET_READY(&ifp->if_snd); if (sc->my_info->my_did == MTD803ID) sc->my_pinfo = my_phys; else { if (bootverbose) device_printf(dev, "probing for a PHY\n"); MY_LOCK(sc); for (i = MY_PHYADDR_MIN; i < MY_PHYADDR_MAX + 1; i++) { if (bootverbose) device_printf(dev, "checking address: %d\n", i); sc->my_phy_addr = i; phy_sts = my_phy_readreg(sc, PHY_BMSR); if ((phy_sts != 0) && (phy_sts != 0xffff)) break; else phy_sts = 0; } if (phy_sts) { phy_vid = my_phy_readreg(sc, PHY_VENID); phy_did = my_phy_readreg(sc, PHY_DEVID); if (bootverbose) { device_printf(dev, "found PHY at address %d, ", sc->my_phy_addr); printf("vendor id: %x device id: %x\n", phy_vid, phy_did); } p = my_phys; while (p->my_vid) { if (phy_vid == p->my_vid) { sc->my_pinfo = p; break; } p++; } if (sc->my_pinfo == NULL) sc->my_pinfo = &my_phys[PHY_UNKNOWN]; if (bootverbose) device_printf(dev, "PHY type: %s\n", sc->my_pinfo->my_name); } else { MY_UNLOCK(sc); device_printf(dev, "MII without any phy!\n"); error = ENXIO; goto free_if; } MY_UNLOCK(sc); } /* Do ifmedia setup. 
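my_attach above over-allocates the descriptor area by 8 bytes and then bumps the pointer forward until it sits on an 8-byte boundary. The same result can be expressed as a plain align-up computation; a minimal userland sketch, where the helper name is ours and the alignment value follows the driver's loop.

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/* Round a pointer up to the next 'align' boundary (align: power of two). */
static void *
align_up(void *p, uintptr_t align)
{
	return ((void *)(((uintptr_t)p + align - 1) & ~(align - 1)));
}

int
main(void)
{
	size_t want = 256;
	/* Over-allocate by the alignment so the rounded pointer still fits. */
	void *raw = malloc(want + 8);
	void *desc = align_up(raw, 8);

	printf("raw=%p aligned=%p\n", raw, desc);
	free(raw);
	return (0);
}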
*/ ifmedia_init(&sc->ifmedia, 0, my_ifmedia_upd, my_ifmedia_sts); MY_LOCK(sc); my_getmode_mii(sc); my_autoneg_mii(sc, MY_FLAG_FORCEDELAY, 1); media = sc->ifmedia.ifm_media; my_stop(sc); MY_UNLOCK(sc); ifmedia_set(&sc->ifmedia, media); ether_ifattach(ifp, eaddr); error = bus_setup_intr(dev, sc->my_irq, INTR_TYPE_NET | INTR_MPSAFE, NULL, my_intr, sc, &sc->my_intrhand); if (error) { device_printf(dev, "couldn't set up irq\n"); goto detach_if; } return (0); detach_if: ether_ifdetach(ifp); free_if: if_free(ifp); free_ldata: free(sc->my_ldata_ptr, M_DEVBUF); release_irq: bus_release_resource(dev, SYS_RES_IRQ, 0, sc->my_irq); release_io: bus_release_resource(dev, MY_RES, MY_RID, sc->my_res); destroy_mutex: mtx_destroy(&sc->my_mtx); return (error); } static int my_detach(device_t dev) { struct my_softc *sc; struct ifnet *ifp; sc = device_get_softc(dev); ifp = sc->my_ifp; ether_ifdetach(ifp); MY_LOCK(sc); my_stop(sc); MY_UNLOCK(sc); bus_teardown_intr(dev, sc->my_irq, sc->my_intrhand); callout_drain(&sc->my_watchdog); callout_drain(&sc->my_autoneg_timer); if_free(ifp); free(sc->my_ldata_ptr, M_DEVBUF); bus_release_resource(dev, SYS_RES_IRQ, 0, sc->my_irq); bus_release_resource(dev, MY_RES, MY_RID, sc->my_res); mtx_destroy(&sc->my_mtx); return (0); } /* * Initialize the transmit descriptors. */ static int my_list_tx_init(struct my_softc * sc) { struct my_chain_data *cd; struct my_list_data *ld; int i; MY_LOCK_ASSERT(sc); cd = &sc->my_cdata; ld = sc->my_ldata; for (i = 0; i < MY_TX_LIST_CNT; i++) { cd->my_tx_chain[i].my_ptr = &ld->my_tx_list[i]; if (i == (MY_TX_LIST_CNT - 1)) cd->my_tx_chain[i].my_nextdesc = &cd->my_tx_chain[0]; else cd->my_tx_chain[i].my_nextdesc = &cd->my_tx_chain[i + 1]; } cd->my_tx_free = &cd->my_tx_chain[0]; cd->my_tx_tail = cd->my_tx_head = NULL; return (0); } /* * Initialize the RX descriptors and allocate mbufs for them. Note that we * arrange the descriptors in a closed ring, so that the last descriptor * points back to the first. */ static int my_list_rx_init(struct my_softc * sc) { struct my_chain_data *cd; struct my_list_data *ld; int i; MY_LOCK_ASSERT(sc); cd = &sc->my_cdata; ld = sc->my_ldata; for (i = 0; i < MY_RX_LIST_CNT; i++) { cd->my_rx_chain[i].my_ptr = (struct my_desc *) & ld->my_rx_list[i]; if (my_newbuf(sc, &cd->my_rx_chain[i]) == ENOBUFS) { MY_UNLOCK(sc); return (ENOBUFS); } if (i == (MY_RX_LIST_CNT - 1)) { cd->my_rx_chain[i].my_nextdesc = &cd->my_rx_chain[0]; ld->my_rx_list[i].my_next = vtophys(&ld->my_rx_list[0]); } else { cd->my_rx_chain[i].my_nextdesc = &cd->my_rx_chain[i + 1]; ld->my_rx_list[i].my_next = vtophys(&ld->my_rx_list[i + 1]); } } cd->my_rx_head = &cd->my_rx_chain[0]; return (0); } /* * Initialize an RX descriptor and attach an MBUF cluster. */ static int my_newbuf(struct my_softc * sc, struct my_chain_onefrag * c) { struct mbuf *m_new = NULL; MY_LOCK_ASSERT(sc); MGETHDR(m_new, M_NOWAIT, MT_DATA); if (m_new == NULL) { device_printf(sc->my_dev, "no memory for rx list -- packet dropped!\n"); return (ENOBUFS); } if (!(MCLGET(m_new, M_NOWAIT))) { device_printf(sc->my_dev, "no memory for rx list -- packet dropped!\n"); m_freem(m_new); return (ENOBUFS); } c->my_mbuf = m_new; c->my_ptr->my_data = vtophys(mtod(m_new, caddr_t)); c->my_ptr->my_ctl = (MCLBYTES - 1) << MY_RBSShift; c->my_ptr->my_status = MY_OWNByNIC; return (0); } /* * A frame has been uploaded: pass the resulting mbuf chain up to the higher * level protocols. 
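 * Frames shorter than MINCLSIZE are copied out with m_devget() so the
 * receive cluster can be handed straight back to the chip; longer frames
 * are passed up as-is and a fresh cluster is allocated in their place.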
*/ static void my_rxeof(struct my_softc * sc) { struct ether_header *eh; struct mbuf *m; struct ifnet *ifp; struct my_chain_onefrag *cur_rx; int total_len = 0; u_int32_t rxstat; MY_LOCK_ASSERT(sc); ifp = sc->my_ifp; while (!((rxstat = sc->my_cdata.my_rx_head->my_ptr->my_status) & MY_OWNByNIC)) { cur_rx = sc->my_cdata.my_rx_head; sc->my_cdata.my_rx_head = cur_rx->my_nextdesc; if (rxstat & MY_ES) { /* error summary: give up this rx pkt */ if_inc_counter(ifp, IFCOUNTER_IERRORS, 1); cur_rx->my_ptr->my_status = MY_OWNByNIC; continue; } /* No errors; receive the packet. */ total_len = (rxstat & MY_FLNGMASK) >> MY_FLNGShift; total_len -= ETHER_CRC_LEN; if (total_len < MINCLSIZE) { m = m_devget(mtod(cur_rx->my_mbuf, char *), total_len, 0, ifp, NULL); cur_rx->my_ptr->my_status = MY_OWNByNIC; if (m == NULL) { if_inc_counter(ifp, IFCOUNTER_IERRORS, 1); continue; } } else { m = cur_rx->my_mbuf; /* * Try to conjure up a new mbuf cluster. If that * fails, it means we have an out of memory condition * and should leave the buffer in place and continue. * This will result in a lost packet, but there's * little else we can do in this situation. */ if (my_newbuf(sc, cur_rx) == ENOBUFS) { if_inc_counter(ifp, IFCOUNTER_IERRORS, 1); cur_rx->my_ptr->my_status = MY_OWNByNIC; continue; } m->m_pkthdr.rcvif = ifp; m->m_pkthdr.len = m->m_len = total_len; } if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1); eh = mtod(m, struct ether_header *); #if NBPFILTER > 0 /* * Handle BPF listeners. Let the BPF user see the packet, but * don't pass it up to the ether_input() layer unless it's a * broadcast packet, multicast packet, matches our ethernet * address or the interface is in promiscuous mode. */ if (bpf_peers_present(ifp->if_bpf)) { bpf_mtap(ifp->if_bpf, m); if (ifp->if_flags & IFF_PROMISC && (bcmp(eh->ether_dhost, IF_LLADDR(sc->my_ifp), ETHER_ADDR_LEN) && (eh->ether_dhost[0] & 1) == 0)) { m_freem(m); continue; } } #endif MY_UNLOCK(sc); (*ifp->if_input)(ifp, m); MY_LOCK(sc); } return; } /* * A frame was downloaded to the chip. It's safe for us to clean up the list * buffers. */ static void my_txeof(struct my_softc * sc) { struct my_chain *cur_tx; struct ifnet *ifp; MY_LOCK_ASSERT(sc); ifp = sc->my_ifp; /* Clear the timeout timer. */ sc->my_timer = 0; if (sc->my_cdata.my_tx_head == NULL) { return; } /* * Go through our tx list and free mbufs for those frames that have * been transmitted. */ while (sc->my_cdata.my_tx_head->my_mbuf != NULL) { u_int32_t txstat; cur_tx = sc->my_cdata.my_tx_head; txstat = MY_TXSTATUS(cur_tx); if ((txstat & MY_OWNByNIC) || txstat == MY_UNSENT) break; if (!(CSR_READ_4(sc, MY_TCRRCR) & MY_Enhanced)) { if (txstat & MY_TXERR) { if_inc_counter(ifp, IFCOUNTER_OERRORS, 1); if (txstat & MY_EC) /* excessive collision */ if_inc_counter(ifp, IFCOUNTER_COLLISIONS, 1); if (txstat & MY_LC) /* late collision */ if_inc_counter(ifp, IFCOUNTER_COLLISIONS, 1); } if_inc_counter(ifp, IFCOUNTER_COLLISIONS, (txstat & MY_NCRMASK) >> MY_NCRShift); } if_inc_counter(ifp, IFCOUNTER_OPACKETS, 1); m_freem(cur_tx->my_mbuf); cur_tx->my_mbuf = NULL; if (sc->my_cdata.my_tx_head == sc->my_cdata.my_tx_tail) { sc->my_cdata.my_tx_head = NULL; sc->my_cdata.my_tx_tail = NULL; break; } sc->my_cdata.my_tx_head = cur_tx->my_nextdesc; } if (CSR_READ_4(sc, MY_TCRRCR) & MY_Enhanced) { if_inc_counter(ifp, IFCOUNTER_COLLISIONS, (CSR_READ_4(sc, MY_TSR) & MY_NCRMask)); } return; } /* * TX 'end of channel' interrupt handler. 
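 * When the transmit list has drained we clear OACTIVE and start any
 * autonegotiation that was deferred while traffic was pending; otherwise a
 * descriptor still marked UNSENT is handed to the NIC and the watchdog
 * timer is re-armed.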
*/ static void my_txeoc(struct my_softc * sc) { struct ifnet *ifp; MY_LOCK_ASSERT(sc); ifp = sc->my_ifp; sc->my_timer = 0; if (sc->my_cdata.my_tx_head == NULL) { ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; sc->my_cdata.my_tx_tail = NULL; if (sc->my_want_auto) my_autoneg_mii(sc, MY_FLAG_SCHEDDELAY, 1); } else { if (MY_TXOWN(sc->my_cdata.my_tx_head) == MY_UNSENT) { MY_TXOWN(sc->my_cdata.my_tx_head) = MY_OWNByNIC; sc->my_timer = 5; CSR_WRITE_4(sc, MY_TXPDR, 0xFFFFFFFF); } } return; } static void my_intr(void *arg) { struct my_softc *sc; struct ifnet *ifp; u_int32_t status; sc = arg; MY_LOCK(sc); ifp = sc->my_ifp; if (!(ifp->if_flags & IFF_UP)) { MY_UNLOCK(sc); return; } /* Disable interrupts. */ CSR_WRITE_4(sc, MY_IMR, 0x00000000); for (;;) { status = CSR_READ_4(sc, MY_ISR); status &= MY_INTRS; if (status) CSR_WRITE_4(sc, MY_ISR, status); else break; if (status & MY_RI) /* receive interrupt */ my_rxeof(sc); if ((status & MY_RBU) || (status & MY_RxErr)) { /* rx buffer unavailable or rx error */ if_inc_counter(ifp, IFCOUNTER_IERRORS, 1); #ifdef foo my_stop(sc); my_reset(sc); my_init_locked(sc); #endif } if (status & MY_TI) /* tx interrupt */ my_txeof(sc); if (status & MY_ETI) /* tx early interrupt */ my_txeof(sc); if (status & MY_TBU) /* tx buffer unavailable */ my_txeoc(sc); #if 0 /* 90/1/18 delete */ if (status & MY_FBE) { my_reset(sc); my_init_locked(sc); } #endif } /* Re-enable interrupts. */ CSR_WRITE_4(sc, MY_IMR, MY_INTRS); if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) my_start_locked(ifp); MY_UNLOCK(sc); return; } /* * Encapsulate an mbuf chain in a descriptor by coupling the mbuf data * pointers to the fragment pointers. */ static int my_encap(struct my_softc * sc, struct my_chain * c, struct mbuf * m_head) { struct my_desc *f = NULL; int total_len; struct mbuf *m, *m_new = NULL; MY_LOCK_ASSERT(sc); /* calculate the total tx pkt length */ total_len = 0; for (m = m_head; m != NULL; m = m->m_next) total_len += m->m_len; /* * Start packing the mbufs in this chain into the fragment pointers. * Stop when we run out of fragments or hit the end of the mbuf * chain. */ m = m_head; MGETHDR(m_new, M_NOWAIT, MT_DATA); if (m_new == NULL) { device_printf(sc->my_dev, "no memory for tx list"); return (1); } if (m_head->m_pkthdr.len > MHLEN) { if (!(MCLGET(m_new, M_NOWAIT))) { m_freem(m_new); device_printf(sc->my_dev, "no memory for tx list"); return (1); } } m_copydata(m_head, 0, m_head->m_pkthdr.len, mtod(m_new, caddr_t)); m_new->m_pkthdr.len = m_new->m_len = m_head->m_pkthdr.len; m_freem(m_head); m_head = m_new; f = &c->my_ptr->my_frag[0]; f->my_status = 0; f->my_data = vtophys(mtod(m_new, caddr_t)); total_len = m_new->m_len; f->my_ctl = MY_TXFD | MY_TXLD | MY_CRCEnable | MY_PADEnable; f->my_ctl |= total_len << MY_PKTShift; /* pkt size */ f->my_ctl |= total_len; /* buffer size */ /* 89/12/29 add, for mtd891 *//* [ 89? ] */ if (sc->my_info->my_did == MTD891ID) f->my_ctl |= MY_ETIControl | MY_RetryTxLC; c->my_mbuf = m_head; c->my_lastdesc = 0; MY_TXNEXT(c) = vtophys(&c->my_nextdesc->my_ptr->my_frag[0]); return (0); } /* * Main transmit routine. To avoid having to do mbuf copies, we put pointers * to the mbuf data regions directly in the transmit lists. We also save a * copy of the pointers since the transmit list fragment pointers are * physical addresses. 
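 * (Despite the wording above, my_encap() copies each outgoing packet into a
 * single fresh mbuf or cluster with m_copydata(), since every descriptor
 * here carries exactly one fragment.)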
*/ static void my_start(struct ifnet * ifp) { struct my_softc *sc; sc = ifp->if_softc; MY_LOCK(sc); my_start_locked(ifp); MY_UNLOCK(sc); } static void my_start_locked(struct ifnet * ifp) { struct my_softc *sc; struct mbuf *m_head = NULL; struct my_chain *cur_tx = NULL, *start_tx; sc = ifp->if_softc; MY_LOCK_ASSERT(sc); if (sc->my_autoneg) { sc->my_tx_pend = 1; return; } /* * Check for an available queue slot. If there are none, punt. */ if (sc->my_cdata.my_tx_free->my_mbuf != NULL) { ifp->if_drv_flags |= IFF_DRV_OACTIVE; return; } start_tx = sc->my_cdata.my_tx_free; while (sc->my_cdata.my_tx_free->my_mbuf == NULL) { IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head); if (m_head == NULL) break; /* Pick a descriptor off the free list. */ cur_tx = sc->my_cdata.my_tx_free; sc->my_cdata.my_tx_free = cur_tx->my_nextdesc; /* Pack the data into the descriptor. */ my_encap(sc, cur_tx, m_head); if (cur_tx != start_tx) MY_TXOWN(cur_tx) = MY_OWNByNIC; #if NBPFILTER > 0 /* * If there's a BPF listener, bounce a copy of this frame to * him. */ BPF_MTAP(ifp, cur_tx->my_mbuf); #endif } /* * If there are no packets queued, bail. */ if (cur_tx == NULL) { return; } /* * Place the request for the upload interrupt in the last descriptor * in the chain. This way, if we're chaining several packets at once, * we'll only get an interrupt once for the whole chain rather than * once for each packet. */ MY_TXCTL(cur_tx) |= MY_TXIC; cur_tx->my_ptr->my_frag[0].my_ctl |= MY_TXIC; sc->my_cdata.my_tx_tail = cur_tx; if (sc->my_cdata.my_tx_head == NULL) sc->my_cdata.my_tx_head = start_tx; MY_TXOWN(start_tx) = MY_OWNByNIC; CSR_WRITE_4(sc, MY_TXPDR, 0xFFFFFFFF); /* tx polling demand */ /* * Set a timeout in case the chip goes out to lunch. */ sc->my_timer = 5; return; } static void my_init(void *xsc) { struct my_softc *sc = xsc; MY_LOCK(sc); my_init_locked(sc); MY_UNLOCK(sc); } static void my_init_locked(struct my_softc *sc) { struct ifnet *ifp = sc->my_ifp; u_int16_t phy_bmcr = 0; MY_LOCK_ASSERT(sc); if (sc->my_autoneg) { return; } if (sc->my_pinfo != NULL) phy_bmcr = my_phy_readreg(sc, PHY_BMCR); /* * Cancel pending I/O and free all RX/TX buffers. */ my_stop(sc); my_reset(sc); /* * Set cache alignment and burst length. */ #if 0 /* 89/9/1 modify, */ CSR_WRITE_4(sc, MY_BCR, MY_RPBLE512); CSR_WRITE_4(sc, MY_TCRRCR, MY_TFTSF); #endif CSR_WRITE_4(sc, MY_BCR, MY_PBL8); CSR_WRITE_4(sc, MY_TCRRCR, MY_TFTSF | MY_RBLEN | MY_RPBLE512); /* * 89/12/29 add, for mtd891, */ if (sc->my_info->my_did == MTD891ID) { MY_SETBIT(sc, MY_BCR, MY_PROG); MY_SETBIT(sc, MY_TCRRCR, MY_Enhanced); } my_setcfg(sc, phy_bmcr); /* Init circular RX list. */ if (my_list_rx_init(sc) == ENOBUFS) { device_printf(sc->my_dev, "init failed: no memory for rx buffers\n"); my_stop(sc); return; } /* Init TX descriptors. */ my_list_tx_init(sc); /* If we want promiscuous mode, set the allframes bit. */ if (ifp->if_flags & IFF_PROMISC) MY_SETBIT(sc, MY_TCRRCR, MY_PROM); else MY_CLRBIT(sc, MY_TCRRCR, MY_PROM); /* * Set capture broadcast bit to capture broadcast frames. */ if (ifp->if_flags & IFF_BROADCAST) MY_SETBIT(sc, MY_TCRRCR, MY_AB); else MY_CLRBIT(sc, MY_TCRRCR, MY_AB); /* * Program the multicast filter, if necessary. */ my_setmulti(sc); /* * Load the address of the RX list. */ MY_CLRBIT(sc, MY_TCRRCR, MY_RE); CSR_WRITE_4(sc, MY_RXLBA, vtophys(&sc->my_ldata->my_rx_list[0])); /* * Enable interrupts. */ CSR_WRITE_4(sc, MY_IMR, MY_INTRS); CSR_WRITE_4(sc, MY_ISR, 0xFFFFFFFF); /* Enable receiver and transmitter. 
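 * The transmit list base address is written while TE is momentarily
 * cleared; TE is set again once TXLBA points at the first descriptor.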
*/ MY_SETBIT(sc, MY_TCRRCR, MY_RE); MY_CLRBIT(sc, MY_TCRRCR, MY_TE); CSR_WRITE_4(sc, MY_TXLBA, vtophys(&sc->my_ldata->my_tx_list[0])); MY_SETBIT(sc, MY_TCRRCR, MY_TE); /* Restore state of BMCR */ if (sc->my_pinfo != NULL) my_phy_writereg(sc, PHY_BMCR, phy_bmcr); ifp->if_drv_flags |= IFF_DRV_RUNNING; ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; callout_reset(&sc->my_watchdog, hz, my_watchdog, sc); return; } /* * Set media options. */ static int my_ifmedia_upd(struct ifnet * ifp) { struct my_softc *sc; struct ifmedia *ifm; sc = ifp->if_softc; MY_LOCK(sc); ifm = &sc->ifmedia; if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER) { MY_UNLOCK(sc); return (EINVAL); } if (IFM_SUBTYPE(ifm->ifm_media) == IFM_AUTO) my_autoneg_mii(sc, MY_FLAG_SCHEDDELAY, 1); else my_setmode_mii(sc, ifm->ifm_media); MY_UNLOCK(sc); return (0); } /* * Report current media status. */ static void my_ifmedia_sts(struct ifnet * ifp, struct ifmediareq * ifmr) { struct my_softc *sc; u_int16_t advert = 0, ability = 0; sc = ifp->if_softc; MY_LOCK(sc); ifmr->ifm_active = IFM_ETHER; if (!(my_phy_readreg(sc, PHY_BMCR) & PHY_BMCR_AUTONEGENBL)) { #if 0 /* this version did not support 1000M, */ if (my_phy_readreg(sc, PHY_BMCR) & PHY_BMCR_1000) ifmr->ifm_active = IFM_ETHER | IFM_1000TX; #endif if (my_phy_readreg(sc, PHY_BMCR) & PHY_BMCR_SPEEDSEL) ifmr->ifm_active = IFM_ETHER | IFM_100_TX; else ifmr->ifm_active = IFM_ETHER | IFM_10_T; if (my_phy_readreg(sc, PHY_BMCR) & PHY_BMCR_DUPLEX) ifmr->ifm_active |= IFM_FDX; else ifmr->ifm_active |= IFM_HDX; MY_UNLOCK(sc); return; } ability = my_phy_readreg(sc, PHY_LPAR); advert = my_phy_readreg(sc, PHY_ANAR); #if 0 /* this version did not support 1000M, */ if (sc->my_pinfo->my_vid = MarvellPHYID0) { ability2 = my_phy_readreg(sc, PHY_1000SR); if (ability2 & PHY_1000SR_1000BTXFULL) { advert = 0; ability = 0; ifmr->ifm_active = IFM_ETHER|IFM_1000_T|IFM_FDX; } else if (ability & PHY_1000SR_1000BTXHALF) { advert = 0; ability = 0; ifmr->ifm_active = IFM_ETHER|IFM_1000_T|IFM_HDX; } } #endif if (advert & PHY_ANAR_100BT4 && ability & PHY_ANAR_100BT4) ifmr->ifm_active = IFM_ETHER | IFM_100_T4; else if (advert & PHY_ANAR_100BTXFULL && ability & PHY_ANAR_100BTXFULL) ifmr->ifm_active = IFM_ETHER | IFM_100_TX | IFM_FDX; else if (advert & PHY_ANAR_100BTXHALF && ability & PHY_ANAR_100BTXHALF) ifmr->ifm_active = IFM_ETHER | IFM_100_TX | IFM_HDX; else if (advert & PHY_ANAR_10BTFULL && ability & PHY_ANAR_10BTFULL) ifmr->ifm_active = IFM_ETHER | IFM_10_T | IFM_FDX; else if (advert & PHY_ANAR_10BTHALF && ability & PHY_ANAR_10BTHALF) ifmr->ifm_active = IFM_ETHER | IFM_10_T | IFM_HDX; MY_UNLOCK(sc); return; } static int my_ioctl(struct ifnet * ifp, u_long command, caddr_t data) { struct my_softc *sc = ifp->if_softc; struct ifreq *ifr = (struct ifreq *) data; int error; switch (command) { case SIOCSIFFLAGS: MY_LOCK(sc); if (ifp->if_flags & IFF_UP) my_init_locked(sc); else if (ifp->if_drv_flags & IFF_DRV_RUNNING) my_stop(sc); MY_UNLOCK(sc); error = 0; break; case SIOCADDMULTI: case SIOCDELMULTI: MY_LOCK(sc); my_setmulti(sc); MY_UNLOCK(sc); error = 0; break; case SIOCGIFMEDIA: case SIOCSIFMEDIA: error = ifmedia_ioctl(ifp, ifr, &sc->ifmedia, command); break; default: error = ether_ioctl(ifp, command, data); break; } return (error); } static void my_watchdog(void *arg) { struct my_softc *sc; struct ifnet *ifp; sc = arg; MY_LOCK_ASSERT(sc); callout_reset(&sc->my_watchdog, hz, my_watchdog, sc); if (sc->my_timer == 0 || --sc->my_timer > 0) return; ifp = sc->my_ifp; if_inc_counter(ifp, IFCOUNTER_OERRORS, 1); if_printf(ifp, "watchdog 
timeout\n"); if (!(my_phy_readreg(sc, PHY_BMSR) & PHY_BMSR_LINKSTAT)) if_printf(ifp, "no carrier - transceiver cable problem?\n"); my_stop(sc); my_reset(sc); my_init_locked(sc); if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) my_start_locked(ifp); } /* * Stop the adapter and free any mbufs allocated to the RX and TX lists. */ static void my_stop(struct my_softc * sc) { int i; struct ifnet *ifp; MY_LOCK_ASSERT(sc); ifp = sc->my_ifp; callout_stop(&sc->my_autoneg_timer); callout_stop(&sc->my_watchdog); MY_CLRBIT(sc, MY_TCRRCR, (MY_RE | MY_TE)); CSR_WRITE_4(sc, MY_IMR, 0x00000000); CSR_WRITE_4(sc, MY_TXLBA, 0x00000000); CSR_WRITE_4(sc, MY_RXLBA, 0x00000000); /* * Free data in the RX lists. */ for (i = 0; i < MY_RX_LIST_CNT; i++) { if (sc->my_cdata.my_rx_chain[i].my_mbuf != NULL) { m_freem(sc->my_cdata.my_rx_chain[i].my_mbuf); sc->my_cdata.my_rx_chain[i].my_mbuf = NULL; } } bzero((char *)&sc->my_ldata->my_rx_list, sizeof(sc->my_ldata->my_rx_list)); /* * Free the TX list buffers. */ for (i = 0; i < MY_TX_LIST_CNT; i++) { if (sc->my_cdata.my_tx_chain[i].my_mbuf != NULL) { m_freem(sc->my_cdata.my_tx_chain[i].my_mbuf); sc->my_cdata.my_tx_chain[i].my_mbuf = NULL; } } bzero((char *)&sc->my_ldata->my_tx_list, sizeof(sc->my_ldata->my_tx_list)); ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE); return; } /* * Stop all chip I/O so that the kernel's probe routines don't get confused * by errant DMAs when rebooting. */ static int my_shutdown(device_t dev) { struct my_softc *sc; sc = device_get_softc(dev); MY_LOCK(sc); my_stop(sc); MY_UNLOCK(sc); return 0; } Index: head/sys/dev/oce/oce_if.c =================================================================== --- head/sys/dev/oce/oce_if.c (revision 338948) +++ head/sys/dev/oce/oce_if.c (revision 338949) @@ -1,2995 +1,2998 @@ /*- * SPDX-License-Identifier: BSD-3-Clause * * Copyright (C) 2013 Emulex * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * 3. Neither the name of the Emulex Corporation nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. 
* * Contact Information: * freebsd-drivers@emulex.com * * Emulex * 3333 Susan Street * Costa Mesa, CA 92626 */ /* $FreeBSD$ */ #include "opt_inet6.h" #include "opt_inet.h" #include "oce_if.h" #include "oce_user.h" #define is_tso_pkt(m) (m->m_pkthdr.csum_flags & CSUM_TSO) /* UE Status Low CSR */ static char *ue_status_low_desc[] = { "CEV", "CTX", "DBUF", "ERX", "Host", "MPU", "NDMA", "PTC ", "RDMA ", "RXF ", "RXIPS ", "RXULP0 ", "RXULP1 ", "RXULP2 ", "TIM ", "TPOST ", "TPRE ", "TXIPS ", "TXULP0 ", "TXULP1 ", "UC ", "WDMA ", "TXULP2 ", "HOST1 ", "P0_OB_LINK ", "P1_OB_LINK ", "HOST_GPIO ", "MBOX ", "AXGMAC0", "AXGMAC1", "JTAG", "MPU_INTPEND" }; /* UE Status High CSR */ static char *ue_status_hi_desc[] = { "LPCMEMHOST", "MGMT_MAC", "PCS0ONLINE", "MPU_IRAM", "PCS1ONLINE", "PCTL0", "PCTL1", "PMEM", "RR", "TXPB", "RXPP", "XAUI", "TXP", "ARM", "IPC", "HOST2", "HOST3", "HOST4", "HOST5", "HOST6", "HOST7", "HOST8", "HOST9", "NETC", "Unknown", "Unknown", "Unknown", "Unknown", "Unknown", "Unknown", "Unknown", "Unknown" }; struct oce_common_cqe_info{ uint8_t vtp:1; uint8_t l4_cksum_pass:1; uint8_t ip_cksum_pass:1; uint8_t ipv6_frame:1; uint8_t qnq:1; uint8_t rsvd:3; uint8_t num_frags; uint16_t pkt_size; uint16_t vtag; }; /* Driver entry points prototypes */ static int oce_probe(device_t dev); static int oce_attach(device_t dev); static int oce_detach(device_t dev); static int oce_shutdown(device_t dev); static int oce_ioctl(struct ifnet *ifp, u_long command, caddr_t data); static void oce_init(void *xsc); static int oce_multiq_start(struct ifnet *ifp, struct mbuf *m); static void oce_multiq_flush(struct ifnet *ifp); /* Driver interrupt routines protypes */ static void oce_intr(void *arg, int pending); static int oce_setup_intr(POCE_SOFTC sc); static int oce_fast_isr(void *arg); static int oce_alloc_intr(POCE_SOFTC sc, int vector, void (*isr) (void *arg, int pending)); /* Media callbacks prototypes */ static void oce_media_status(struct ifnet *ifp, struct ifmediareq *req); static int oce_media_change(struct ifnet *ifp); /* Transmit routines prototypes */ static int oce_tx(POCE_SOFTC sc, struct mbuf **mpp, int wq_index); static void oce_tx_restart(POCE_SOFTC sc, struct oce_wq *wq); static void oce_process_tx_completion(struct oce_wq *wq); static int oce_multiq_transmit(struct ifnet *ifp, struct mbuf *m, struct oce_wq *wq); /* Receive routines prototypes */ static int oce_cqe_vtp_valid(POCE_SOFTC sc, struct oce_nic_rx_cqe *cqe); static int oce_cqe_portid_valid(POCE_SOFTC sc, struct oce_nic_rx_cqe *cqe); static void oce_rx(struct oce_rq *rq, struct oce_nic_rx_cqe *cqe); static void oce_check_rx_bufs(POCE_SOFTC sc, uint32_t num_cqes, struct oce_rq *rq); static uint16_t oce_rq_handler_lro(void *arg); static void oce_correct_header(struct mbuf *m, struct nic_hwlro_cqe_part1 *cqe1, struct nic_hwlro_cqe_part2 *cqe2); static void oce_rx_lro(struct oce_rq *rq, struct nic_hwlro_singleton_cqe *cqe, struct nic_hwlro_cqe_part2 *cqe2); static void oce_rx_mbuf_chain(struct oce_rq *rq, struct oce_common_cqe_info *cqe_info, struct mbuf **m); /* Helper function prototypes in this file */ static int oce_attach_ifp(POCE_SOFTC sc); static void oce_add_vlan(void *arg, struct ifnet *ifp, uint16_t vtag); static void oce_del_vlan(void *arg, struct ifnet *ifp, uint16_t vtag); static int oce_vid_config(POCE_SOFTC sc); static void oce_mac_addr_set(POCE_SOFTC sc); static int oce_handle_passthrough(struct ifnet *ifp, caddr_t data); static void oce_local_timer(void *arg); static void oce_if_deactivate(POCE_SOFTC sc); static void 
oce_if_activate(POCE_SOFTC sc); static void setup_max_queues_want(POCE_SOFTC sc); static void update_queues_got(POCE_SOFTC sc); static void process_link_state(POCE_SOFTC sc, struct oce_async_cqe_link_state *acqe); static int oce_tx_asic_stall_verify(POCE_SOFTC sc, struct mbuf *m); static void oce_get_config(POCE_SOFTC sc); static struct mbuf *oce_insert_vlan_tag(POCE_SOFTC sc, struct mbuf *m, boolean_t *complete); static void oce_read_env_variables(POCE_SOFTC sc); /* IP specific */ #if defined(INET6) || defined(INET) static int oce_init_lro(POCE_SOFTC sc); static struct mbuf * oce_tso_setup(POCE_SOFTC sc, struct mbuf **mpp); #endif static device_method_t oce_dispatch[] = { DEVMETHOD(device_probe, oce_probe), DEVMETHOD(device_attach, oce_attach), DEVMETHOD(device_detach, oce_detach), DEVMETHOD(device_shutdown, oce_shutdown), DEVMETHOD_END }; static driver_t oce_driver = { "oce", oce_dispatch, sizeof(OCE_SOFTC) }; static devclass_t oce_devclass; -DRIVER_MODULE(oce, pci, oce_driver, oce_devclass, 0, 0); -MODULE_DEPEND(oce, pci, 1, 1, 1); -MODULE_DEPEND(oce, ether, 1, 1, 1); -MODULE_VERSION(oce, 1); - - /* global vars */ const char component_revision[32] = {"///" COMPONENT_REVISION "///"}; /* Module capabilites and parameters */ uint32_t oce_max_rsp_handled = OCE_MAX_RSP_HANDLED; uint32_t oce_enable_rss = OCE_MODCAP_RSS; uint32_t oce_rq_buf_size = 2048; TUNABLE_INT("hw.oce.max_rsp_handled", &oce_max_rsp_handled); TUNABLE_INT("hw.oce.enable_rss", &oce_enable_rss); /* Supported devices table */ static uint32_t supportedDevices[] = { (PCI_VENDOR_SERVERENGINES << 16) | PCI_PRODUCT_BE2, (PCI_VENDOR_SERVERENGINES << 16) | PCI_PRODUCT_BE3, (PCI_VENDOR_EMULEX << 16) | PCI_PRODUCT_BE3, (PCI_VENDOR_EMULEX << 16) | PCI_PRODUCT_XE201, (PCI_VENDOR_EMULEX << 16) | PCI_PRODUCT_XE201_VF, (PCI_VENDOR_EMULEX << 16) | PCI_PRODUCT_SH }; + + +DRIVER_MODULE(oce, pci, oce_driver, oce_devclass, 0, 0); +MODULE_PNP_INFO("W32:vendor/device", pci, oce, supportedDevices, + nitems(supportedDevices)); +MODULE_DEPEND(oce, pci, 1, 1, 1); +MODULE_DEPEND(oce, ether, 1, 1, 1); +MODULE_VERSION(oce, 1); + POCE_SOFTC softc_head = NULL; POCE_SOFTC softc_tail = NULL; struct oce_rdma_if *oce_rdma_if = NULL; /***************************************************************************** * Driver entry points functions * *****************************************************************************/ static int oce_probe(device_t dev) { uint16_t vendor = 0; uint16_t device = 0; int i = 0; char str[256] = {0}; POCE_SOFTC sc; sc = device_get_softc(dev); bzero(sc, sizeof(OCE_SOFTC)); sc->dev = dev; vendor = pci_get_vendor(dev); device = pci_get_device(dev); for (i = 0; i < (sizeof(supportedDevices) / sizeof(uint32_t)); i++) { if (vendor == ((supportedDevices[i] >> 16) & 0xffff)) { if (device == (supportedDevices[i] & 0xffff)) { sprintf(str, "%s:%s", "Emulex CNA NIC function", component_revision); device_set_desc_copy(dev, str); switch (device) { case PCI_PRODUCT_BE2: sc->flags |= OCE_FLAGS_BE2; break; case PCI_PRODUCT_BE3: sc->flags |= OCE_FLAGS_BE3; break; case PCI_PRODUCT_XE201: case PCI_PRODUCT_XE201_VF: sc->flags |= OCE_FLAGS_XE201; break; case PCI_PRODUCT_SH: sc->flags |= OCE_FLAGS_SH; break; default: return ENXIO; } return BUS_PROBE_DEFAULT; } } } return ENXIO; } static int oce_attach(device_t dev) { POCE_SOFTC sc; int rc = 0; sc = device_get_softc(dev); rc = oce_hw_pci_alloc(sc); if (rc) return rc; sc->tx_ring_size = OCE_TX_RING_SIZE; sc->rx_ring_size = OCE_RX_RING_SIZE; /* receive fragment size should be multiple of 2K */ 
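/*
 * oce_rq_buf_size defaults to 2048; integer division truncates here, so a
 * value that is not already a whole multiple of 2048 is rounded down.
 */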
sc->rq_frag_size = ((oce_rq_buf_size / 2048) * 2048); sc->flow_control = OCE_DEFAULT_FLOW_CONTROL; sc->promisc = OCE_DEFAULT_PROMISCUOUS; LOCK_CREATE(&sc->bmbx_lock, "Mailbox_lock"); LOCK_CREATE(&sc->dev_lock, "Device_lock"); /* initialise the hardware */ rc = oce_hw_init(sc); if (rc) goto pci_res_free; oce_read_env_variables(sc); oce_get_config(sc); setup_max_queues_want(sc); rc = oce_setup_intr(sc); if (rc) goto mbox_free; rc = oce_queue_init_all(sc); if (rc) goto intr_free; rc = oce_attach_ifp(sc); if (rc) goto queues_free; #if defined(INET6) || defined(INET) rc = oce_init_lro(sc); if (rc) goto ifp_free; #endif rc = oce_hw_start(sc); if (rc) goto lro_free; sc->vlan_attach = EVENTHANDLER_REGISTER(vlan_config, oce_add_vlan, sc, EVENTHANDLER_PRI_FIRST); sc->vlan_detach = EVENTHANDLER_REGISTER(vlan_unconfig, oce_del_vlan, sc, EVENTHANDLER_PRI_FIRST); rc = oce_stats_init(sc); if (rc) goto vlan_free; oce_add_sysctls(sc); callout_init(&sc->timer, CALLOUT_MPSAFE); rc = callout_reset(&sc->timer, 2 * hz, oce_local_timer, sc); if (rc) goto stats_free; sc->next =NULL; if (softc_tail != NULL) { softc_tail->next = sc; } else { softc_head = sc; } softc_tail = sc; return 0; stats_free: callout_drain(&sc->timer); oce_stats_free(sc); vlan_free: if (sc->vlan_attach) EVENTHANDLER_DEREGISTER(vlan_config, sc->vlan_attach); if (sc->vlan_detach) EVENTHANDLER_DEREGISTER(vlan_unconfig, sc->vlan_detach); oce_hw_intr_disable(sc); lro_free: #if defined(INET6) || defined(INET) oce_free_lro(sc); ifp_free: #endif ether_ifdetach(sc->ifp); if_free(sc->ifp); queues_free: oce_queue_release_all(sc); intr_free: oce_intr_free(sc); mbox_free: oce_dma_free(sc, &sc->bsmbx); pci_res_free: oce_hw_pci_free(sc); LOCK_DESTROY(&sc->dev_lock); LOCK_DESTROY(&sc->bmbx_lock); return rc; } static int oce_detach(device_t dev) { POCE_SOFTC sc = device_get_softc(dev); POCE_SOFTC poce_sc_tmp, *ppoce_sc_tmp1, poce_sc_tmp2 = NULL; poce_sc_tmp = softc_head; ppoce_sc_tmp1 = &softc_head; while (poce_sc_tmp != NULL) { if (poce_sc_tmp == sc) { *ppoce_sc_tmp1 = sc->next; if (sc->next == NULL) { softc_tail = poce_sc_tmp2; } break; } poce_sc_tmp2 = poce_sc_tmp; ppoce_sc_tmp1 = &poce_sc_tmp->next; poce_sc_tmp = poce_sc_tmp->next; } LOCK(&sc->dev_lock); oce_if_deactivate(sc); UNLOCK(&sc->dev_lock); callout_drain(&sc->timer); if (sc->vlan_attach != NULL) EVENTHANDLER_DEREGISTER(vlan_config, sc->vlan_attach); if (sc->vlan_detach != NULL) EVENTHANDLER_DEREGISTER(vlan_unconfig, sc->vlan_detach); ether_ifdetach(sc->ifp); if_free(sc->ifp); oce_hw_shutdown(sc); bus_generic_detach(dev); return 0; } static int oce_shutdown(device_t dev) { int rc; rc = oce_detach(dev); return rc; } static int oce_ioctl(struct ifnet *ifp, u_long command, caddr_t data) { struct ifreq *ifr = (struct ifreq *)data; POCE_SOFTC sc = ifp->if_softc; int rc = 0; uint32_t u; switch (command) { case SIOCGIFMEDIA: rc = ifmedia_ioctl(ifp, ifr, &sc->media, command); break; case SIOCSIFMTU: if (ifr->ifr_mtu > OCE_MAX_MTU) rc = EINVAL; else ifp->if_mtu = ifr->ifr_mtu; break; case SIOCSIFFLAGS: if (ifp->if_flags & IFF_UP) { if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) { sc->ifp->if_drv_flags |= IFF_DRV_RUNNING; oce_init(sc); } device_printf(sc->dev, "Interface Up\n"); } else { LOCK(&sc->dev_lock); sc->ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE); oce_if_deactivate(sc); UNLOCK(&sc->dev_lock); device_printf(sc->dev, "Interface Down\n"); } if ((ifp->if_flags & IFF_PROMISC) && !sc->promisc) { if (!oce_rxf_set_promiscuous(sc, (1 | (1 << 1)))) sc->promisc = TRUE; } else if (!(ifp->if_flags & 
IFF_PROMISC) && sc->promisc) { if (!oce_rxf_set_promiscuous(sc, 0)) sc->promisc = FALSE; } break; case SIOCADDMULTI: case SIOCDELMULTI: rc = oce_hw_update_multicast(sc); if (rc) device_printf(sc->dev, "Update multicast address failed\n"); break; case SIOCSIFCAP: u = ifr->ifr_reqcap ^ ifp->if_capenable; if (u & IFCAP_TXCSUM) { ifp->if_capenable ^= IFCAP_TXCSUM; ifp->if_hwassist ^= (CSUM_TCP | CSUM_UDP | CSUM_IP); if (IFCAP_TSO & ifp->if_capenable && !(IFCAP_TXCSUM & ifp->if_capenable)) { ifp->if_capenable &= ~IFCAP_TSO; ifp->if_hwassist &= ~CSUM_TSO; if_printf(ifp, "TSO disabled due to -txcsum.\n"); } } if (u & IFCAP_RXCSUM) ifp->if_capenable ^= IFCAP_RXCSUM; if (u & IFCAP_TSO4) { ifp->if_capenable ^= IFCAP_TSO4; if (IFCAP_TSO & ifp->if_capenable) { if (IFCAP_TXCSUM & ifp->if_capenable) ifp->if_hwassist |= CSUM_TSO; else { ifp->if_capenable &= ~IFCAP_TSO; ifp->if_hwassist &= ~CSUM_TSO; if_printf(ifp, "Enable txcsum first.\n"); rc = EAGAIN; } } else ifp->if_hwassist &= ~CSUM_TSO; } if (u & IFCAP_VLAN_HWTAGGING) ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING; if (u & IFCAP_VLAN_HWFILTER) { ifp->if_capenable ^= IFCAP_VLAN_HWFILTER; oce_vid_config(sc); } #if defined(INET6) || defined(INET) if (u & IFCAP_LRO) { ifp->if_capenable ^= IFCAP_LRO; if(sc->enable_hwlro) { if(ifp->if_capenable & IFCAP_LRO) { rc = oce_mbox_nic_set_iface_lro_config(sc, 1); }else { rc = oce_mbox_nic_set_iface_lro_config(sc, 0); } } } #endif break; case SIOCGPRIVATE_0: rc = oce_handle_passthrough(ifp, data); break; default: rc = ether_ioctl(ifp, command, data); break; } return rc; } static void oce_init(void *arg) { POCE_SOFTC sc = arg; LOCK(&sc->dev_lock); if (sc->ifp->if_flags & IFF_UP) { oce_if_deactivate(sc); oce_if_activate(sc); } UNLOCK(&sc->dev_lock); } static int oce_multiq_start(struct ifnet *ifp, struct mbuf *m) { POCE_SOFTC sc = ifp->if_softc; struct oce_wq *wq = NULL; int queue_index = 0; int status = 0; if (!sc->link_status) return ENXIO; if (M_HASHTYPE_GET(m) != M_HASHTYPE_NONE) queue_index = m->m_pkthdr.flowid % sc->nwqs; wq = sc->wq[queue_index]; LOCK(&wq->tx_lock); status = oce_multiq_transmit(ifp, m, wq); UNLOCK(&wq->tx_lock); return status; } static void oce_multiq_flush(struct ifnet *ifp) { POCE_SOFTC sc = ifp->if_softc; struct mbuf *m; int i = 0; for (i = 0; i < sc->nwqs; i++) { while ((m = buf_ring_dequeue_sc(sc->wq[i]->br)) != NULL) m_freem(m); } if_qflush(ifp); } /***************************************************************************** * Driver interrupt routines functions * *****************************************************************************/ static void oce_intr(void *arg, int pending) { POCE_INTR_INFO ii = (POCE_INTR_INFO) arg; POCE_SOFTC sc = ii->sc; struct oce_eq *eq = ii->eq; struct oce_eqe *eqe; struct oce_cq *cq = NULL; int i, num_eqes = 0; bus_dmamap_sync(eq->ring->dma.tag, eq->ring->dma.map, BUS_DMASYNC_POSTWRITE); do { eqe = RING_GET_CONSUMER_ITEM_VA(eq->ring, struct oce_eqe); if (eqe->evnt == 0) break; eqe->evnt = 0; bus_dmamap_sync(eq->ring->dma.tag, eq->ring->dma.map, BUS_DMASYNC_POSTWRITE); RING_GET(eq->ring, 1); num_eqes++; } while (TRUE); if (!num_eqes) goto eq_arm; /* Spurious */ /* Clear EQ entries, but dont arm */ oce_arm_eq(sc, eq->eq_id, num_eqes, FALSE, FALSE); /* Process TX, RX and MCC. 
But dont arm CQ*/ for (i = 0; i < eq->cq_valid; i++) { cq = eq->cq[i]; (*cq->cq_handler)(cq->cb_arg); } /* Arm all cqs connected to this EQ */ for (i = 0; i < eq->cq_valid; i++) { cq = eq->cq[i]; oce_arm_cq(sc, cq->cq_id, 0, TRUE); } eq_arm: oce_arm_eq(sc, eq->eq_id, 0, TRUE, FALSE); return; } static int oce_setup_intr(POCE_SOFTC sc) { int rc = 0, use_intx = 0; int vector = 0, req_vectors = 0; int tot_req_vectors, tot_vectors; if (is_rss_enabled(sc)) req_vectors = MAX((sc->nrqs - 1), sc->nwqs); else req_vectors = 1; tot_req_vectors = req_vectors; if (sc->rdma_flags & OCE_RDMA_FLAG_SUPPORTED) { if (req_vectors > 1) { tot_req_vectors += OCE_RDMA_VECTORS; sc->roce_intr_count = OCE_RDMA_VECTORS; } } if (sc->flags & OCE_FLAGS_MSIX_CAPABLE) { sc->intr_count = req_vectors; tot_vectors = tot_req_vectors; rc = pci_alloc_msix(sc->dev, &tot_vectors); if (rc != 0) { use_intx = 1; pci_release_msi(sc->dev); } else { if (sc->rdma_flags & OCE_RDMA_FLAG_SUPPORTED) { if (tot_vectors < tot_req_vectors) { if (sc->intr_count < (2 * OCE_RDMA_VECTORS)) { sc->roce_intr_count = (tot_vectors / 2); } sc->intr_count = tot_vectors - sc->roce_intr_count; } } else { sc->intr_count = tot_vectors; } sc->flags |= OCE_FLAGS_USING_MSIX; } } else use_intx = 1; if (use_intx) sc->intr_count = 1; /* Scale number of queues based on intr we got */ update_queues_got(sc); if (use_intx) { device_printf(sc->dev, "Using legacy interrupt\n"); rc = oce_alloc_intr(sc, vector, oce_intr); if (rc) goto error; } else { for (; vector < sc->intr_count; vector++) { rc = oce_alloc_intr(sc, vector, oce_intr); if (rc) goto error; } } return 0; error: oce_intr_free(sc); return rc; } static int oce_fast_isr(void *arg) { POCE_INTR_INFO ii = (POCE_INTR_INFO) arg; POCE_SOFTC sc = ii->sc; if (ii->eq == NULL) return FILTER_STRAY; oce_arm_eq(sc, ii->eq->eq_id, 0, FALSE, TRUE); taskqueue_enqueue(ii->tq, &ii->task); ii->eq->intr++; return FILTER_HANDLED; } static int oce_alloc_intr(POCE_SOFTC sc, int vector, void (*isr) (void *arg, int pending)) { POCE_INTR_INFO ii = &sc->intrs[vector]; int rc = 0, rr; if (vector >= OCE_MAX_EQ) return (EINVAL); /* Set the resource id for the interrupt. * MSIx is vector + 1 for the resource id, * INTx is 0 for the resource id. 
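 * The rid actually used is saved in ii->irq_rr so that oce_intr_free() can
 * release the same resource later.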
*/ if (sc->flags & OCE_FLAGS_USING_MSIX) rr = vector + 1; else rr = 0; ii->intr_res = bus_alloc_resource_any(sc->dev, SYS_RES_IRQ, &rr, RF_ACTIVE|RF_SHAREABLE); ii->irq_rr = rr; if (ii->intr_res == NULL) { device_printf(sc->dev, "Could not allocate interrupt\n"); rc = ENXIO; return rc; } TASK_INIT(&ii->task, 0, isr, ii); ii->vector = vector; sprintf(ii->task_name, "oce_task[%d]", ii->vector); ii->tq = taskqueue_create_fast(ii->task_name, M_NOWAIT, taskqueue_thread_enqueue, &ii->tq); taskqueue_start_threads(&ii->tq, 1, PI_NET, "%s taskq", device_get_nameunit(sc->dev)); ii->sc = sc; rc = bus_setup_intr(sc->dev, ii->intr_res, INTR_TYPE_NET, oce_fast_isr, NULL, ii, &ii->tag); return rc; } void oce_intr_free(POCE_SOFTC sc) { int i = 0; for (i = 0; i < sc->intr_count; i++) { if (sc->intrs[i].tag != NULL) bus_teardown_intr(sc->dev, sc->intrs[i].intr_res, sc->intrs[i].tag); if (sc->intrs[i].tq != NULL) taskqueue_free(sc->intrs[i].tq); if (sc->intrs[i].intr_res != NULL) bus_release_resource(sc->dev, SYS_RES_IRQ, sc->intrs[i].irq_rr, sc->intrs[i].intr_res); sc->intrs[i].tag = NULL; sc->intrs[i].intr_res = NULL; } if (sc->flags & OCE_FLAGS_USING_MSIX) pci_release_msi(sc->dev); } /****************************************************************************** * Media callbacks functions * ******************************************************************************/ static void oce_media_status(struct ifnet *ifp, struct ifmediareq *req) { POCE_SOFTC sc = (POCE_SOFTC) ifp->if_softc; req->ifm_status = IFM_AVALID; req->ifm_active = IFM_ETHER; if (sc->link_status == 1) req->ifm_status |= IFM_ACTIVE; else return; switch (sc->link_speed) { case 1: /* 10 Mbps */ req->ifm_active |= IFM_10_T | IFM_FDX; sc->speed = 10; break; case 2: /* 100 Mbps */ req->ifm_active |= IFM_100_TX | IFM_FDX; sc->speed = 100; break; case 3: /* 1 Gbps */ req->ifm_active |= IFM_1000_T | IFM_FDX; sc->speed = 1000; break; case 4: /* 10 Gbps */ req->ifm_active |= IFM_10G_SR | IFM_FDX; sc->speed = 10000; break; case 5: /* 20 Gbps */ req->ifm_active |= IFM_10G_SR | IFM_FDX; sc->speed = 20000; break; case 6: /* 25 Gbps */ req->ifm_active |= IFM_10G_SR | IFM_FDX; sc->speed = 25000; break; case 7: /* 40 Gbps */ req->ifm_active |= IFM_40G_SR4 | IFM_FDX; sc->speed = 40000; break; default: sc->speed = 0; break; } return; } int oce_media_change(struct ifnet *ifp) { return 0; } static void oce_is_pkt_dest_bmc(POCE_SOFTC sc, struct mbuf *m, boolean_t *os2bmc, struct mbuf **m_new) { struct ether_header *eh = NULL; eh = mtod(m, struct ether_header *); if (!is_os2bmc_enabled(sc) || *os2bmc) { *os2bmc = FALSE; goto done; } if (!ETHER_IS_MULTICAST(eh->ether_dhost)) goto done; if (is_mc_allowed_on_bmc(sc, eh) || is_bc_allowed_on_bmc(sc, eh) || is_arp_allowed_on_bmc(sc, ntohs(eh->ether_type))) { *os2bmc = TRUE; goto done; } if (mtod(m, struct ip *)->ip_p == IPPROTO_IPV6) { struct ip6_hdr *ip6 = mtod(m, struct ip6_hdr *); uint8_t nexthdr = ip6->ip6_nxt; if (nexthdr == IPPROTO_ICMPV6) { struct icmp6_hdr *icmp6 = (struct icmp6_hdr *)(ip6 + 1); switch (icmp6->icmp6_type) { case ND_ROUTER_ADVERT: *os2bmc = is_ipv6_ra_filt_enabled(sc); goto done; case ND_NEIGHBOR_ADVERT: *os2bmc = is_ipv6_na_filt_enabled(sc); goto done; default: break; } } } if (mtod(m, struct ip *)->ip_p == IPPROTO_UDP) { struct ip *ip = mtod(m, struct ip *); int iphlen = ip->ip_hl << 2; struct udphdr *uh = (struct udphdr *)((caddr_t)ip + iphlen); switch (uh->uh_dport) { case DHCP_CLIENT_PORT: *os2bmc = is_dhcp_client_filt_enabled(sc); goto done; case DHCP_SERVER_PORT: *os2bmc = 
is_dhcp_srvr_filt_enabled(sc); goto done; case NET_BIOS_PORT1: case NET_BIOS_PORT2: *os2bmc = is_nbios_filt_enabled(sc); goto done; case DHCPV6_RAS_PORT: *os2bmc = is_ipv6_ras_filt_enabled(sc); goto done; default: break; } } done: if (*os2bmc) { *m_new = m_dup(m, M_NOWAIT); if (!*m_new) { *os2bmc = FALSE; return; } *m_new = oce_insert_vlan_tag(sc, *m_new, NULL); } } /***************************************************************************** * Transmit routines functions * *****************************************************************************/ static int oce_tx(POCE_SOFTC sc, struct mbuf **mpp, int wq_index) { int rc = 0, i, retry_cnt = 0; bus_dma_segment_t segs[OCE_MAX_TX_ELEMENTS]; struct mbuf *m, *m_temp, *m_new = NULL; struct oce_wq *wq = sc->wq[wq_index]; struct oce_packet_desc *pd; struct oce_nic_hdr_wqe *nichdr; struct oce_nic_frag_wqe *nicfrag; struct ether_header *eh = NULL; int num_wqes; uint32_t reg_value; boolean_t complete = TRUE; boolean_t os2bmc = FALSE; m = *mpp; if (!m) return EINVAL; if (!(m->m_flags & M_PKTHDR)) { rc = ENXIO; goto free_ret; } /* Don't allow non-TSO packets longer than MTU */ if (!is_tso_pkt(m)) { eh = mtod(m, struct ether_header *); if(m->m_pkthdr.len > ETHER_MAX_FRAME(sc->ifp, eh->ether_type, FALSE)) goto free_ret; } if(oce_tx_asic_stall_verify(sc, m)) { m = oce_insert_vlan_tag(sc, m, &complete); if(!m) { device_printf(sc->dev, "Insertion unsuccessful\n"); return 0; } } /* Lancer, SH ASIC has a bug wherein Packets that are 32 bytes or less * may cause a transmit stall on that port. So the work-around is to * pad short packets (<= 32 bytes) to a 36-byte length. */ if(IS_SH(sc) || IS_XE201(sc) ) { if(m->m_pkthdr.len <= 32) { char buf[36]; bzero((void *)buf, 36); m_append(m, (36 - m->m_pkthdr.len), buf); } } tx_start: if (m->m_pkthdr.csum_flags & CSUM_TSO) { /* consolidate packet buffers for TSO/LSO segment offload */ #if defined(INET6) || defined(INET) m = oce_tso_setup(sc, mpp); #else m = NULL; #endif if (m == NULL) { rc = ENXIO; goto free_ret; } } pd = &wq->pckts[wq->pkt_desc_head]; retry: rc = bus_dmamap_load_mbuf_sg(wq->tag, pd->map, m, segs, &pd->nsegs, BUS_DMA_NOWAIT); if (rc == 0) { num_wqes = pd->nsegs + 1; if (IS_BE(sc) || IS_SH(sc)) { /*Dummy required only for BE3.*/ if (num_wqes & 1) num_wqes++; } if (num_wqes >= RING_NUM_FREE(wq->ring)) { bus_dmamap_unload(wq->tag, pd->map); return EBUSY; } atomic_store_rel_int(&wq->pkt_desc_head, (wq->pkt_desc_head + 1) % \ OCE_WQ_PACKET_ARRAY_SIZE); bus_dmamap_sync(wq->tag, pd->map, BUS_DMASYNC_PREWRITE); pd->mbuf = m; nichdr = RING_GET_PRODUCER_ITEM_VA(wq->ring, struct oce_nic_hdr_wqe); nichdr->u0.dw[0] = 0; nichdr->u0.dw[1] = 0; nichdr->u0.dw[2] = 0; nichdr->u0.dw[3] = 0; nichdr->u0.s.complete = complete; nichdr->u0.s.mgmt = os2bmc; nichdr->u0.s.event = 1; nichdr->u0.s.crc = 1; nichdr->u0.s.forward = 0; nichdr->u0.s.ipcs = (m->m_pkthdr.csum_flags & CSUM_IP) ? 1 : 0; nichdr->u0.s.udpcs = (m->m_pkthdr.csum_flags & CSUM_UDP) ? 1 : 0; nichdr->u0.s.tcpcs = (m->m_pkthdr.csum_flags & CSUM_TCP) ? 
1 : 0; nichdr->u0.s.num_wqe = num_wqes; nichdr->u0.s.total_length = m->m_pkthdr.len; if (m->m_flags & M_VLANTAG) { nichdr->u0.s.vlan = 1; /*Vlan present*/ nichdr->u0.s.vlan_tag = m->m_pkthdr.ether_vtag; } if (m->m_pkthdr.csum_flags & CSUM_TSO) { if (m->m_pkthdr.tso_segsz) { nichdr->u0.s.lso = 1; nichdr->u0.s.lso_mss = m->m_pkthdr.tso_segsz; } if (!IS_BE(sc) || !IS_SH(sc)) nichdr->u0.s.ipcs = 1; } RING_PUT(wq->ring, 1); atomic_add_int(&wq->ring->num_used, 1); for (i = 0; i < pd->nsegs; i++) { nicfrag = RING_GET_PRODUCER_ITEM_VA(wq->ring, struct oce_nic_frag_wqe); nicfrag->u0.s.rsvd0 = 0; nicfrag->u0.s.frag_pa_hi = ADDR_HI(segs[i].ds_addr); nicfrag->u0.s.frag_pa_lo = ADDR_LO(segs[i].ds_addr); nicfrag->u0.s.frag_len = segs[i].ds_len; pd->wqe_idx = wq->ring->pidx; RING_PUT(wq->ring, 1); atomic_add_int(&wq->ring->num_used, 1); } if (num_wqes > (pd->nsegs + 1)) { nicfrag = RING_GET_PRODUCER_ITEM_VA(wq->ring, struct oce_nic_frag_wqe); nicfrag->u0.dw[0] = 0; nicfrag->u0.dw[1] = 0; nicfrag->u0.dw[2] = 0; nicfrag->u0.dw[3] = 0; pd->wqe_idx = wq->ring->pidx; RING_PUT(wq->ring, 1); atomic_add_int(&wq->ring->num_used, 1); pd->nsegs++; } if_inc_counter(sc->ifp, IFCOUNTER_OPACKETS, 1); wq->tx_stats.tx_reqs++; wq->tx_stats.tx_wrbs += num_wqes; wq->tx_stats.tx_bytes += m->m_pkthdr.len; wq->tx_stats.tx_pkts++; bus_dmamap_sync(wq->ring->dma.tag, wq->ring->dma.map, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); reg_value = (num_wqes << 16) | wq->wq_id; /* if os2bmc is not enabled or if the pkt is already tagged as bmc, do nothing */ oce_is_pkt_dest_bmc(sc, m, &os2bmc, &m_new); OCE_WRITE_REG32(sc, db, wq->db_offset, reg_value); } else if (rc == EFBIG) { if (retry_cnt == 0) { m_temp = m_defrag(m, M_NOWAIT); if (m_temp == NULL) goto free_ret; m = m_temp; *mpp = m_temp; retry_cnt = retry_cnt + 1; goto retry; } else goto free_ret; } else if (rc == ENOMEM) return rc; else goto free_ret; if (os2bmc) { m = m_new; goto tx_start; } return 0; free_ret: m_freem(*mpp); *mpp = NULL; return rc; } static void oce_process_tx_completion(struct oce_wq *wq) { struct oce_packet_desc *pd; POCE_SOFTC sc = (POCE_SOFTC) wq->parent; struct mbuf *m; pd = &wq->pckts[wq->pkt_desc_tail]; atomic_store_rel_int(&wq->pkt_desc_tail, (wq->pkt_desc_tail + 1) % OCE_WQ_PACKET_ARRAY_SIZE); atomic_subtract_int(&wq->ring->num_used, pd->nsegs + 1); bus_dmamap_sync(wq->tag, pd->map, BUS_DMASYNC_POSTWRITE); bus_dmamap_unload(wq->tag, pd->map); m = pd->mbuf; m_freem(m); pd->mbuf = NULL; if (sc->ifp->if_drv_flags & IFF_DRV_OACTIVE) { if (wq->ring->num_used < (wq->ring->num_items / 2)) { sc->ifp->if_drv_flags &= ~(IFF_DRV_OACTIVE); oce_tx_restart(sc, wq); } } } static void oce_tx_restart(POCE_SOFTC sc, struct oce_wq *wq) { if ((sc->ifp->if_drv_flags & IFF_DRV_RUNNING) != IFF_DRV_RUNNING) return; #if __FreeBSD_version >= 800000 if (!drbr_empty(sc->ifp, wq->br)) #else if (!IFQ_DRV_IS_EMPTY(&sc->ifp->if_snd)) #endif taskqueue_enqueue(taskqueue_swi, &wq->txtask); } #if defined(INET6) || defined(INET) static struct mbuf * oce_tso_setup(POCE_SOFTC sc, struct mbuf **mpp) { struct mbuf *m; #ifdef INET struct ip *ip; #endif #ifdef INET6 struct ip6_hdr *ip6; #endif struct ether_vlan_header *eh; struct tcphdr *th; uint16_t etype; int total_len = 0, ehdrlen = 0; m = *mpp; if (M_WRITABLE(m) == 0) { m = m_dup(*mpp, M_NOWAIT); if (!m) return NULL; m_freem(*mpp); *mpp = m; } eh = mtod(m, struct ether_vlan_header *); if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) { etype = ntohs(eh->evl_proto); ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN; } else { etype = 
ntohs(eh->evl_encap_proto); ehdrlen = ETHER_HDR_LEN; } switch (etype) { #ifdef INET case ETHERTYPE_IP: ip = (struct ip *)(m->m_data + ehdrlen); if (ip->ip_p != IPPROTO_TCP) return NULL; th = (struct tcphdr *)((caddr_t)ip + (ip->ip_hl << 2)); total_len = ehdrlen + (ip->ip_hl << 2) + (th->th_off << 2); break; #endif #ifdef INET6 case ETHERTYPE_IPV6: ip6 = (struct ip6_hdr *)(m->m_data + ehdrlen); if (ip6->ip6_nxt != IPPROTO_TCP) return NULL; th = (struct tcphdr *)((caddr_t)ip6 + sizeof(struct ip6_hdr)); total_len = ehdrlen + sizeof(struct ip6_hdr) + (th->th_off << 2); break; #endif default: return NULL; } m = m_pullup(m, total_len); if (!m) return NULL; *mpp = m; return m; } #endif /* INET6 || INET */ void oce_tx_task(void *arg, int npending) { struct oce_wq *wq = arg; POCE_SOFTC sc = wq->parent; struct ifnet *ifp = sc->ifp; int rc = 0; #if __FreeBSD_version >= 800000 LOCK(&wq->tx_lock); rc = oce_multiq_transmit(ifp, NULL, wq); if (rc) { device_printf(sc->dev, "TX[%d] restart failed\n", wq->queue_index); } UNLOCK(&wq->tx_lock); #else oce_start(ifp); #endif } void oce_start(struct ifnet *ifp) { POCE_SOFTC sc = ifp->if_softc; struct mbuf *m; int rc = 0; int def_q = 0; /* Defualt tx queue is 0*/ if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) != IFF_DRV_RUNNING) return; if (!sc->link_status) return; do { IF_DEQUEUE(&sc->ifp->if_snd, m); if (m == NULL) break; LOCK(&sc->wq[def_q]->tx_lock); rc = oce_tx(sc, &m, def_q); UNLOCK(&sc->wq[def_q]->tx_lock); if (rc) { if (m != NULL) { sc->wq[def_q]->tx_stats.tx_stops ++; ifp->if_drv_flags |= IFF_DRV_OACTIVE; IFQ_DRV_PREPEND(&ifp->if_snd, m); m = NULL; } break; } if (m != NULL) ETHER_BPF_MTAP(ifp, m); } while (TRUE); return; } /* Handle the Completion Queue for transmit */ uint16_t oce_wq_handler(void *arg) { struct oce_wq *wq = (struct oce_wq *)arg; POCE_SOFTC sc = wq->parent; struct oce_cq *cq = wq->cq; struct oce_nic_tx_cqe *cqe; int num_cqes = 0; LOCK(&wq->tx_compl_lock); bus_dmamap_sync(cq->ring->dma.tag, cq->ring->dma.map, BUS_DMASYNC_POSTWRITE); cqe = RING_GET_CONSUMER_ITEM_VA(cq->ring, struct oce_nic_tx_cqe); while (cqe->u0.dw[3]) { DW_SWAP((uint32_t *) cqe, sizeof(oce_wq_cqe)); wq->ring->cidx = cqe->u0.s.wqe_index + 1; if (wq->ring->cidx >= wq->ring->num_items) wq->ring->cidx -= wq->ring->num_items; oce_process_tx_completion(wq); wq->tx_stats.tx_compl++; cqe->u0.dw[3] = 0; RING_GET(cq->ring, 1); bus_dmamap_sync(cq->ring->dma.tag, cq->ring->dma.map, BUS_DMASYNC_POSTWRITE); cqe = RING_GET_CONSUMER_ITEM_VA(cq->ring, struct oce_nic_tx_cqe); num_cqes++; } if (num_cqes) oce_arm_cq(sc, cq->cq_id, num_cqes, FALSE); UNLOCK(&wq->tx_compl_lock); return num_cqes; } static int oce_multiq_transmit(struct ifnet *ifp, struct mbuf *m, struct oce_wq *wq) { POCE_SOFTC sc = ifp->if_softc; int status = 0, queue_index = 0; struct mbuf *next = NULL; struct buf_ring *br = NULL; br = wq->br; queue_index = wq->queue_index; if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) != IFF_DRV_RUNNING) { if (m != NULL) status = drbr_enqueue(ifp, br, m); return status; } if (m != NULL) { if ((status = drbr_enqueue(ifp, br, m)) != 0) return status; } while ((next = drbr_peek(ifp, br)) != NULL) { if (oce_tx(sc, &next, queue_index)) { if (next == NULL) { drbr_advance(ifp, br); } else { drbr_putback(ifp, br, next); wq->tx_stats.tx_stops ++; ifp->if_drv_flags |= IFF_DRV_OACTIVE; } break; } drbr_advance(ifp, br); if_inc_counter(ifp, IFCOUNTER_OBYTES, next->m_pkthdr.len); if (next->m_flags & M_MCAST) if_inc_counter(ifp, IFCOUNTER_OMCASTS, 1); ETHER_BPF_MTAP(ifp, 
next); } return 0; } /***************************************************************************** * Receive routines functions * *****************************************************************************/ static void oce_correct_header(struct mbuf *m, struct nic_hwlro_cqe_part1 *cqe1, struct nic_hwlro_cqe_part2 *cqe2) { uint32_t *p; struct ether_header *eh = NULL; struct tcphdr *tcp_hdr = NULL; struct ip *ip4_hdr = NULL; struct ip6_hdr *ip6 = NULL; uint32_t payload_len = 0; eh = mtod(m, struct ether_header *); /* correct IP header */ if(!cqe2->ipv6_frame) { ip4_hdr = (struct ip *)((char*)eh + sizeof(struct ether_header)); ip4_hdr->ip_ttl = cqe2->frame_lifespan; ip4_hdr->ip_len = htons(cqe2->coalesced_size - sizeof(struct ether_header)); tcp_hdr = (struct tcphdr *)((char*)ip4_hdr + sizeof(struct ip)); }else { ip6 = (struct ip6_hdr *)((char*)eh + sizeof(struct ether_header)); ip6->ip6_ctlun.ip6_un1.ip6_un1_hlim = cqe2->frame_lifespan; payload_len = cqe2->coalesced_size - sizeof(struct ether_header) - sizeof(struct ip6_hdr); ip6->ip6_ctlun.ip6_un1.ip6_un1_plen = htons(payload_len); tcp_hdr = (struct tcphdr *)((char*)ip6 + sizeof(struct ip6_hdr)); } /* correct tcp header */ tcp_hdr->th_ack = htonl(cqe2->tcp_ack_num); if(cqe2->push) { tcp_hdr->th_flags |= TH_PUSH; } tcp_hdr->th_win = htons(cqe2->tcp_window); tcp_hdr->th_sum = 0xffff; if(cqe2->ts_opt) { p = (uint32_t *)((char*)tcp_hdr + sizeof(struct tcphdr) + 2); *p = cqe1->tcp_timestamp_val; *(p+1) = cqe1->tcp_timestamp_ecr; } return; } static void oce_rx_mbuf_chain(struct oce_rq *rq, struct oce_common_cqe_info *cqe_info, struct mbuf **m) { POCE_SOFTC sc = (POCE_SOFTC) rq->parent; uint32_t i = 0, frag_len = 0; uint32_t len = cqe_info->pkt_size; struct oce_packet_desc *pd; struct mbuf *tail = NULL; for (i = 0; i < cqe_info->num_frags; i++) { if (rq->ring->cidx == rq->ring->pidx) { device_printf(sc->dev, "oce_rx_mbuf_chain: Invalid RX completion - Queue is empty\n"); return; } pd = &rq->pckts[rq->ring->cidx]; bus_dmamap_sync(rq->tag, pd->map, BUS_DMASYNC_POSTWRITE); bus_dmamap_unload(rq->tag, pd->map); RING_GET(rq->ring, 1); rq->pending--; frag_len = (len > rq->cfg.frag_size) ? 
rq->cfg.frag_size : len; pd->mbuf->m_len = frag_len; if (tail != NULL) { /* additional fragments */ pd->mbuf->m_flags &= ~M_PKTHDR; tail->m_next = pd->mbuf; if(rq->islro) tail->m_nextpkt = NULL; tail = pd->mbuf; } else { /* first fragment, fill out much of the packet header */ pd->mbuf->m_pkthdr.len = len; if(rq->islro) pd->mbuf->m_nextpkt = NULL; pd->mbuf->m_pkthdr.csum_flags = 0; if (IF_CSUM_ENABLED(sc)) { if (cqe_info->l4_cksum_pass) { if(!cqe_info->ipv6_frame) { /* IPV4 */ pd->mbuf->m_pkthdr.csum_flags |= (CSUM_DATA_VALID | CSUM_PSEUDO_HDR); }else { /* IPV6 frame */ if(rq->islro) { pd->mbuf->m_pkthdr.csum_flags |= (CSUM_DATA_VALID | CSUM_PSEUDO_HDR); } } pd->mbuf->m_pkthdr.csum_data = 0xffff; } if (cqe_info->ip_cksum_pass) { pd->mbuf->m_pkthdr.csum_flags |= (CSUM_IP_CHECKED|CSUM_IP_VALID); } } *m = tail = pd->mbuf; } pd->mbuf = NULL; len -= frag_len; } return; } static void oce_rx_lro(struct oce_rq *rq, struct nic_hwlro_singleton_cqe *cqe, struct nic_hwlro_cqe_part2 *cqe2) { POCE_SOFTC sc = (POCE_SOFTC) rq->parent; struct nic_hwlro_cqe_part1 *cqe1 = NULL; struct mbuf *m = NULL; struct oce_common_cqe_info cq_info; /* parse cqe */ if(cqe2 == NULL) { cq_info.pkt_size = cqe->pkt_size; cq_info.vtag = cqe->vlan_tag; cq_info.l4_cksum_pass = cqe->l4_cksum_pass; cq_info.ip_cksum_pass = cqe->ip_cksum_pass; cq_info.ipv6_frame = cqe->ipv6_frame; cq_info.vtp = cqe->vtp; cq_info.qnq = cqe->qnq; }else { cqe1 = (struct nic_hwlro_cqe_part1 *)cqe; cq_info.pkt_size = cqe2->coalesced_size; cq_info.vtag = cqe2->vlan_tag; cq_info.l4_cksum_pass = cqe2->l4_cksum_pass; cq_info.ip_cksum_pass = cqe2->ip_cksum_pass; cq_info.ipv6_frame = cqe2->ipv6_frame; cq_info.vtp = cqe2->vtp; cq_info.qnq = cqe1->qnq; } cq_info.vtag = BSWAP_16(cq_info.vtag); cq_info.num_frags = cq_info.pkt_size / rq->cfg.frag_size; if(cq_info.pkt_size % rq->cfg.frag_size) cq_info.num_frags++; oce_rx_mbuf_chain(rq, &cq_info, &m); if (m) { if(cqe2) { //assert(cqe2->valid != 0); //assert(cqe2->cqe_type != 2); oce_correct_header(m, cqe1, cqe2); } m->m_pkthdr.rcvif = sc->ifp; #if __FreeBSD_version >= 800000 if (rq->queue_index) m->m_pkthdr.flowid = (rq->queue_index - 1); else m->m_pkthdr.flowid = rq->queue_index; M_HASHTYPE_SET(m, M_HASHTYPE_OPAQUE); #endif /* This deternies if vlan tag is Valid */ if (cq_info.vtp) { if (sc->function_mode & FNM_FLEX10_MODE) { /* FLEX10. If QnQ is not set, neglect VLAN */ if (cq_info.qnq) { m->m_pkthdr.ether_vtag = cq_info.vtag; m->m_flags |= M_VLANTAG; } } else if (sc->pvid != (cq_info.vtag & VLAN_VID_MASK)) { /* In UMC mode generally pvid will be striped by hw. But in some cases we have seen it comes with pvid. So if pvid == vlan, neglect vlan. 
*/ m->m_pkthdr.ether_vtag = cq_info.vtag; m->m_flags |= M_VLANTAG; } } if_inc_counter(sc->ifp, IFCOUNTER_IPACKETS, 1); (*sc->ifp->if_input) (sc->ifp, m); /* Update rx stats per queue */ rq->rx_stats.rx_pkts++; rq->rx_stats.rx_bytes += cq_info.pkt_size; rq->rx_stats.rx_frags += cq_info.num_frags; rq->rx_stats.rx_ucast_pkts++; } return; } static void oce_rx(struct oce_rq *rq, struct oce_nic_rx_cqe *cqe) { POCE_SOFTC sc = (POCE_SOFTC) rq->parent; int len; struct mbuf *m = NULL; struct oce_common_cqe_info cq_info; uint16_t vtag = 0; /* Is it a flush compl that has no data */ if(!cqe->u0.s.num_fragments) goto exit; len = cqe->u0.s.pkt_size; if (!len) { /*partial DMA workaround for Lancer*/ oce_discard_rx_comp(rq, cqe->u0.s.num_fragments); goto exit; } if (!oce_cqe_portid_valid(sc, cqe)) { oce_discard_rx_comp(rq, cqe->u0.s.num_fragments); goto exit; } /* Get vlan_tag value */ if(IS_BE(sc) || IS_SH(sc)) vtag = BSWAP_16(cqe->u0.s.vlan_tag); else vtag = cqe->u0.s.vlan_tag; cq_info.l4_cksum_pass = cqe->u0.s.l4_cksum_pass; cq_info.ip_cksum_pass = cqe->u0.s.ip_cksum_pass; cq_info.ipv6_frame = cqe->u0.s.ip_ver; cq_info.num_frags = cqe->u0.s.num_fragments; cq_info.pkt_size = cqe->u0.s.pkt_size; oce_rx_mbuf_chain(rq, &cq_info, &m); if (m) { m->m_pkthdr.rcvif = sc->ifp; #if __FreeBSD_version >= 800000 if (rq->queue_index) m->m_pkthdr.flowid = (rq->queue_index - 1); else m->m_pkthdr.flowid = rq->queue_index; M_HASHTYPE_SET(m, M_HASHTYPE_OPAQUE); #endif /* This deternies if vlan tag is Valid */ if (oce_cqe_vtp_valid(sc, cqe)) { if (sc->function_mode & FNM_FLEX10_MODE) { /* FLEX10. If QnQ is not set, neglect VLAN */ if (cqe->u0.s.qnq) { m->m_pkthdr.ether_vtag = vtag; m->m_flags |= M_VLANTAG; } } else if (sc->pvid != (vtag & VLAN_VID_MASK)) { /* In UMC mode generally pvid will be striped by hw. But in some cases we have seen it comes with pvid. So if pvid == vlan, neglect vlan. 
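 * Only a tag that differs from the port VLAN id reaches this point; it is
 * attached to the mbuf so the stack can see it.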
*/ m->m_pkthdr.ether_vtag = vtag; m->m_flags |= M_VLANTAG; } } if_inc_counter(sc->ifp, IFCOUNTER_IPACKETS, 1); #if defined(INET6) || defined(INET) /* Try to queue to LRO */ if (IF_LRO_ENABLED(sc) && (cqe->u0.s.ip_cksum_pass) && (cqe->u0.s.l4_cksum_pass) && (!cqe->u0.s.ip_ver) && (rq->lro.lro_cnt != 0)) { if (tcp_lro_rx(&rq->lro, m, 0) == 0) { rq->lro_pkts_queued ++; goto post_done; } /* If LRO posting fails then try to post to STACK */ } #endif (*sc->ifp->if_input) (sc->ifp, m); #if defined(INET6) || defined(INET) post_done: #endif /* Update rx stats per queue */ rq->rx_stats.rx_pkts++; rq->rx_stats.rx_bytes += cqe->u0.s.pkt_size; rq->rx_stats.rx_frags += cqe->u0.s.num_fragments; if (cqe->u0.s.pkt_type == OCE_MULTICAST_PACKET) rq->rx_stats.rx_mcast_pkts++; if (cqe->u0.s.pkt_type == OCE_UNICAST_PACKET) rq->rx_stats.rx_ucast_pkts++; } exit: return; } void oce_discard_rx_comp(struct oce_rq *rq, int num_frags) { uint32_t i = 0; struct oce_packet_desc *pd; POCE_SOFTC sc = (POCE_SOFTC) rq->parent; for (i = 0; i < num_frags; i++) { if (rq->ring->cidx == rq->ring->pidx) { device_printf(sc->dev, "oce_discard_rx_comp: Invalid RX completion - Queue is empty\n"); return; } pd = &rq->pckts[rq->ring->cidx]; bus_dmamap_sync(rq->tag, pd->map, BUS_DMASYNC_POSTWRITE); bus_dmamap_unload(rq->tag, pd->map); if (pd->mbuf != NULL) { m_freem(pd->mbuf); pd->mbuf = NULL; } RING_GET(rq->ring, 1); rq->pending--; } } static int oce_cqe_vtp_valid(POCE_SOFTC sc, struct oce_nic_rx_cqe *cqe) { struct oce_nic_rx_cqe_v1 *cqe_v1; int vtp = 0; if (sc->be3_native) { cqe_v1 = (struct oce_nic_rx_cqe_v1 *)cqe; vtp = cqe_v1->u0.s.vlan_tag_present; } else vtp = cqe->u0.s.vlan_tag_present; return vtp; } static int oce_cqe_portid_valid(POCE_SOFTC sc, struct oce_nic_rx_cqe *cqe) { struct oce_nic_rx_cqe_v1 *cqe_v1; int port_id = 0; if (sc->be3_native && (IS_BE(sc) || IS_SH(sc))) { cqe_v1 = (struct oce_nic_rx_cqe_v1 *)cqe; port_id = cqe_v1->u0.s.port; if (sc->port_id != port_id) return 0; } else ;/* For BE3 legacy and Lancer this is dummy */ return 1; } #if defined(INET6) || defined(INET) void oce_rx_flush_lro(struct oce_rq *rq) { struct lro_ctrl *lro = &rq->lro; POCE_SOFTC sc = (POCE_SOFTC) rq->parent; if (!IF_LRO_ENABLED(sc)) return; tcp_lro_flush_all(lro); rq->lro_pkts_queued = 0; return; } static int oce_init_lro(POCE_SOFTC sc) { struct lro_ctrl *lro = NULL; int i = 0, rc = 0; for (i = 0; i < sc->nrqs; i++) { lro = &sc->rq[i]->lro; rc = tcp_lro_init(lro); if (rc != 0) { device_printf(sc->dev, "LRO init failed\n"); return rc; } lro->ifp = sc->ifp; } return rc; } void oce_free_lro(POCE_SOFTC sc) { struct lro_ctrl *lro = NULL; int i = 0; for (i = 0; i < sc->nrqs; i++) { lro = &sc->rq[i]->lro; if (lro) tcp_lro_free(lro); } } #endif int oce_alloc_rx_bufs(struct oce_rq *rq, int count) { POCE_SOFTC sc = (POCE_SOFTC) rq->parent; int i, in, rc; struct oce_packet_desc *pd; bus_dma_segment_t segs[6]; int nsegs, added = 0; struct oce_nic_rqe *rqe; pd_rxulp_db_t rxdb_reg; uint32_t val = 0; uint32_t oce_max_rq_posts = 64; bzero(&rxdb_reg, sizeof(pd_rxulp_db_t)); for (i = 0; i < count; i++) { in = (rq->ring->pidx + 1) % OCE_RQ_PACKET_ARRAY_SIZE; pd = &rq->pckts[rq->ring->pidx]; pd->mbuf = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, oce_rq_buf_size); if (pd->mbuf == NULL) { device_printf(sc->dev, "mbuf allocation failed, size = %d\n",oce_rq_buf_size); break; } pd->mbuf->m_nextpkt = NULL; pd->mbuf->m_len = pd->mbuf->m_pkthdr.len = rq->cfg.frag_size; rc = bus_dmamap_load_mbuf_sg(rq->tag, pd->map, pd->mbuf, segs, &nsegs, BUS_DMA_NOWAIT); if (rc) { 
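/*
 * bus_dmamap_load_mbuf_sg() could not map the newly allocated buffer;
 * free the mbuf and stop posting further receive buffers on this pass.
 */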
m_free(pd->mbuf); device_printf(sc->dev, "bus_dmamap_load_mbuf_sg failed rc = %d\n", rc); break; } if (nsegs != 1) { i--; continue; } bus_dmamap_sync(rq->tag, pd->map, BUS_DMASYNC_PREREAD); rqe = RING_GET_PRODUCER_ITEM_VA(rq->ring, struct oce_nic_rqe); rqe->u0.s.frag_pa_hi = ADDR_HI(segs[0].ds_addr); rqe->u0.s.frag_pa_lo = ADDR_LO(segs[0].ds_addr); DW_SWAP(u32ptr(rqe), sizeof(struct oce_nic_rqe)); RING_PUT(rq->ring, 1); added++; rq->pending++; } oce_max_rq_posts = sc->enable_hwlro ? OCE_HWLRO_MAX_RQ_POSTS : OCE_MAX_RQ_POSTS; if (added != 0) { for (i = added / oce_max_rq_posts; i > 0; i--) { rxdb_reg.bits.num_posted = oce_max_rq_posts; rxdb_reg.bits.qid = rq->rq_id; if(rq->islro) { val |= rq->rq_id & DB_LRO_RQ_ID_MASK; val |= oce_max_rq_posts << 16; OCE_WRITE_REG32(sc, db, DB_OFFSET, val); }else { OCE_WRITE_REG32(sc, db, PD_RXULP_DB, rxdb_reg.dw0); } added -= oce_max_rq_posts; } if (added > 0) { rxdb_reg.bits.qid = rq->rq_id; rxdb_reg.bits.num_posted = added; if(rq->islro) { val |= rq->rq_id & DB_LRO_RQ_ID_MASK; val |= added << 16; OCE_WRITE_REG32(sc, db, DB_OFFSET, val); }else { OCE_WRITE_REG32(sc, db, PD_RXULP_DB, rxdb_reg.dw0); } } } return 0; } static void oce_check_rx_bufs(POCE_SOFTC sc, uint32_t num_cqes, struct oce_rq *rq) { if (num_cqes) { oce_arm_cq(sc, rq->cq->cq_id, num_cqes, FALSE); if(!sc->enable_hwlro) { if((OCE_RQ_PACKET_ARRAY_SIZE - rq->pending) > 1) oce_alloc_rx_bufs(rq, ((OCE_RQ_PACKET_ARRAY_SIZE - rq->pending) - 1)); }else { if ((OCE_RQ_PACKET_ARRAY_SIZE -1 - rq->pending) > 64) oce_alloc_rx_bufs(rq, 64); } } return; } uint16_t oce_rq_handler_lro(void *arg) { struct oce_rq *rq = (struct oce_rq *)arg; struct oce_cq *cq = rq->cq; POCE_SOFTC sc = rq->parent; struct nic_hwlro_singleton_cqe *cqe; struct nic_hwlro_cqe_part2 *cqe2; int num_cqes = 0; LOCK(&rq->rx_lock); bus_dmamap_sync(cq->ring->dma.tag,cq->ring->dma.map, BUS_DMASYNC_POSTWRITE); cqe = RING_GET_CONSUMER_ITEM_VA(cq->ring, struct nic_hwlro_singleton_cqe); while (cqe->valid) { if(cqe->cqe_type == 0) { /* singleton cqe */ /* we should not get singleton cqe after cqe1 on same rq */ if(rq->cqe_firstpart != NULL) { device_printf(sc->dev, "Got singleton cqe after cqe1 \n"); goto exit_rq_handler_lro; } if(cqe->error != 0) { rq->rx_stats.rxcp_err++; if_inc_counter(sc->ifp, IFCOUNTER_IERRORS, 1); } oce_rx_lro(rq, cqe, NULL); rq->rx_stats.rx_compl++; cqe->valid = 0; RING_GET(cq->ring, 1); num_cqes++; if (num_cqes >= (IS_XE201(sc) ? 8 : oce_max_rsp_handled)) break; }else if(cqe->cqe_type == 0x1) { /* first part */ /* we should not get cqe1 after cqe1 on same rq */ if(rq->cqe_firstpart != NULL) { device_printf(sc->dev, "Got cqe1 after cqe1 \n"); goto exit_rq_handler_lro; } rq->cqe_firstpart = (struct nic_hwlro_cqe_part1 *)cqe; RING_GET(cq->ring, 1); }else if(cqe->cqe_type == 0x2) { /* second part */ cqe2 = (struct nic_hwlro_cqe_part2 *)cqe; if(cqe2->error != 0) { rq->rx_stats.rxcp_err++; if_inc_counter(sc->ifp, IFCOUNTER_IERRORS, 1); } /* We should not get cqe2 without cqe1 */ if(rq->cqe_firstpart == NULL) { device_printf(sc->dev, "Got cqe2 without cqe1 \n"); goto exit_rq_handler_lro; } oce_rx_lro(rq, (struct nic_hwlro_singleton_cqe *)rq->cqe_firstpart, cqe2); rq->rx_stats.rx_compl++; rq->cqe_firstpart->valid = 0; cqe2->valid = 0; rq->cqe_firstpart = NULL; RING_GET(cq->ring, 1); num_cqes += 2; if (num_cqes >= (IS_XE201(sc) ? 
8 : oce_max_rsp_handled)) break; } bus_dmamap_sync(cq->ring->dma.tag,cq->ring->dma.map, BUS_DMASYNC_POSTWRITE); cqe = RING_GET_CONSUMER_ITEM_VA(cq->ring, struct nic_hwlro_singleton_cqe); } oce_check_rx_bufs(sc, num_cqes, rq); exit_rq_handler_lro: UNLOCK(&rq->rx_lock); return 0; } /* Handle the Completion Queue for receive */ uint16_t oce_rq_handler(void *arg) { struct oce_rq *rq = (struct oce_rq *)arg; struct oce_cq *cq = rq->cq; POCE_SOFTC sc = rq->parent; struct oce_nic_rx_cqe *cqe; int num_cqes = 0; if(rq->islro) { oce_rq_handler_lro(arg); return 0; } LOCK(&rq->rx_lock); bus_dmamap_sync(cq->ring->dma.tag, cq->ring->dma.map, BUS_DMASYNC_POSTWRITE); cqe = RING_GET_CONSUMER_ITEM_VA(cq->ring, struct oce_nic_rx_cqe); while (cqe->u0.dw[2]) { DW_SWAP((uint32_t *) cqe, sizeof(oce_rq_cqe)); if (cqe->u0.s.error == 0) { oce_rx(rq, cqe); } else { rq->rx_stats.rxcp_err++; if_inc_counter(sc->ifp, IFCOUNTER_IERRORS, 1); /* Post L3/L4 errors to stack.*/ oce_rx(rq, cqe); } rq->rx_stats.rx_compl++; cqe->u0.dw[2] = 0; #if defined(INET6) || defined(INET) if (IF_LRO_ENABLED(sc) && rq->lro_pkts_queued >= 16) { oce_rx_flush_lro(rq); } #endif RING_GET(cq->ring, 1); bus_dmamap_sync(cq->ring->dma.tag, cq->ring->dma.map, BUS_DMASYNC_POSTWRITE); cqe = RING_GET_CONSUMER_ITEM_VA(cq->ring, struct oce_nic_rx_cqe); num_cqes++; if (num_cqes >= (IS_XE201(sc) ? 8 : oce_max_rsp_handled)) break; } #if defined(INET6) || defined(INET) if (IF_LRO_ENABLED(sc)) oce_rx_flush_lro(rq); #endif oce_check_rx_bufs(sc, num_cqes, rq); UNLOCK(&rq->rx_lock); return 0; } /***************************************************************************** * Helper function prototypes in this file * *****************************************************************************/ static int oce_attach_ifp(POCE_SOFTC sc) { sc->ifp = if_alloc(IFT_ETHER); if (!sc->ifp) return ENOMEM; ifmedia_init(&sc->media, IFM_IMASK, oce_media_change, oce_media_status); ifmedia_add(&sc->media, IFM_ETHER | IFM_AUTO, 0, NULL); ifmedia_set(&sc->media, IFM_ETHER | IFM_AUTO); sc->ifp->if_flags = IFF_BROADCAST | IFF_MULTICAST; sc->ifp->if_ioctl = oce_ioctl; sc->ifp->if_start = oce_start; sc->ifp->if_init = oce_init; sc->ifp->if_mtu = ETHERMTU; sc->ifp->if_softc = sc; #if __FreeBSD_version >= 800000 sc->ifp->if_transmit = oce_multiq_start; sc->ifp->if_qflush = oce_multiq_flush; #endif if_initname(sc->ifp, device_get_name(sc->dev), device_get_unit(sc->dev)); sc->ifp->if_snd.ifq_drv_maxlen = OCE_MAX_TX_DESC - 1; IFQ_SET_MAXLEN(&sc->ifp->if_snd, sc->ifp->if_snd.ifq_drv_maxlen); IFQ_SET_READY(&sc->ifp->if_snd); sc->ifp->if_hwassist = OCE_IF_HWASSIST; sc->ifp->if_hwassist |= CSUM_TSO; sc->ifp->if_hwassist |= (CSUM_IP | CSUM_TCP | CSUM_UDP); sc->ifp->if_capabilities = OCE_IF_CAPABILITIES; sc->ifp->if_capabilities |= IFCAP_HWCSUM; sc->ifp->if_capabilities |= IFCAP_VLAN_HWFILTER; #if defined(INET6) || defined(INET) sc->ifp->if_capabilities |= IFCAP_TSO; sc->ifp->if_capabilities |= IFCAP_LRO; sc->ifp->if_capabilities |= IFCAP_VLAN_HWTSO; #endif sc->ifp->if_capenable = sc->ifp->if_capabilities; sc->ifp->if_baudrate = IF_Gbps(10); #if __FreeBSD_version >= 1000000 sc->ifp->if_hw_tsomax = 65536 - (ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN); sc->ifp->if_hw_tsomaxsegcount = OCE_MAX_TX_ELEMENTS; sc->ifp->if_hw_tsomaxsegsize = 4096; #endif ether_ifattach(sc->ifp, sc->macaddr.mac_addr); return 0; } static void oce_add_vlan(void *arg, struct ifnet *ifp, uint16_t vtag) { POCE_SOFTC sc = ifp->if_softc; if (ifp->if_softc != arg) return; if ((vtag == 0) || (vtag > 4095)) return; sc->vlan_tag[vtag] = 1; 
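/*
 * Count the new VLAN and reprogram the hardware filter through
 * oce_vid_config(); once more VLANs are added than the filter can
 * hold, oce_vid_config() places the interface in VLAN promiscuous
 * mode instead, so it is not called again beyond that point.
 */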
sc->vlans_added++; if (sc->vlans_added <= (sc->max_vlans + 1)) oce_vid_config(sc); } static void oce_del_vlan(void *arg, struct ifnet *ifp, uint16_t vtag) { POCE_SOFTC sc = ifp->if_softc; if (ifp->if_softc != arg) return; if ((vtag == 0) || (vtag > 4095)) return; sc->vlan_tag[vtag] = 0; sc->vlans_added--; oce_vid_config(sc); } /* * A max of 64 vlans can be configured in BE. If the user configures * more, place the card in vlan promiscuous mode. */ static int oce_vid_config(POCE_SOFTC sc) { struct normal_vlan vtags[MAX_VLANFILTER_SIZE]; uint16_t ntags = 0, i; int status = 0; if ((sc->vlans_added <= MAX_VLANFILTER_SIZE) && (sc->ifp->if_capenable & IFCAP_VLAN_HWFILTER)) { for (i = 0; i < MAX_VLANS; i++) { if (sc->vlan_tag[i]) { vtags[ntags].vtag = i; ntags++; } } if (ntags) status = oce_config_vlan(sc, (uint8_t) sc->if_id, vtags, ntags, 1, 0); } else status = oce_config_vlan(sc, (uint8_t) sc->if_id, NULL, 0, 1, 1); return status; } static void oce_mac_addr_set(POCE_SOFTC sc) { uint32_t old_pmac_id = sc->pmac_id; int status = 0; status = bcmp((IF_LLADDR(sc->ifp)), sc->macaddr.mac_addr, sc->macaddr.size_of_struct); if (!status) return; status = oce_mbox_macaddr_add(sc, (uint8_t *)(IF_LLADDR(sc->ifp)), sc->if_id, &sc->pmac_id); if (!status) { status = oce_mbox_macaddr_del(sc, sc->if_id, old_pmac_id); bcopy((IF_LLADDR(sc->ifp)), sc->macaddr.mac_addr, sc->macaddr.size_of_struct); } if (status) device_printf(sc->dev, "Failed update macaddress\n"); } static int oce_handle_passthrough(struct ifnet *ifp, caddr_t data) { POCE_SOFTC sc = ifp->if_softc; struct ifreq *ifr = (struct ifreq *)data; int rc = ENXIO; char cookie[32] = {0}; void *priv_data = ifr_data_get_ptr(ifr); void *ioctl_ptr; uint32_t req_size; struct mbx_hdr req; OCE_DMA_MEM dma_mem; struct mbx_common_get_cntl_attr *fw_cmd; if (copyin(priv_data, cookie, strlen(IOCTL_COOKIE))) return EFAULT; if (memcmp(cookie, IOCTL_COOKIE, strlen(IOCTL_COOKIE))) return EINVAL; ioctl_ptr = (char *)priv_data + strlen(IOCTL_COOKIE); if (copyin(ioctl_ptr, &req, sizeof(struct mbx_hdr))) return EFAULT; req_size = le32toh(req.u0.req.request_length); if (req_size > 65536) return EINVAL; req_size += sizeof(struct mbx_hdr); rc = oce_dma_alloc(sc, req_size, &dma_mem, 0); if (rc) return ENOMEM; if (copyin(ioctl_ptr, OCE_DMAPTR(&dma_mem,char), req_size)) { rc = EFAULT; goto dma_free; } rc = oce_pass_through_mbox(sc, &dma_mem, req_size); if (rc) { rc = EIO; goto dma_free; } if (copyout(OCE_DMAPTR(&dma_mem,char), ioctl_ptr, req_size)) rc = EFAULT; /* firmware is filling all the attributes for this ioctl except the driver version..so fill it */ if(req.u0.rsp.opcode == OPCODE_COMMON_GET_CNTL_ATTRIBUTES) { fw_cmd = (struct mbx_common_get_cntl_attr *) ioctl_ptr; strncpy(fw_cmd->params.rsp.cntl_attr_info.hba_attr.drv_ver_str, COMPONENT_REVISION, strlen(COMPONENT_REVISION)); } dma_free: oce_dma_free(sc, &dma_mem); return rc; } static void oce_eqd_set_periodic(POCE_SOFTC sc) { struct oce_set_eqd set_eqd[OCE_MAX_EQ]; struct oce_aic_obj *aic; struct oce_eq *eqo; uint64_t now = 0, delta; int eqd, i, num = 0; uint32_t tx_reqs = 0, rxpkts = 0, pps; struct oce_wq *wq; struct oce_rq *rq; #define ticks_to_msecs(t) (1000 * (t) / hz) for (i = 0 ; i < sc->neqs; i++) { eqo = sc->eq[i]; aic = &sc->aic_obj[i]; /* When setting the static eq delay from the user space */ if (!aic->enable) { if (aic->ticks) aic->ticks = 0; eqd = aic->et_eqd; goto modify_eqd; } rq = sc->rq[i]; rxpkts = rq->rx_stats.rx_pkts; wq = sc->wq[i]; tx_reqs = wq->tx_stats.tx_reqs; now = ticks; if (!aic->ticks || now < 
aic->ticks || rxpkts < aic->prev_rxpkts || tx_reqs < aic->prev_txreqs) { aic->prev_rxpkts = rxpkts; aic->prev_txreqs = tx_reqs; aic->ticks = now; continue; } delta = ticks_to_msecs(now - aic->ticks); pps = (((uint32_t)(rxpkts - aic->prev_rxpkts) * 1000) / delta) + (((uint32_t)(tx_reqs - aic->prev_txreqs) * 1000) / delta); eqd = (pps / 15000) << 2; if (eqd < 8) eqd = 0; /* Make sure that the eq delay is in the known range */ eqd = min(eqd, aic->max_eqd); eqd = max(eqd, aic->min_eqd); aic->prev_rxpkts = rxpkts; aic->prev_txreqs = tx_reqs; aic->ticks = now; modify_eqd: if (eqd != aic->cur_eqd) { set_eqd[num].delay_multiplier = (eqd * 65)/100; set_eqd[num].eq_id = eqo->eq_id; aic->cur_eqd = eqd; num++; } } /* Is there atleast one eq that needs to be modified? */ for(i = 0; i < num; i += 8) { if((num - i) >=8 ) oce_mbox_eqd_modify_periodic(sc, &set_eqd[i], 8); else oce_mbox_eqd_modify_periodic(sc, &set_eqd[i], (num - i)); } } static void oce_detect_hw_error(POCE_SOFTC sc) { uint32_t ue_low = 0, ue_high = 0, ue_low_mask = 0, ue_high_mask = 0; uint32_t sliport_status = 0, sliport_err1 = 0, sliport_err2 = 0; uint32_t i; if (sc->hw_error) return; if (IS_XE201(sc)) { sliport_status = OCE_READ_REG32(sc, db, SLIPORT_STATUS_OFFSET); if (sliport_status & SLIPORT_STATUS_ERR_MASK) { sliport_err1 = OCE_READ_REG32(sc, db, SLIPORT_ERROR1_OFFSET); sliport_err2 = OCE_READ_REG32(sc, db, SLIPORT_ERROR2_OFFSET); } } else { ue_low = OCE_READ_REG32(sc, devcfg, PCICFG_UE_STATUS_LOW); ue_high = OCE_READ_REG32(sc, devcfg, PCICFG_UE_STATUS_HIGH); ue_low_mask = OCE_READ_REG32(sc, devcfg, PCICFG_UE_STATUS_LOW_MASK); ue_high_mask = OCE_READ_REG32(sc, devcfg, PCICFG_UE_STATUS_HI_MASK); ue_low = (ue_low & ~ue_low_mask); ue_high = (ue_high & ~ue_high_mask); } /* On certain platforms BE hardware can indicate spurious UEs. * Allow the h/w to stop working completely in case of a real UE. * Hence not setting the hw_error for UE detection. 
*/ if (sliport_status & SLIPORT_STATUS_ERR_MASK) { sc->hw_error = TRUE; device_printf(sc->dev, "Error detected in the card\n"); } if (sliport_status & SLIPORT_STATUS_ERR_MASK) { device_printf(sc->dev, "ERR: sliport status 0x%x\n", sliport_status); device_printf(sc->dev, "ERR: sliport error1 0x%x\n", sliport_err1); device_printf(sc->dev, "ERR: sliport error2 0x%x\n", sliport_err2); } if (ue_low) { for (i = 0; ue_low; ue_low >>= 1, i++) { if (ue_low & 1) device_printf(sc->dev, "UE: %s bit set\n", ue_status_low_desc[i]); } } if (ue_high) { for (i = 0; ue_high; ue_high >>= 1, i++) { if (ue_high & 1) device_printf(sc->dev, "UE: %s bit set\n", ue_status_hi_desc[i]); } } } static void oce_local_timer(void *arg) { POCE_SOFTC sc = arg; int i = 0; oce_detect_hw_error(sc); oce_refresh_nic_stats(sc); oce_refresh_queue_stats(sc); oce_mac_addr_set(sc); /* TX Watch Dog*/ for (i = 0; i < sc->nwqs; i++) oce_tx_restart(sc, sc->wq[i]); /* calculate and set the eq delay for optimal interrupt rate */ if (IS_BE(sc) || IS_SH(sc)) oce_eqd_set_periodic(sc); callout_reset(&sc->timer, hz, oce_local_timer, sc); } static void oce_tx_compl_clean(POCE_SOFTC sc) { struct oce_wq *wq; int i = 0, timeo = 0, num_wqes = 0; int pending_txqs = sc->nwqs; /* Stop polling for compls when HW has been silent for 10ms or * hw_error or no outstanding completions expected */ do { pending_txqs = sc->nwqs; for_all_wq_queues(sc, wq, i) { num_wqes = oce_wq_handler(wq); if(num_wqes) timeo = 0; if(!wq->ring->num_used) pending_txqs--; } if (pending_txqs == 0 || ++timeo > 10 || sc->hw_error) break; DELAY(1000); } while (TRUE); for_all_wq_queues(sc, wq, i) { while(wq->ring->num_used) { LOCK(&wq->tx_compl_lock); oce_process_tx_completion(wq); UNLOCK(&wq->tx_compl_lock); } } } /* NOTE : This should only be called holding * DEVICE_LOCK. */ static void oce_if_deactivate(POCE_SOFTC sc) { int i; struct oce_rq *rq; struct oce_wq *wq; struct oce_eq *eq; sc->ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE); oce_tx_compl_clean(sc); /* Stop intrs and finish any bottom halves pending */ oce_hw_intr_disable(sc); /* Since taskqueue_drain takes a Gaint Lock, We should not acquire any other lock. So unlock device lock and require after completing taskqueue_drain. */ UNLOCK(&sc->dev_lock); for (i = 0; i < sc->intr_count; i++) { if (sc->intrs[i].tq != NULL) { taskqueue_drain(sc->intrs[i].tq, &sc->intrs[i].task); } } LOCK(&sc->dev_lock); /* Delete RX queue in card with flush param */ oce_stop_rx(sc); /* Invalidate any pending cq and eq entries*/ for_all_evnt_queues(sc, eq, i) oce_drain_eq(eq); for_all_rq_queues(sc, rq, i) oce_drain_rq_cq(rq); for_all_wq_queues(sc, wq, i) oce_drain_wq_cq(wq); /* But still we need to get MCC aync events. 
So enable intrs and also arm first EQ */ oce_hw_intr_enable(sc); oce_arm_eq(sc, sc->eq[0]->eq_id, 0, TRUE, FALSE); DELAY(10); } static void oce_if_activate(POCE_SOFTC sc) { struct oce_eq *eq; struct oce_rq *rq; struct oce_wq *wq; int i, rc = 0; sc->ifp->if_drv_flags |= IFF_DRV_RUNNING; oce_hw_intr_disable(sc); oce_start_rx(sc); for_all_rq_queues(sc, rq, i) { rc = oce_start_rq(rq); if (rc) device_printf(sc->dev, "Unable to start RX\n"); } for_all_wq_queues(sc, wq, i) { rc = oce_start_wq(wq); if (rc) device_printf(sc->dev, "Unable to start TX\n"); } for_all_evnt_queues(sc, eq, i) oce_arm_eq(sc, eq->eq_id, 0, TRUE, FALSE); oce_hw_intr_enable(sc); } static void process_link_state(POCE_SOFTC sc, struct oce_async_cqe_link_state *acqe) { /* Update Link status */ if ((acqe->u0.s.link_status & ~ASYNC_EVENT_LOGICAL) == ASYNC_EVENT_LINK_UP) { sc->link_status = ASYNC_EVENT_LINK_UP; if_link_state_change(sc->ifp, LINK_STATE_UP); } else { sc->link_status = ASYNC_EVENT_LINK_DOWN; if_link_state_change(sc->ifp, LINK_STATE_DOWN); } } static void oce_async_grp5_osbmc_process(POCE_SOFTC sc, struct oce_async_evt_grp5_os2bmc *evt) { DW_SWAP(evt, sizeof(struct oce_async_evt_grp5_os2bmc)); if (evt->u.s.mgmt_enable) sc->flags |= OCE_FLAGS_OS2BMC; else return; sc->bmc_filt_mask = evt->u.s.arp_filter; sc->bmc_filt_mask |= (evt->u.s.dhcp_client_filt << 1); sc->bmc_filt_mask |= (evt->u.s.dhcp_server_filt << 2); sc->bmc_filt_mask |= (evt->u.s.net_bios_filt << 3); sc->bmc_filt_mask |= (evt->u.s.bcast_filt << 4); sc->bmc_filt_mask |= (evt->u.s.ipv6_nbr_filt << 5); sc->bmc_filt_mask |= (evt->u.s.ipv6_ra_filt << 6); sc->bmc_filt_mask |= (evt->u.s.ipv6_ras_filt << 7); sc->bmc_filt_mask |= (evt->u.s.mcast_filt << 8); } static void oce_process_grp5_events(POCE_SOFTC sc, struct oce_mq_cqe *cqe) { struct oce_async_event_grp5_pvid_state *gcqe; struct oce_async_evt_grp5_os2bmc *bmccqe; switch (cqe->u0.s.async_type) { case ASYNC_EVENT_PVID_STATE: /* GRP5 PVID */ gcqe = (struct oce_async_event_grp5_pvid_state *)cqe; if (gcqe->enabled) sc->pvid = gcqe->tag & VLAN_VID_MASK; else sc->pvid = 0; break; case ASYNC_EVENT_OS2BMC: bmccqe = (struct oce_async_evt_grp5_os2bmc *)cqe; oce_async_grp5_osbmc_process(sc, bmccqe); break; default: break; } } /* Handle the Completion Queue for the Mailbox/Async notifications */ uint16_t oce_mq_handler(void *arg) { struct oce_mq *mq = (struct oce_mq *)arg; POCE_SOFTC sc = mq->parent; struct oce_cq *cq = mq->cq; int num_cqes = 0, evt_type = 0, optype = 0; struct oce_mq_cqe *cqe; struct oce_async_cqe_link_state *acqe; struct oce_async_event_qnq *dbgcqe; bus_dmamap_sync(cq->ring->dma.tag, cq->ring->dma.map, BUS_DMASYNC_POSTWRITE); cqe = RING_GET_CONSUMER_ITEM_VA(cq->ring, struct oce_mq_cqe); while (cqe->u0.dw[3]) { DW_SWAP((uint32_t *) cqe, sizeof(oce_mq_cqe)); if (cqe->u0.s.async_event) { evt_type = cqe->u0.s.event_type; optype = cqe->u0.s.async_type; if (evt_type == ASYNC_EVENT_CODE_LINK_STATE) { /* Link status evt */ acqe = (struct oce_async_cqe_link_state *)cqe; process_link_state(sc, acqe); } else if (evt_type == ASYNC_EVENT_GRP5) { oce_process_grp5_events(sc, cqe); } else if (evt_type == ASYNC_EVENT_CODE_DEBUG && optype == ASYNC_EVENT_DEBUG_QNQ) { dbgcqe = (struct oce_async_event_qnq *)cqe; if(dbgcqe->valid) sc->qnqid = dbgcqe->vlan_tag; sc->qnq_debug_event = TRUE; } } cqe->u0.dw[3] = 0; RING_GET(cq->ring, 1); bus_dmamap_sync(cq->ring->dma.tag, cq->ring->dma.map, BUS_DMASYNC_POSTWRITE); cqe = RING_GET_CONSUMER_ITEM_VA(cq->ring, struct oce_mq_cqe); num_cqes++; } if (num_cqes) oce_arm_cq(sc, cq->cq_id, 
num_cqes, FALSE); return 0; } static void setup_max_queues_want(POCE_SOFTC sc) { /* Check if it is FLEX machine. Is so dont use RSS */ if ((sc->function_mode & FNM_FLEX10_MODE) || (sc->function_mode & FNM_UMC_MODE) || (sc->function_mode & FNM_VNIC_MODE) || (!is_rss_enabled(sc)) || IS_BE2(sc)) { sc->nrqs = 1; sc->nwqs = 1; } else { sc->nrqs = MIN(OCE_NCPUS, sc->nrssqs) + 1; sc->nwqs = MIN(OCE_NCPUS, sc->nrssqs); } if (IS_BE2(sc) && is_rss_enabled(sc)) sc->nrqs = MIN(OCE_NCPUS, sc->nrssqs) + 1; } static void update_queues_got(POCE_SOFTC sc) { if (is_rss_enabled(sc)) { sc->nrqs = sc->intr_count + 1; sc->nwqs = sc->intr_count; } else { sc->nrqs = 1; sc->nwqs = 1; } if (IS_BE2(sc)) sc->nwqs = 1; } static int oce_check_ipv6_ext_hdr(struct mbuf *m) { struct ether_header *eh = mtod(m, struct ether_header *); caddr_t m_datatemp = m->m_data; if (eh->ether_type == htons(ETHERTYPE_IPV6)) { m->m_data += sizeof(struct ether_header); struct ip6_hdr *ip6 = mtod(m, struct ip6_hdr *); if((ip6->ip6_nxt != IPPROTO_TCP) && \ (ip6->ip6_nxt != IPPROTO_UDP)){ struct ip6_ext *ip6e = NULL; m->m_data += sizeof(struct ip6_hdr); ip6e = (struct ip6_ext *) mtod(m, struct ip6_ext *); if(ip6e->ip6e_len == 0xff) { m->m_data = m_datatemp; return TRUE; } } m->m_data = m_datatemp; } return FALSE; } static int is_be3_a1(POCE_SOFTC sc) { if((sc->flags & OCE_FLAGS_BE3) && ((sc->asic_revision & 0xFF) < 2)) { return TRUE; } return FALSE; } static struct mbuf * oce_insert_vlan_tag(POCE_SOFTC sc, struct mbuf *m, boolean_t *complete) { uint16_t vlan_tag = 0; if(!M_WRITABLE(m)) return NULL; /* Embed vlan tag in the packet if it is not part of it */ if(m->m_flags & M_VLANTAG) { vlan_tag = EVL_VLANOFTAG(m->m_pkthdr.ether_vtag); m->m_flags &= ~M_VLANTAG; } /* if UMC, ignore vlan tag insertion and instead insert pvid */ if(sc->pvid) { if(!vlan_tag) vlan_tag = sc->pvid; if (complete) *complete = FALSE; } if(vlan_tag) { m = ether_vlanencap(m, vlan_tag); } if(sc->qnqid) { m = ether_vlanencap(m, sc->qnqid); if (complete) *complete = FALSE; } return m; } static int oce_tx_asic_stall_verify(POCE_SOFTC sc, struct mbuf *m) { if(is_be3_a1(sc) && IS_QNQ_OR_UMC(sc) && \ oce_check_ipv6_ext_hdr(m)) { return TRUE; } return FALSE; } static void oce_get_config(POCE_SOFTC sc) { int rc = 0; uint32_t max_rss = 0; if ((IS_BE(sc) || IS_SH(sc)) && (!sc->be3_native)) max_rss = OCE_LEGACY_MODE_RSS; else max_rss = OCE_MAX_RSS; if (!IS_BE(sc)) { rc = oce_get_profile_config(sc, max_rss); if (rc) { sc->nwqs = OCE_MAX_WQ; sc->nrssqs = max_rss; sc->nrqs = sc->nrssqs + 1; } } else { /* For BE3 don't rely on fw for determining the resources */ sc->nrssqs = max_rss; sc->nrqs = sc->nrssqs + 1; sc->nwqs = OCE_MAX_WQ; sc->max_vlans = MAX_VLANFILTER_SIZE; } } static void oce_rdma_close(void) { if (oce_rdma_if != NULL) { oce_rdma_if = NULL; } } static void oce_get_mac_addr(POCE_SOFTC sc, uint8_t *macaddr) { memcpy(macaddr, sc->macaddr.mac_addr, 6); } int oce_register_rdma(POCE_RDMA_INFO rdma_info, POCE_RDMA_IF rdma_if) { POCE_SOFTC sc; struct oce_dev_info di; int i; if ((rdma_info == NULL) || (rdma_if == NULL)) { return -EINVAL; } if ((rdma_info->size != OCE_RDMA_INFO_SIZE) || (rdma_if->size != OCE_RDMA_IF_SIZE)) { return -ENXIO; } rdma_info->close = oce_rdma_close; rdma_info->mbox_post = oce_mbox_post; rdma_info->common_req_hdr_init = mbx_common_req_hdr_init; rdma_info->get_mac_addr = oce_get_mac_addr; oce_rdma_if = rdma_if; sc = softc_head; while (sc != NULL) { if (oce_rdma_if->announce != NULL) { memset(&di, 0, sizeof(di)); di.dev = sc->dev; di.softc = sc; di.ifp = 
sc->ifp; di.db_bhandle = sc->db_bhandle; di.db_btag = sc->db_btag; di.db_page_size = 4096; if (sc->flags & OCE_FLAGS_USING_MSIX) { di.intr_mode = OCE_INTERRUPT_MODE_MSIX; } else if (sc->flags & OCE_FLAGS_USING_MSI) { di.intr_mode = OCE_INTERRUPT_MODE_MSI; } else { di.intr_mode = OCE_INTERRUPT_MODE_INTX; } di.dev_family = OCE_GEN2_FAMILY; // fixme: must detect skyhawk if (di.intr_mode != OCE_INTERRUPT_MODE_INTX) { di.msix.num_vectors = sc->intr_count + sc->roce_intr_count; di.msix.start_vector = sc->intr_count; for (i=0; iintrs[i].vector; } } else { } memcpy(di.mac_addr, sc->macaddr.mac_addr, 6); di.vendor_id = pci_get_vendor(sc->dev); di.dev_id = pci_get_device(sc->dev); if (sc->rdma_flags & OCE_RDMA_FLAG_SUPPORTED) { di.flags |= OCE_RDMA_INFO_RDMA_SUPPORTED; } rdma_if->announce(&di); sc = sc->next; } } return 0; } static void oce_read_env_variables( POCE_SOFTC sc ) { char *value = NULL; int rc = 0; /* read if user wants to enable hwlro or swlro */ //value = getenv("oce_enable_hwlro"); if(value && IS_SH(sc)) { sc->enable_hwlro = strtol(value, NULL, 10); if(sc->enable_hwlro) { rc = oce_mbox_nic_query_lro_capabilities(sc, NULL, NULL); if(rc) { device_printf(sc->dev, "no hardware lro support\n"); device_printf(sc->dev, "software lro enabled\n"); sc->enable_hwlro = 0; }else { device_printf(sc->dev, "hardware lro enabled\n"); oce_max_rsp_handled = 32; } }else { device_printf(sc->dev, "software lro enabled\n"); } }else { sc->enable_hwlro = 0; } /* read mbuf size */ //value = getenv("oce_rq_buf_size"); if(value && IS_SH(sc)) { oce_rq_buf_size = strtol(value, NULL, 10); switch(oce_rq_buf_size) { case 2048: case 4096: case 9216: case 16384: break; default: device_printf(sc->dev, " Supported oce_rq_buf_size values are 2K, 4K, 9K, 16K \n"); oce_rq_buf_size = 2048; } } return; } Index: head/sys/dev/pccbb/pccbb_pci.c =================================================================== --- head/sys/dev/pccbb/pccbb_pci.c (revision 338948) +++ head/sys/dev/pccbb/pccbb_pci.c (revision 338949) @@ -1,986 +1,988 @@ /*- * SPDX-License-Identifier: BSD-2-Clause-FreeBSD * * Copyright (c) 2002-2004 M. Warner Losh. * Copyright (c) 2000-2001 Jonathan Chen. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * */ /*- * Copyright (c) 1998, 1999 and 2000 * HAYAKAWA Koichi. All rights reserved. 
* * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed by HAYAKAWA Koichi. * 4. The name of the author may not be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ /* * Driver for PCI to CardBus Bridge chips * * References: * TI Datasheets: * http://www-s.ti.com/cgi-bin/sc/generic2.cgi?family=PCI+CARDBUS+CONTROLLERS * * Written by Jonathan Chen * The author would like to acknowledge: * * HAYAKAWA Koichi: Author of the NetBSD code for the same thing * * Warner Losh: Newbus/newcard guru and author of the pccard side of things * * YAMAMOTO Shigeru: Author of another FreeBSD cardbus driver * * David Cross: Author of the initial ugly hack for a specific cardbus card */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "power_if.h" #include "card_if.h" #include "pcib_if.h" #define DPRINTF(x) do { if (cbb_debug) printf x; } while (0) #define DEVPRINTF(x) do { if (cbb_debug) device_printf x; } while (0) #define PCI_MASK_CONFIG(DEV,REG,MASK,SIZE) \ pci_write_config(DEV, REG, pci_read_config(DEV, REG, SIZE) MASK, SIZE) #define PCI_MASK2_CONFIG(DEV,REG,MASK1,MASK2,SIZE) \ pci_write_config(DEV, REG, ( \ pci_read_config(DEV, REG, SIZE) MASK1) MASK2, SIZE) static void cbb_chipinit(struct cbb_softc *sc); static int cbb_pci_filt(void *arg); static struct yenta_chipinfo { uint32_t yc_id; const char *yc_name; int yc_chiptype; } yc_chipsets[] = { /* Texas Instruments chips */ {PCIC_ID_TI1031, "TI1031 PCI-PC Card Bridge", CB_TI113X}, {PCIC_ID_TI1130, "TI1130 PCI-CardBus Bridge", CB_TI113X}, {PCIC_ID_TI1131, "TI1131 PCI-CardBus Bridge", CB_TI113X}, {PCIC_ID_TI1210, "TI1210 PCI-CardBus Bridge", CB_TI12XX}, {PCIC_ID_TI1211, "TI1211 PCI-CardBus Bridge", CB_TI12XX}, {PCIC_ID_TI1220, "TI1220 PCI-CardBus Bridge", CB_TI12XX}, {PCIC_ID_TI1221, "TI1221 PCI-CardBus Bridge", CB_TI12XX}, {PCIC_ID_TI1225, "TI1225 PCI-CardBus Bridge", CB_TI12XX}, {PCIC_ID_TI1250, "TI1250 PCI-CardBus Bridge", CB_TI125X}, {PCIC_ID_TI1251, "TI1251 PCI-CardBus Bridge", 
CB_TI125X}, {PCIC_ID_TI1251B,"TI1251B PCI-CardBus Bridge",CB_TI125X}, {PCIC_ID_TI1260, "TI1260 PCI-CardBus Bridge", CB_TI12XX}, {PCIC_ID_TI1260B,"TI1260B PCI-CardBus Bridge",CB_TI12XX}, {PCIC_ID_TI1410, "TI1410 PCI-CardBus Bridge", CB_TI12XX}, {PCIC_ID_TI1420, "TI1420 PCI-CardBus Bridge", CB_TI12XX}, {PCIC_ID_TI1421, "TI1421 PCI-CardBus Bridge", CB_TI12XX}, {PCIC_ID_TI1450, "TI1450 PCI-CardBus Bridge", CB_TI125X}, /*SIC!*/ {PCIC_ID_TI1451, "TI1451 PCI-CardBus Bridge", CB_TI12XX}, {PCIC_ID_TI1510, "TI1510 PCI-CardBus Bridge", CB_TI12XX}, {PCIC_ID_TI1520, "TI1520 PCI-CardBus Bridge", CB_TI12XX}, {PCIC_ID_TI4410, "TI4410 PCI-CardBus Bridge", CB_TI12XX}, {PCIC_ID_TI4450, "TI4450 PCI-CardBus Bridge", CB_TI12XX}, {PCIC_ID_TI4451, "TI4451 PCI-CardBus Bridge", CB_TI12XX}, {PCIC_ID_TI4510, "TI4510 PCI-CardBus Bridge", CB_TI12XX}, {PCIC_ID_TI6411, "TI6411 PCI-CardBus Bridge", CB_TI12XX}, {PCIC_ID_TI6420, "TI6420 PCI-CardBus Bridge", CB_TI12XX}, {PCIC_ID_TI6420SC, "TI6420 PCI-CardBus Bridge", CB_TI12XX}, {PCIC_ID_TI7410, "TI7410 PCI-CardBus Bridge", CB_TI12XX}, {PCIC_ID_TI7510, "TI7510 PCI-CardBus Bridge", CB_TI12XX}, {PCIC_ID_TI7610, "TI7610 PCI-CardBus Bridge", CB_TI12XX}, {PCIC_ID_TI7610M, "TI7610 PCI-CardBus Bridge", CB_TI12XX}, {PCIC_ID_TI7610SD, "TI7610 PCI-CardBus Bridge", CB_TI12XX}, {PCIC_ID_TI7610MS, "TI7610 PCI-CardBus Bridge", CB_TI12XX}, /* ENE */ {PCIC_ID_ENE_CB710, "ENE CB710 PCI-CardBus Bridge", CB_TI12XX}, {PCIC_ID_ENE_CB720, "ENE CB720 PCI-CardBus Bridge", CB_TI12XX}, {PCIC_ID_ENE_CB1211, "ENE CB1211 PCI-CardBus Bridge", CB_TI12XX}, {PCIC_ID_ENE_CB1225, "ENE CB1225 PCI-CardBus Bridge", CB_TI12XX}, {PCIC_ID_ENE_CB1410, "ENE CB1410 PCI-CardBus Bridge", CB_TI12XX}, {PCIC_ID_ENE_CB1420, "ENE CB1420 PCI-CardBus Bridge", CB_TI12XX}, /* Ricoh chips */ {PCIC_ID_RICOH_RL5C465, "RF5C465 PCI-CardBus Bridge", CB_RF5C46X}, {PCIC_ID_RICOH_RL5C466, "RF5C466 PCI-CardBus Bridge", CB_RF5C46X}, {PCIC_ID_RICOH_RL5C475, "RF5C475 PCI-CardBus Bridge", CB_RF5C47X}, {PCIC_ID_RICOH_RL5C476, "RF5C476 PCI-CardBus Bridge", CB_RF5C47X}, {PCIC_ID_RICOH_RL5C477, "RF5C477 PCI-CardBus Bridge", CB_RF5C47X}, {PCIC_ID_RICOH_RL5C478, "RF5C478 PCI-CardBus Bridge", CB_RF5C47X}, /* Toshiba products */ {PCIC_ID_TOPIC95, "ToPIC95 PCI-CardBus Bridge", CB_TOPIC95}, {PCIC_ID_TOPIC95B, "ToPIC95B PCI-CardBus Bridge", CB_TOPIC95}, {PCIC_ID_TOPIC97, "ToPIC97 PCI-CardBus Bridge", CB_TOPIC97}, {PCIC_ID_TOPIC100, "ToPIC100 PCI-CardBus Bridge", CB_TOPIC97}, /* Cirrus Logic */ {PCIC_ID_CLPD6832, "CLPD6832 PCI-CardBus Bridge", CB_CIRRUS}, {PCIC_ID_CLPD6833, "CLPD6833 PCI-CardBus Bridge", CB_CIRRUS}, {PCIC_ID_CLPD6834, "CLPD6834 PCI-CardBus Bridge", CB_CIRRUS}, /* 02Micro */ {PCIC_ID_OZ6832, "O2Micro OZ6832/6833 PCI-CardBus Bridge", CB_O2MICRO}, {PCIC_ID_OZ6860, "O2Micro OZ6836/6860 PCI-CardBus Bridge", CB_O2MICRO}, {PCIC_ID_OZ6872, "O2Micro OZ6812/6872 PCI-CardBus Bridge", CB_O2MICRO}, {PCIC_ID_OZ6912, "O2Micro OZ6912/6972 PCI-CardBus Bridge", CB_O2MICRO}, {PCIC_ID_OZ6922, "O2Micro OZ6922 PCI-CardBus Bridge", CB_O2MICRO}, {PCIC_ID_OZ6933, "O2Micro OZ6933 PCI-CardBus Bridge", CB_O2MICRO}, {PCIC_ID_OZ711E1, "O2Micro OZ711E1 PCI-CardBus Bridge", CB_O2MICRO}, {PCIC_ID_OZ711EC1, "O2Micro OZ711EC1/M1 PCI-CardBus Bridge", CB_O2MICRO}, {PCIC_ID_OZ711E2, "O2Micro OZ711E2 PCI-CardBus Bridge", CB_O2MICRO}, {PCIC_ID_OZ711M1, "O2Micro OZ711M1 PCI-CardBus Bridge", CB_O2MICRO}, {PCIC_ID_OZ711M2, "O2Micro OZ711M2 PCI-CardBus Bridge", CB_O2MICRO}, {PCIC_ID_OZ711M3, "O2Micro OZ711M3 PCI-CardBus Bridge", CB_O2MICRO}, /* SMC */ {PCIC_ID_SMC_34C90, "SMC 
34C90 PCI-CardBus Bridge", CB_CIRRUS}, /* sentinel */ {0 /* null id */, "unknown", CB_UNKNOWN}, }; /************************************************************************/ /* Probe/Attach */ /************************************************************************/ static int cbb_chipset(uint32_t pci_id, const char **namep) { struct yenta_chipinfo *ycp; for (ycp = yc_chipsets; ycp->yc_id != 0 && pci_id != ycp->yc_id; ++ycp) continue; if (namep != NULL) *namep = ycp->yc_name; return (ycp->yc_chiptype); } static int cbb_pci_probe(device_t brdev) { const char *name; uint32_t progif; uint32_t baseclass; uint32_t subclass; /* * Do we know that we support the chipset? If so, then we * accept the device. */ if (cbb_chipset(pci_get_devid(brdev), &name) != CB_UNKNOWN) { device_set_desc(brdev, name); return (BUS_PROBE_DEFAULT); } /* * We do support generic CardBus bridges. All that we've seen * to date have progif 0 (the Yenta spec, and successors mandate * this). */ baseclass = pci_get_class(brdev); subclass = pci_get_subclass(brdev); progif = pci_get_progif(brdev); if (baseclass == PCIC_BRIDGE && subclass == PCIS_BRIDGE_CARDBUS && progif == 0) { device_set_desc(brdev, "PCI-CardBus Bridge"); return (BUS_PROBE_GENERIC); } return (ENXIO); } /* * Print out the config space */ static void cbb_print_config(device_t dev) { int i; device_printf(dev, "PCI Configuration space:"); for (i = 0; i < 256; i += 4) { if (i % 16 == 0) printf("\n 0x%02x: ", i); printf("0x%08x ", pci_read_config(dev, i, 4)); } printf("\n"); } static int cbb_pci_attach(device_t brdev) { #if !(defined(NEW_PCIB) && defined(PCI_RES_BUS)) static int curr_bus_number = 2; /* XXX EVILE BAD (see below) */ uint32_t pribus; #endif struct cbb_softc *sc = (struct cbb_softc *)device_get_softc(brdev); struct sysctl_ctx_list *sctx; struct sysctl_oid *soid; int rid; device_t parent; parent = device_get_parent(brdev); mtx_init(&sc->mtx, device_get_nameunit(brdev), "cbb", MTX_DEF); sc->chipset = cbb_chipset(pci_get_devid(brdev), NULL); sc->dev = brdev; sc->cbdev = NULL; sc->exca[0].pccarddev = NULL; sc->domain = pci_get_domain(brdev); sc->pribus = pcib_get_bus(parent); #if defined(NEW_PCIB) && defined(PCI_RES_BUS) pci_write_config(brdev, PCIR_PRIBUS_2, sc->pribus, 1); pcib_setup_secbus(brdev, &sc->bus, 1); #else sc->bus.sec = pci_read_config(brdev, PCIR_SECBUS_2, 1); sc->bus.sub = pci_read_config(brdev, PCIR_SUBBUS_2, 1); #endif SLIST_INIT(&sc->rl); rid = CBBR_SOCKBASE; sc->base_res = bus_alloc_resource_any(brdev, SYS_RES_MEMORY, &rid, RF_ACTIVE); if (!sc->base_res) { device_printf(brdev, "Could not map register memory\n"); mtx_destroy(&sc->mtx); return (ENOMEM); } else { DEVPRINTF((brdev, "Found memory at %jx\n", rman_get_start(sc->base_res))); } sc->bst = rman_get_bustag(sc->base_res); sc->bsh = rman_get_bushandle(sc->base_res); exca_init(&sc->exca[0], brdev, sc->bst, sc->bsh, CBB_EXCA_OFFSET); sc->exca[0].flags |= EXCA_HAS_MEMREG_WIN; sc->exca[0].chipset = EXCA_CARDBUS; sc->chipinit = cbb_chipinit; sc->chipinit(sc); /*Sysctls*/ sctx = device_get_sysctl_ctx(brdev); soid = device_get_sysctl_tree(brdev); SYSCTL_ADD_UINT(sctx, SYSCTL_CHILDREN(soid), OID_AUTO, "domain", CTLFLAG_RD, &sc->domain, 0, "Domain number"); SYSCTL_ADD_UINT(sctx, SYSCTL_CHILDREN(soid), OID_AUTO, "pribus", CTLFLAG_RD, &sc->pribus, 0, "Primary bus number"); SYSCTL_ADD_UINT(sctx, SYSCTL_CHILDREN(soid), OID_AUTO, "secbus", CTLFLAG_RD, &sc->bus.sec, 0, "Secondary bus number"); SYSCTL_ADD_UINT(sctx, SYSCTL_CHILDREN(soid), OID_AUTO, "subbus", CTLFLAG_RD, &sc->bus.sub, 0, "Subordinate 
bus number"); #if 0 SYSCTL_ADD_UINT(sctx, SYSCTL_CHILDREN(soid), OID_AUTO, "memory", CTLFLAG_RD, &sc->subbus, 0, "Memory window open"); SYSCTL_ADD_UINT(sctx, SYSCTL_CHILDREN(soid), OID_AUTO, "premem", CTLFLAG_RD, &sc->subbus, 0, "Prefetch memory window open"); SYSCTL_ADD_UINT(sctx, SYSCTL_CHILDREN(soid), OID_AUTO, "io1", CTLFLAG_RD, &sc->subbus, 0, "io range 1 open"); SYSCTL_ADD_UINT(sctx, SYSCTL_CHILDREN(soid), OID_AUTO, "io2", CTLFLAG_RD, &sc->subbus, 0, "io range 2 open"); #endif #if !(defined(NEW_PCIB) && defined(PCI_RES_BUS)) /* * This is a gross hack. We should be scanning the entire pci * tree, assigning bus numbers in a way such that we (1) can * reserve 1 extra bus just in case and (2) all sub buses * are in an appropriate range. */ DEVPRINTF((brdev, "Secondary bus is %d\n", sc->bus.sec)); pribus = pci_read_config(brdev, PCIR_PRIBUS_2, 1); if (sc->bus.sec == 0 || sc->pribus != pribus) { if (curr_bus_number <= sc->pribus) curr_bus_number = sc->pribus + 1; if (pribus != sc->pribus) { DEVPRINTF((brdev, "Setting primary bus to %d\n", sc->pribus)); pci_write_config(brdev, PCIR_PRIBUS_2, sc->pribus, 1); } sc->bus.sec = curr_bus_number++; sc->bus.sub = curr_bus_number++; DEVPRINTF((brdev, "Secondary bus set to %d subbus %d\n", sc->bus.sec, sc->bus.sub)); pci_write_config(brdev, PCIR_SECBUS_2, sc->bus.sec, 1); pci_write_config(brdev, PCIR_SUBBUS_2, sc->bus.sub, 1); } #endif /* attach children */ sc->cbdev = device_add_child(brdev, "cardbus", -1); if (sc->cbdev == NULL) DEVPRINTF((brdev, "WARNING: cannot add cardbus bus.\n")); else if (device_probe_and_attach(sc->cbdev) != 0) DEVPRINTF((brdev, "WARNING: cannot attach cardbus bus!\n")); sc->exca[0].pccarddev = device_add_child(brdev, "pccard", -1); if (sc->exca[0].pccarddev == NULL) DEVPRINTF((brdev, "WARNING: cannot add pccard bus.\n")); else if (device_probe_and_attach(sc->exca[0].pccarddev) != 0) DEVPRINTF((brdev, "WARNING: cannot attach pccard bus.\n")); /* Map and establish the interrupt. 
*/ rid = 0; sc->irq_res = bus_alloc_resource_any(brdev, SYS_RES_IRQ, &rid, RF_SHAREABLE | RF_ACTIVE); if (sc->irq_res == NULL) { device_printf(brdev, "Unable to map IRQ...\n"); goto err; } if (bus_setup_intr(brdev, sc->irq_res, INTR_TYPE_AV | INTR_MPSAFE, cbb_pci_filt, NULL, sc, &sc->intrhand)) { device_printf(brdev, "couldn't establish interrupt\n"); goto err; } /* reset 16-bit pcmcia bus */ exca_clrb(&sc->exca[0], EXCA_INTR, EXCA_INTR_RESET); /* turn off power */ cbb_power(brdev, CARD_OFF); /* CSC Interrupt: Card detect interrupt on */ cbb_setb(sc, CBB_SOCKET_MASK, CBB_SOCKET_MASK_CD); /* reset interrupt */ cbb_set(sc, CBB_SOCKET_EVENT, cbb_get(sc, CBB_SOCKET_EVENT)); if (bootverbose) cbb_print_config(brdev); /* Start the thread */ if (kproc_create(cbb_event_thread, sc, &sc->event_thread, 0, 0, "%s event thread", device_get_nameunit(brdev))) { device_printf(brdev, "unable to create event thread.\n"); panic("cbb_create_event_thread"); } sc->sc_root_token = root_mount_hold(device_get_nameunit(sc->dev)); return (0); err: if (sc->irq_res) bus_release_resource(brdev, SYS_RES_IRQ, 0, sc->irq_res); if (sc->base_res) { bus_release_resource(brdev, SYS_RES_MEMORY, CBBR_SOCKBASE, sc->base_res); } mtx_destroy(&sc->mtx); return (ENOMEM); } static int cbb_pci_detach(device_t brdev) { #if defined(NEW_PCIB) && defined(PCI_RES_BUS) struct cbb_softc *sc = device_get_softc(brdev); #endif int error; error = cbb_detach(brdev); #if defined(NEW_PCIB) && defined(PCI_RES_BUS) if (error == 0) pcib_free_secbus(brdev, &sc->bus); #endif return (error); } static void cbb_chipinit(struct cbb_softc *sc) { uint32_t mux, sysctrl, reg; /* Set CardBus latency timer */ if (pci_read_config(sc->dev, PCIR_SECLAT_2, 1) < 0x20) pci_write_config(sc->dev, PCIR_SECLAT_2, 0x20, 1); /* Set PCI latency timer */ if (pci_read_config(sc->dev, PCIR_LATTIMER, 1) < 0x20) pci_write_config(sc->dev, PCIR_LATTIMER, 0x20, 1); /* Enable DMA, memory access for this card and I/O access for children */ pci_enable_busmaster(sc->dev); pci_enable_io(sc->dev, SYS_RES_IOPORT); pci_enable_io(sc->dev, SYS_RES_MEMORY); /* disable Legacy IO */ switch (sc->chipset) { case CB_RF5C46X: PCI_MASK_CONFIG(sc->dev, CBBR_BRIDGECTRL, & ~(CBBM_BRIDGECTRL_RL_3E0_EN | CBBM_BRIDGECTRL_RL_3E2_EN), 2); break; default: pci_write_config(sc->dev, CBBR_LEGACY, 0x0, 4); break; } /* Use PCI interrupt for interrupt routing */ PCI_MASK2_CONFIG(sc->dev, CBBR_BRIDGECTRL, & ~(CBBM_BRIDGECTRL_MASTER_ABORT | CBBM_BRIDGECTRL_INTR_IREQ_ISA_EN), | CBBM_BRIDGECTRL_WRITE_POST_EN, 2); /* * XXX this should be a function table, ala OLDCARD. This means * that we could more easily support ISA interrupts for pccard * cards if we had to. */ switch (sc->chipset) { case CB_TI113X: /* * The TI 1031, TI 1130 and TI 1131 all require another bit * be set to enable PCI routing of interrupts, and then * a bit for each of the CSC and Function interrupts we * want routed. */ PCI_MASK_CONFIG(sc->dev, CBBR_CBCTRL, | CBBM_CBCTRL_113X_PCI_INTR | CBBM_CBCTRL_113X_PCI_CSC | CBBM_CBCTRL_113X_PCI_IRQ_EN, 1); PCI_MASK_CONFIG(sc->dev, CBBR_DEVCTRL, & ~(CBBM_DEVCTRL_INT_SERIAL | CBBM_DEVCTRL_INT_PCI), 1); break; case CB_TI12XX: /* * Some TI 12xx (and [14][45]xx) based pci cards * sometimes have issues with the MFUNC register not * being initialized due to a bad EEPROM on board. * Laptops that this matters on have this register * properly initialized. * * The TI125X parts have a different register. * * Note: Only the lower two nibbles matter. 
When set * to 0, the MFUNC{0,1} pins are GPIO, which isn't * going to work out too well because we specifically * program these parts to parallel interrupt signalling * elsewhere. We preserve the upper bits of this * register since changing them have subtle side effects * for different variants of the card and are * extremely difficult to exaustively test. * * Also, the TI 1510/1520 changed the default for the MFUNC * register from 0x0 to 0x1000 to enable IRQSER by default. * We want to be careful to avoid overriding that, and the * below test will do that. Should this check prove to be * too permissive, we should just check against 0 and 0x1000 * and not touch it otherwise. */ mux = pci_read_config(sc->dev, CBBR_MFUNC, 4); sysctrl = pci_read_config(sc->dev, CBBR_SYSCTRL, 4); if ((mux & (CBBM_MFUNC_PIN0 | CBBM_MFUNC_PIN1)) == 0) { mux = (mux & ~CBBM_MFUNC_PIN0) | CBBM_MFUNC_PIN0_INTA; if ((sysctrl & CBBM_SYSCTRL_INTRTIE) == 0) mux = (mux & ~CBBM_MFUNC_PIN1) | CBBM_MFUNC_PIN1_INTB; pci_write_config(sc->dev, CBBR_MFUNC, mux, 4); } /*FALLTHROUGH*/ case CB_TI125X: /* * Disable zoom video. Some machines initialize this * improperly and exerpience has shown that this helps * prevent strange behavior. We don't support zoom * video anyway, so no harm can come from this. */ pci_write_config(sc->dev, CBBR_MMCTRL, 0, 4); break; case CB_O2MICRO: /* * Issue #1: INT# generated at the same time as * selected ISA IRQ. When IREQ# or STSCHG# is active, * in addition to the ISA IRQ being generated, INT# * will also be generated at the same time. * * Some of the older controllers have an issue in * which the slot's PCI INT# will be asserted whenever * IREQ# or STSCGH# is asserted even if ExCA registers * 03h or 05h have an ISA IRQ selected. * * The fix for this issue, which will work for any * controller (old or new), is to set ExCA registers * 3Ah (slot 0) & 7Ah (slot 1) bits 7:4 = 1010b. * These bits are undocumented. By setting this * register (of each slot) to '1010xxxxb' a routing of * IREQ# to INTC# and STSCHG# to INTC# is selected. * Since INTC# isn't connected there will be no * unexpected PCI INT when IREQ# or STSCHG# is active. * However, INTA# (slot 0) or INTB# (slot 1) will * still be correctly generated if NO ISA IRQ is * selected (ExCA regs 03h or 05h are cleared). */ reg = exca_getb(&sc->exca[0], EXCA_O2MICRO_CTRL_C); reg = (reg & 0x0f) | EXCA_O2CC_IREQ_INTC | EXCA_O2CC_STSCHG_INTC; exca_putb(&sc->exca[0], EXCA_O2MICRO_CTRL_C, reg); break; case CB_TOPIC97: /* * Disable Zoom Video, ToPIC 97, 100. */ pci_write_config(sc->dev, TOPIC97_ZV_CONTROL, 0, 1); /* * ToPIC 97, 100 * At offset 0xa1: INTERRUPT CONTROL register * 0x1: Turn on INT interrupts. */ PCI_MASK_CONFIG(sc->dev, TOPIC_INTCTRL, | TOPIC97_INTCTRL_INTIRQSEL, 1); /* * ToPIC97, 100 * Need to assert support for low voltage cards */ exca_setb(&sc->exca[0], EXCA_TOPIC97_CTRL, EXCA_TOPIC97_CTRL_LV_MASK); goto topic_common; case CB_TOPIC95: /* * SOCKETCTRL appears to be TOPIC 95/B specific */ PCI_MASK_CONFIG(sc->dev, TOPIC95_SOCKETCTRL, | TOPIC95_SOCKETCTRL_SCR_IRQSEL, 4); topic_common:; /* * At offset 0xa0: SLOT CONTROL * 0x80 Enable CardBus Functionality * 0x40 Enable CardBus and PC Card registers * 0x20 Lock ID in exca regs * 0x10 Write protect ID in config regs * Clear the rest of the bits, which defaults the slot * in legacy mode to 0x3e0 and offset 0. 
(legacy * mode is determined elsewhere) */ pci_write_config(sc->dev, TOPIC_SLOTCTRL, TOPIC_SLOTCTRL_SLOTON | TOPIC_SLOTCTRL_SLOTEN | TOPIC_SLOTCTRL_ID_LOCK | TOPIC_SLOTCTRL_ID_WP, 1); /* * At offset 0xa3 Card Detect Control Register * 0x80 CARDBUS enbale * 0x01 Cleared for hardware change detect */ PCI_MASK2_CONFIG(sc->dev, TOPIC_CDC, | TOPIC_CDC_CARDBUS, & ~TOPIC_CDC_SWDETECT, 4); break; } /* * Need to tell ExCA registers to CSC interrupts route via PCI * interrupts. There are two ways to do this. One is to set * INTR_ENABLE and the other is to set CSC to 0. Since both * methods are mutually compatible, we do both. */ exca_putb(&sc->exca[0], EXCA_INTR, EXCA_INTR_ENABLE); exca_putb(&sc->exca[0], EXCA_CSC_INTR, 0); cbb_disable_func_intr(sc); /* close all memory and io windows */ pci_write_config(sc->dev, CBBR_MEMBASE0, 0xffffffff, 4); pci_write_config(sc->dev, CBBR_MEMLIMIT0, 0, 4); pci_write_config(sc->dev, CBBR_MEMBASE1, 0xffffffff, 4); pci_write_config(sc->dev, CBBR_MEMLIMIT1, 0, 4); pci_write_config(sc->dev, CBBR_IOBASE0, 0xffffffff, 4); pci_write_config(sc->dev, CBBR_IOLIMIT0, 0, 4); pci_write_config(sc->dev, CBBR_IOBASE1, 0xffffffff, 4); pci_write_config(sc->dev, CBBR_IOLIMIT1, 0, 4); } static int cbb_route_interrupt(device_t pcib, device_t dev, int pin) { struct cbb_softc *sc = (struct cbb_softc *)device_get_softc(pcib); return (rman_get_start(sc->irq_res)); } static int cbb_pci_shutdown(device_t brdev) { struct cbb_softc *sc = (struct cbb_softc *)device_get_softc(brdev); /* * We're about to pull the rug out from the card, so mark it as * gone to prevent harm. */ sc->cardok = 0; /* * Place the cards in reset, turn off the interrupts and power * down the socket. */ PCI_MASK_CONFIG(brdev, CBBR_BRIDGECTRL, |CBBM_BRIDGECTRL_RESET, 2); exca_clrb(&sc->exca[0], EXCA_INTR, EXCA_INTR_RESET); cbb_set(sc, CBB_SOCKET_MASK, 0); cbb_set(sc, CBB_SOCKET_EVENT, 0xffffffff); cbb_power(brdev, CARD_OFF); /* * For paranoia, turn off all address decoding. Really not needed, * it seems, but it can't hurt */ exca_putb(&sc->exca[0], EXCA_ADDRWIN_ENABLE, 0); pci_write_config(brdev, CBBR_MEMBASE0, 0, 4); pci_write_config(brdev, CBBR_MEMLIMIT0, 0, 4); pci_write_config(brdev, CBBR_MEMBASE1, 0, 4); pci_write_config(brdev, CBBR_MEMLIMIT1, 0, 4); pci_write_config(brdev, CBBR_IOBASE0, 0, 4); pci_write_config(brdev, CBBR_IOLIMIT0, 0, 4); pci_write_config(brdev, CBBR_IOBASE1, 0, 4); pci_write_config(brdev, CBBR_IOLIMIT1, 0, 4); return (0); } static int cbb_pci_filt(void *arg) { struct cbb_softc *sc = arg; uint32_t sockevent; uint8_t csc; int retval = FILTER_STRAY; /* * Some chips also require us to read the old ExCA registe for card * status change when we route CSC vis PCI. This isn't supposed to be * required, but it clears the interrupt state on some chipsets. * Maybe there's a setting that would obviate its need. Maybe we * should test the status bits and deal with them, but so far we've * not found any machines that don't also give us the socket status * indication above. * * This call used to be unconditional. However, further research * suggests that we hit this condition when the card READY interrupt * fired. So now we only read it for 16-bit cards, and we only claim * the interrupt if READY is set. If this still causes problems, then * the next step would be to read this if we have a 16-bit card *OR* * we have no card. We treat the READY signal as if it were the power * completion signal. 
Some bridges may double signal things here, bit * signalling twice should be OK since we only sleep on the powerintr * in one place and a double wakeup would be benign there. */ if (sc->flags & CBB_16BIT_CARD) { csc = exca_getb(&sc->exca[0], EXCA_CSC); if (csc & EXCA_CSC_READY) { atomic_add_int(&sc->powerintr, 1); wakeup((void *)&sc->powerintr); retval = FILTER_HANDLED; } } /* * Read the socket event. Sometimes, the theory goes, the PCI bus is * so loaded that it cannot satisfy the read request, so we get * garbage back from the following read. We have to filter out the * garbage so that we don't spontaneously reset the card under high * load. PCI isn't supposed to act like this. No doubt this is a bug * in the PCI bridge chipset (or cbb brige) that's being used in * certain amd64 laptops today. Work around the issue by assuming * that any bits we don't know about being set means that we got * garbage. */ sockevent = cbb_get(sc, CBB_SOCKET_EVENT); if (sockevent != 0 && (sockevent & ~CBB_SOCKET_EVENT_VALID_MASK) == 0) { /* * If anything has happened to the socket, we assume that the * card is no longer OK, and we shouldn't call its ISR. We * set cardok as soon as we've attached the card. This helps * in a noisy eject, which happens all too often when users * are ejecting their PC Cards. * * We use this method in preference to checking to see if the * card is still there because the check suffers from a race * condition in the bouncing case. */ #define DELTA (CBB_SOCKET_MASK_CD) if (sockevent & DELTA) { cbb_clrb(sc, CBB_SOCKET_MASK, DELTA); cbb_set(sc, CBB_SOCKET_EVENT, DELTA); sc->cardok = 0; cbb_disable_func_intr(sc); wakeup(&sc->intrhand); } #undef DELTA /* * Wakeup anybody waiting for a power interrupt. We have to * use atomic_add_int for wakups on other cores. */ if (sockevent & CBB_SOCKET_EVENT_POWER) { cbb_clrb(sc, CBB_SOCKET_MASK, CBB_SOCKET_EVENT_POWER); cbb_set(sc, CBB_SOCKET_EVENT, CBB_SOCKET_EVENT_POWER); atomic_add_int(&sc->powerintr, 1); wakeup((void *)&sc->powerintr); } /* * Status change interrupts aren't presently used in the * rest of the driver. For now, just ACK them. 
*/ if (sockevent & CBB_SOCKET_EVENT_CSTS) cbb_set(sc, CBB_SOCKET_EVENT, CBB_SOCKET_EVENT_CSTS); retval = FILTER_HANDLED; } return retval; } #if defined(NEW_PCIB) && defined(PCI_RES_BUS) static struct resource * cbb_pci_alloc_resource(device_t bus, device_t child, int type, int *rid, rman_res_t start, rman_res_t end, rman_res_t count, u_int flags) { struct cbb_softc *sc; sc = device_get_softc(bus); if (type == PCI_RES_BUS) return (pcib_alloc_subbus(&sc->bus, child, rid, start, end, count, flags)); return (cbb_alloc_resource(bus, child, type, rid, start, end, count, flags)); } static int cbb_pci_adjust_resource(device_t bus, device_t child, int type, struct resource *r, rman_res_t start, rman_res_t end) { struct cbb_softc *sc; sc = device_get_softc(bus); if (type == PCI_RES_BUS) { if (!rman_is_region_manager(r, &sc->bus.rman)) return (EINVAL); return (rman_adjust_resource(r, start, end)); } return (bus_generic_adjust_resource(bus, child, type, r, start, end)); } static int cbb_pci_release_resource(device_t bus, device_t child, int type, int rid, struct resource *r) { struct cbb_softc *sc; int error; sc = device_get_softc(bus); if (type == PCI_RES_BUS) { if (!rman_is_region_manager(r, &sc->bus.rman)) return (EINVAL); if (rman_get_flags(r) & RF_ACTIVE) { error = bus_deactivate_resource(child, type, rid, r); if (error) return (error); } return (rman_release_resource(r)); } return (cbb_release_resource(bus, child, type, rid, r)); } #endif /************************************************************************/ /* PCI compat methods */ /************************************************************************/ static int cbb_maxslots(device_t brdev) { return (0); } static uint32_t cbb_read_config(device_t brdev, u_int b, u_int s, u_int f, u_int reg, int width) { /* * Pass through to the next ppb up the chain (i.e. our grandparent). */ return (PCIB_READ_CONFIG(device_get_parent(device_get_parent(brdev)), b, s, f, reg, width)); } static void cbb_write_config(device_t brdev, u_int b, u_int s, u_int f, u_int reg, uint32_t val, int width) { /* * Pass through to the next ppb up the chain (i.e. our grandparent). */ PCIB_WRITE_CONFIG(device_get_parent(device_get_parent(brdev)), b, s, f, reg, val, width); } static int cbb_pci_suspend(device_t brdev) { int error = 0; struct cbb_softc *sc = device_get_softc(brdev); error = bus_generic_suspend(brdev); if (error != 0) return (error); cbb_set(sc, CBB_SOCKET_MASK, 0); /* Quiet hardware */ sc->cardok = 0; /* Card is bogus now */ return (0); } static int cbb_pci_resume(device_t brdev) { int error = 0; struct cbb_softc *sc = (struct cbb_softc *)device_get_softc(brdev); uint32_t tmp; /* * In the APM and early ACPI era, BIOSes saved the PCI config * registers. As chips became more complicated, that functionality moved * into the ACPI code / tables. We must therefore, restore the settings * we made here to make sure the device come back. Transitions to Dx * from D0 and back to D0 cause the bridge to lose its config space, so * all the bus mappings and such are preserved. * * The PCI layer handles standard PCI registers like the * command register and BARs, but cbb-specific registers are * handled here. */ sc->chipinit(sc); /* reset interrupt -- Do we really need to do this? */ tmp = cbb_get(sc, CBB_SOCKET_EVENT); cbb_set(sc, CBB_SOCKET_EVENT, tmp); /* CSC Interrupt: Card detect interrupt on */ cbb_setb(sc, CBB_SOCKET_MASK, CBB_SOCKET_MASK_CD); /* Signal the thread to wakeup. 
*/ wakeup(&sc->intrhand); error = bus_generic_resume(brdev); return (error); } static device_method_t cbb_methods[] = { /* Device interface */ DEVMETHOD(device_probe, cbb_pci_probe), DEVMETHOD(device_attach, cbb_pci_attach), DEVMETHOD(device_detach, cbb_pci_detach), DEVMETHOD(device_shutdown, cbb_pci_shutdown), DEVMETHOD(device_suspend, cbb_pci_suspend), DEVMETHOD(device_resume, cbb_pci_resume), /* bus methods */ DEVMETHOD(bus_read_ivar, cbb_read_ivar), DEVMETHOD(bus_write_ivar, cbb_write_ivar), #if defined(NEW_PCIB) && defined(PCI_RES_BUS) DEVMETHOD(bus_alloc_resource, cbb_pci_alloc_resource), DEVMETHOD(bus_adjust_resource, cbb_pci_adjust_resource), DEVMETHOD(bus_release_resource, cbb_pci_release_resource), #else DEVMETHOD(bus_alloc_resource, cbb_alloc_resource), DEVMETHOD(bus_release_resource, cbb_release_resource), #endif DEVMETHOD(bus_activate_resource, cbb_activate_resource), DEVMETHOD(bus_deactivate_resource, cbb_deactivate_resource), DEVMETHOD(bus_driver_added, cbb_driver_added), DEVMETHOD(bus_child_detached, cbb_child_detached), DEVMETHOD(bus_setup_intr, cbb_setup_intr), DEVMETHOD(bus_teardown_intr, cbb_teardown_intr), DEVMETHOD(bus_child_present, cbb_child_present), /* 16-bit card interface */ DEVMETHOD(card_set_res_flags, cbb_pcic_set_res_flags), DEVMETHOD(card_set_memory_offset, cbb_pcic_set_memory_offset), /* power interface */ DEVMETHOD(power_enable_socket, cbb_power_enable_socket), DEVMETHOD(power_disable_socket, cbb_power_disable_socket), /* pcib compatibility interface */ DEVMETHOD(pcib_maxslots, cbb_maxslots), DEVMETHOD(pcib_read_config, cbb_read_config), DEVMETHOD(pcib_write_config, cbb_write_config), DEVMETHOD(pcib_route_interrupt, cbb_route_interrupt), DEVMETHOD_END }; static driver_t cbb_driver = { "cbb", cbb_methods, sizeof(struct cbb_softc) }; DRIVER_MODULE(cbb, pci, cbb_driver, cbb_devclass, 0, 0); +MODULE_PNP_INFO("W32:vendor/device;D:#", pci, cbb, yc_chipsets, + nitems(yc_chipsets) - 1); MODULE_DEPEND(cbb, exca, 1, 1, 1); Index: head/sys/dev/pcn/if_pcn.c =================================================================== --- head/sys/dev/pcn/if_pcn.c (revision 338948) +++ head/sys/dev/pcn/if_pcn.c (revision 338949) @@ -1,1520 +1,1522 @@ /*- * SPDX-License-Identifier: BSD-4-Clause * * Copyright (c) 2000 Berkeley Software Design, Inc. * Copyright (c) 1997, 1998, 1999, 2000 * Bill Paul . All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed by Bill Paul. * 4. Neither the name of the author nor the names of any co-contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. 
IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF * THE POSSIBILITY OF SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); /* * AMD Am79c972 fast ethernet PCI NIC driver. Datasheets are available * from http://www.amd.com. * * The AMD PCnet/PCI controllers are more advanced and functional * versions of the venerable 7990 LANCE. The PCnet/PCI chips retain * backwards compatibility with the LANCE and thus can be made * to work with older LANCE drivers. This is in fact how the * PCnet/PCI chips were supported in FreeBSD originally. The trouble * is that the PCnet/PCI devices offer several performance enhancements * which can't be exploited in LANCE compatibility mode. Chief among * these enhancements is the ability to perform PCI DMA operations * using 32-bit addressing (which eliminates the need for ISA * bounce-buffering), and special receive buffer alignment (which * allows the receive handler to pass packets to the upper protocol * layers without copying on both the x86 and alpha platforms). */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include /* for vtophys */ #include /* for vtophys */ #include #include #include #include #include #include #include #include #define PCN_USEIOSPACE #include MODULE_DEPEND(pcn, pci, 1, 1, 1); MODULE_DEPEND(pcn, ether, 1, 1, 1); MODULE_DEPEND(pcn, miibus, 1, 1, 1); /* "device miibus" required. See GENERIC if you get errors here. */ #include "miibus_if.h" /* * Various supported device vendors/types and their names. 
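 *
 * Note: this table now also backs the MODULE_PNP_INFO() declaration
 * added after DRIVER_MODULE() below.  The "U16:vendor; U16:device"
 * descriptor names the first two 16-bit members of each entry, and
 * nitems(pcn_devs) - 1 skips the { 0, 0, NULL } terminator, so that
 * tools such as devmatch(8) can match PCI IDs and auto-load pcn(4).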
*/ static const struct pcn_type pcn_devs[] = { { PCN_VENDORID, PCN_DEVICEID_PCNET, "AMD PCnet/PCI 10/100BaseTX" }, { PCN_VENDORID, PCN_DEVICEID_HOME, "AMD PCnet/Home HomePNA" }, { 0, 0, NULL } }; static const struct pcn_chipid { u_int32_t id; const char *name; } pcn_chipid[] = { { Am79C971, "Am79C971" }, { Am79C972, "Am79C972" }, { Am79C973, "Am79C973" }, { Am79C978, "Am79C978" }, { Am79C975, "Am79C975" }, { Am79C976, "Am79C976" }, { 0, NULL }, }; static const char *pcn_chipid_name(u_int32_t); static u_int32_t pcn_chip_id(device_t); static const struct pcn_type *pcn_match(u_int16_t, u_int16_t); static u_int32_t pcn_csr_read(struct pcn_softc *, int); static u_int16_t pcn_csr_read16(struct pcn_softc *, int); static u_int16_t pcn_bcr_read16(struct pcn_softc *, int); static void pcn_csr_write(struct pcn_softc *, int, int); static u_int32_t pcn_bcr_read(struct pcn_softc *, int); static void pcn_bcr_write(struct pcn_softc *, int, int); static int pcn_probe(device_t); static int pcn_attach(device_t); static int pcn_detach(device_t); static int pcn_newbuf(struct pcn_softc *, int, struct mbuf *); static int pcn_encap(struct pcn_softc *, struct mbuf *, u_int32_t *); static void pcn_rxeof(struct pcn_softc *); static void pcn_txeof(struct pcn_softc *); static void pcn_intr(void *); static void pcn_tick(void *); static void pcn_start(struct ifnet *); static void pcn_start_locked(struct ifnet *); static int pcn_ioctl(struct ifnet *, u_long, caddr_t); static void pcn_init(void *); static void pcn_init_locked(struct pcn_softc *); static void pcn_stop(struct pcn_softc *); static void pcn_watchdog(struct pcn_softc *); static int pcn_shutdown(device_t); static int pcn_ifmedia_upd(struct ifnet *); static void pcn_ifmedia_sts(struct ifnet *, struct ifmediareq *); static int pcn_miibus_readreg(device_t, int, int); static int pcn_miibus_writereg(device_t, int, int, int); static void pcn_miibus_statchg(device_t); static void pcn_setfilt(struct ifnet *); static void pcn_setmulti(struct pcn_softc *); static void pcn_reset(struct pcn_softc *); static int pcn_list_rx_init(struct pcn_softc *); static int pcn_list_tx_init(struct pcn_softc *); #ifdef PCN_USEIOSPACE #define PCN_RES SYS_RES_IOPORT #define PCN_RID PCN_PCI_LOIO #else #define PCN_RES SYS_RES_MEMORY #define PCN_RID PCN_PCI_LOMEM #endif static device_method_t pcn_methods[] = { /* Device interface */ DEVMETHOD(device_probe, pcn_probe), DEVMETHOD(device_attach, pcn_attach), DEVMETHOD(device_detach, pcn_detach), DEVMETHOD(device_shutdown, pcn_shutdown), /* MII interface */ DEVMETHOD(miibus_readreg, pcn_miibus_readreg), DEVMETHOD(miibus_writereg, pcn_miibus_writereg), DEVMETHOD(miibus_statchg, pcn_miibus_statchg), DEVMETHOD_END }; static driver_t pcn_driver = { "pcn", pcn_methods, sizeof(struct pcn_softc) }; static devclass_t pcn_devclass; DRIVER_MODULE(pcn, pci, pcn_driver, pcn_devclass, 0, 0); +MODULE_PNP_INFO("U16:vendor; U16:device", pci, pcn, pcn_devs, + nitems(pcn_devs) - 1); DRIVER_MODULE(miibus, pcn, miibus_driver, miibus_devclass, 0, 0); #define PCN_CSR_SETBIT(sc, reg, x) \ pcn_csr_write(sc, reg, pcn_csr_read(sc, reg) | (x)) #define PCN_CSR_CLRBIT(sc, reg, x) \ pcn_csr_write(sc, reg, pcn_csr_read(sc, reg) & ~(x)) #define PCN_BCR_SETBIT(sc, reg, x) \ pcn_bcr_write(sc, reg, pcn_bcr_read(sc, reg) | (x)) #define PCN_BCR_CLRBIT(sc, reg, x) \ pcn_bcr_write(sc, reg, pcn_bcr_read(sc, reg) & ~(x)) static u_int32_t pcn_csr_read(sc, reg) struct pcn_softc *sc; int reg; { CSR_WRITE_4(sc, PCN_IO32_RAP, reg); return(CSR_READ_4(sc, PCN_IO32_RDP)); } static u_int16_t 
pcn_csr_read16(sc, reg) struct pcn_softc *sc; int reg; { CSR_WRITE_2(sc, PCN_IO16_RAP, reg); return(CSR_READ_2(sc, PCN_IO16_RDP)); } static void pcn_csr_write(sc, reg, val) struct pcn_softc *sc; int reg; int val; { CSR_WRITE_4(sc, PCN_IO32_RAP, reg); CSR_WRITE_4(sc, PCN_IO32_RDP, val); return; } static u_int32_t pcn_bcr_read(sc, reg) struct pcn_softc *sc; int reg; { CSR_WRITE_4(sc, PCN_IO32_RAP, reg); return(CSR_READ_4(sc, PCN_IO32_BDP)); } static u_int16_t pcn_bcr_read16(sc, reg) struct pcn_softc *sc; int reg; { CSR_WRITE_2(sc, PCN_IO16_RAP, reg); return(CSR_READ_2(sc, PCN_IO16_BDP)); } static void pcn_bcr_write(sc, reg, val) struct pcn_softc *sc; int reg; int val; { CSR_WRITE_4(sc, PCN_IO32_RAP, reg); CSR_WRITE_4(sc, PCN_IO32_BDP, val); return; } static int pcn_miibus_readreg(dev, phy, reg) device_t dev; int phy, reg; { struct pcn_softc *sc; int val; sc = device_get_softc(dev); /* * At least Am79C971 with DP83840A wedge when isolating the * external PHY so we can't allow multiple external PHYs. * There are cards that use Am79C971 with both the internal * and an external PHY though. * For internal PHYs it doesn't really matter whether we can * isolate the remaining internal and the external ones in * the PHY drivers as the internal PHYs have to be enabled * individually in PCN_BCR_PHYSEL, PCN_CSR_MODE, etc. * With Am79C97{3,5,8} we don't support switching beetween * the internal and external PHYs, yet, so we can't allow * multiple PHYs with these either. * Am79C97{2,6} actually only support external PHYs (not * connectable internal ones respond at the usual addresses, * which don't hurt if we let them show up on the bus) and * isolating them works. */ if (((sc->pcn_type == Am79C971 && phy != PCN_PHYAD_10BT) || sc->pcn_type == Am79C973 || sc->pcn_type == Am79C975 || sc->pcn_type == Am79C978) && sc->pcn_extphyaddr != -1 && phy != sc->pcn_extphyaddr) return(0); pcn_bcr_write(sc, PCN_BCR_MIIADDR, reg | (phy << 5)); val = pcn_bcr_read(sc, PCN_BCR_MIIDATA) & 0xFFFF; if (val == 0xFFFF) return(0); if (((sc->pcn_type == Am79C971 && phy != PCN_PHYAD_10BT) || sc->pcn_type == Am79C973 || sc->pcn_type == Am79C975 || sc->pcn_type == Am79C978) && sc->pcn_extphyaddr == -1) sc->pcn_extphyaddr = phy; return(val); } static int pcn_miibus_writereg(dev, phy, reg, data) device_t dev; int phy, reg, data; { struct pcn_softc *sc; sc = device_get_softc(dev); pcn_bcr_write(sc, PCN_BCR_MIIADDR, reg | (phy << 5)); pcn_bcr_write(sc, PCN_BCR_MIIDATA, data); return(0); } static void pcn_miibus_statchg(dev) device_t dev; { struct pcn_softc *sc; struct mii_data *mii; sc = device_get_softc(dev); mii = device_get_softc(sc->pcn_miibus); if ((mii->mii_media_active & IFM_GMASK) == IFM_FDX) { PCN_BCR_SETBIT(sc, PCN_BCR_DUPLEX, PCN_DUPLEX_FDEN); } else { PCN_BCR_CLRBIT(sc, PCN_BCR_DUPLEX, PCN_DUPLEX_FDEN); } return; } static void pcn_setmulti(sc) struct pcn_softc *sc; { struct ifnet *ifp; struct ifmultiaddr *ifma; u_int32_t h, i; u_int16_t hashes[4] = { 0, 0, 0, 0 }; ifp = sc->pcn_ifp; PCN_CSR_SETBIT(sc, PCN_CSR_EXTCTL1, PCN_EXTCTL1_SPND); if (ifp->if_flags & IFF_ALLMULTI || ifp->if_flags & IFF_PROMISC) { for (i = 0; i < 4; i++) pcn_csr_write(sc, PCN_CSR_MAR0 + i, 0xFFFF); PCN_CSR_CLRBIT(sc, PCN_CSR_EXTCTL1, PCN_EXTCTL1_SPND); return; } /* first, zot all the existing hash bits */ for (i = 0; i < 4; i++) pcn_csr_write(sc, PCN_CSR_MAR0 + i, 0); /* now program new ones */ if_maddr_rlock(ifp); CK_STAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) { if (ifma->ifma_addr->sa_family != AF_LINK) continue; h = 
ether_crc32_le(LLADDR((struct sockaddr_dl *) ifma->ifma_addr), ETHER_ADDR_LEN) >> 26; hashes[h >> 4] |= 1 << (h & 0xF); } if_maddr_runlock(ifp); for (i = 0; i < 4; i++) pcn_csr_write(sc, PCN_CSR_MAR0 + i, hashes[i]); PCN_CSR_CLRBIT(sc, PCN_CSR_EXTCTL1, PCN_EXTCTL1_SPND); return; } static void pcn_reset(sc) struct pcn_softc *sc; { /* * Issue a reset by reading from the RESET register. * Note that we don't know if the chip is operating in * 16-bit or 32-bit mode at this point, so we attempt * to reset the chip both ways. If one fails, the other * will succeed. */ CSR_READ_2(sc, PCN_IO16_RESET); CSR_READ_4(sc, PCN_IO32_RESET); /* Wait a little while for the chip to get its brains in order. */ DELAY(1000); /* Select 32-bit (DWIO) mode */ CSR_WRITE_4(sc, PCN_IO32_RDP, 0); /* Select software style 3. */ pcn_bcr_write(sc, PCN_BCR_SSTYLE, PCN_SWSTYLE_PCNETPCI_BURST); return; } static const char * pcn_chipid_name(u_int32_t id) { const struct pcn_chipid *p; p = pcn_chipid; while (p->name) { if (id == p->id) return (p->name); p++; } return ("Unknown"); } static u_int32_t pcn_chip_id(device_t dev) { struct pcn_softc *sc; u_int32_t chip_id; sc = device_get_softc(dev); /* * Note: we can *NOT* put the chip into * 32-bit mode yet. The le(4) driver will only * work in 16-bit mode, and once the chip * goes into 32-bit mode, the only way to * get it out again is with a hardware reset. * So if pcn_probe() is called before the * le(4) driver's probe routine, the chip will * be locked into 32-bit operation and the * le(4) driver will be unable to attach to it. * Note II: if the chip happens to already * be in 32-bit mode, we still need to check * the chip ID, but first we have to detect * 32-bit mode using only 16-bit operations. * The safest way to do this is to read the * PCI subsystem ID from BCR23/24 and compare * that with the value read from PCI config * space. */ chip_id = pcn_bcr_read16(sc, PCN_BCR_PCISUBSYSID); chip_id <<= 16; chip_id |= pcn_bcr_read16(sc, PCN_BCR_PCISUBVENID); /* * Note III: the test for 0x10001000 is a hack to * pacify VMware, who's pseudo-PCnet interface is * broken. Reading the subsystem register from PCI * config space yields 0x00000000 while reading the * same value from I/O space yields 0x10001000. It's * not supposed to be that way. */ if (chip_id == pci_read_config(dev, PCIR_SUBVEND_0, 4) || chip_id == 0x10001000) { /* We're in 16-bit mode. */ chip_id = pcn_csr_read16(sc, PCN_CSR_CHIPID1); chip_id <<= 16; chip_id |= pcn_csr_read16(sc, PCN_CSR_CHIPID0); } else { /* We're in 32-bit mode. */ chip_id = pcn_csr_read(sc, PCN_CSR_CHIPID1); chip_id <<= 16; chip_id |= pcn_csr_read(sc, PCN_CSR_CHIPID0); } return (chip_id); } static const struct pcn_type * pcn_match(u_int16_t vid, u_int16_t did) { const struct pcn_type *t; t = pcn_devs; while (t->pcn_name != NULL) { if ((vid == t->pcn_vid) && (did == t->pcn_did)) return (t); t++; } return (NULL); } /* * Probe for an AMD chip. Check the PCI vendor and device * IDs against our list and return a device name if we find a match. */ static int pcn_probe(dev) device_t dev; { const struct pcn_type *t; struct pcn_softc *sc; int rid; u_int32_t chip_id; t = pcn_match(pci_get_vendor(dev), pci_get_device(dev)); if (t == NULL) return (ENXIO); sc = device_get_softc(dev); /* * Temporarily map the I/O space so we can read the chip ID register. 
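 * The mapping is released again as soon as the chip ID has been
 * read: probe routines should not hold on to resources, and
 * pcn_attach() re-allocates the same BAR for the lifetime of the
 * driver.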
*/ rid = PCN_RID; sc->pcn_res = bus_alloc_resource_any(dev, PCN_RES, &rid, RF_ACTIVE); if (sc->pcn_res == NULL) { device_printf(dev, "couldn't map ports/memory\n"); return(ENXIO); } sc->pcn_btag = rman_get_bustag(sc->pcn_res); sc->pcn_bhandle = rman_get_bushandle(sc->pcn_res); chip_id = pcn_chip_id(dev); bus_release_resource(dev, PCN_RES, PCN_RID, sc->pcn_res); switch((chip_id >> 12) & PART_MASK) { case Am79C971: case Am79C972: case Am79C973: case Am79C975: case Am79C976: case Am79C978: break; default: return(ENXIO); } device_set_desc(dev, t->pcn_name); return(BUS_PROBE_DEFAULT); } /* * Attach the interface. Allocate softc structures, do ifmedia * setup and ethernet/BPF attach. */ static int pcn_attach(dev) device_t dev; { u_int32_t eaddr[2]; struct pcn_softc *sc; struct mii_data *mii; struct mii_softc *miisc; struct ifnet *ifp; int error = 0, rid; sc = device_get_softc(dev); /* Initialize our mutex. */ mtx_init(&sc->pcn_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK, MTX_DEF); /* * Map control/status registers. */ pci_enable_busmaster(dev); /* Retrieve the chip ID */ sc->pcn_type = (pcn_chip_id(dev) >> 12) & PART_MASK; device_printf(dev, "Chip ID %04x (%s)\n", sc->pcn_type, pcn_chipid_name(sc->pcn_type)); rid = PCN_RID; sc->pcn_res = bus_alloc_resource_any(dev, PCN_RES, &rid, RF_ACTIVE); if (sc->pcn_res == NULL) { device_printf(dev, "couldn't map ports/memory\n"); error = ENXIO; goto fail; } sc->pcn_btag = rman_get_bustag(sc->pcn_res); sc->pcn_bhandle = rman_get_bushandle(sc->pcn_res); /* Allocate interrupt */ rid = 0; sc->pcn_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, RF_SHAREABLE | RF_ACTIVE); if (sc->pcn_irq == NULL) { device_printf(dev, "couldn't map interrupt\n"); error = ENXIO; goto fail; } /* Reset the adapter. */ pcn_reset(sc); /* * Get station address from the EEPROM. */ eaddr[0] = CSR_READ_4(sc, PCN_IO32_APROM00); eaddr[1] = CSR_READ_4(sc, PCN_IO32_APROM01); callout_init_mtx(&sc->pcn_stat_callout, &sc->pcn_mtx, 0); sc->pcn_ldata = contigmalloc(sizeof(struct pcn_list_data), M_DEVBUF, M_NOWAIT, 0, 0xffffffff, PAGE_SIZE, 0); if (sc->pcn_ldata == NULL) { device_printf(dev, "no memory for list buffers!\n"); error = ENXIO; goto fail; } bzero(sc->pcn_ldata, sizeof(struct pcn_list_data)); ifp = sc->pcn_ifp = if_alloc(IFT_ETHER); if (ifp == NULL) { device_printf(dev, "can not if_alloc()\n"); error = ENOSPC; goto fail; } ifp->if_softc = sc; if_initname(ifp, device_get_name(dev), device_get_unit(dev)); ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; ifp->if_ioctl = pcn_ioctl; ifp->if_start = pcn_start; ifp->if_init = pcn_init; ifp->if_snd.ifq_maxlen = PCN_TX_LIST_CNT - 1; /* * Do MII setup. * See the comment in pcn_miibus_readreg() for why we can't * universally pass MIIF_NOISOLATE here. */ sc->pcn_extphyaddr = -1; error = mii_attach(dev, &sc->pcn_miibus, ifp, pcn_ifmedia_upd, pcn_ifmedia_sts, BMSR_DEFCAPMASK, MII_PHY_ANY, MII_OFFSET_ANY, 0); if (error != 0) { device_printf(dev, "attaching PHYs failed\n"); goto fail; } /* * Record the media instances of internal PHYs, which map the * built-in interfaces to the MII, so we can set the active * PHY/port based on the currently selected media. */ sc->pcn_inst_10bt = -1; mii = device_get_softc(sc->pcn_miibus); LIST_FOREACH(miisc, &mii->mii_phys, mii_list) { switch (miisc->mii_phy) { case PCN_PHYAD_10BT: sc->pcn_inst_10bt = miisc->mii_inst; break; /* * XXX deal with the Am79C97{3,5} internal 100baseT * and the Am79C978 internal HomePNA PHYs. */ } } /* * Call MI attach routine. 
*/ ether_ifattach(ifp, (u_int8_t *) eaddr); /* Hook interrupt last to avoid having to lock softc */ error = bus_setup_intr(dev, sc->pcn_irq, INTR_TYPE_NET | INTR_MPSAFE, NULL, pcn_intr, sc, &sc->pcn_intrhand); if (error) { device_printf(dev, "couldn't set up irq\n"); ether_ifdetach(ifp); goto fail; } fail: if (error) pcn_detach(dev); return(error); } /* * Shutdown hardware and free up resources. This can be called any * time after the mutex has been initialized. It is called in both * the error case in attach and the normal detach case so it needs * to be careful about only freeing resources that have actually been * allocated. */ static int pcn_detach(dev) device_t dev; { struct pcn_softc *sc; struct ifnet *ifp; sc = device_get_softc(dev); ifp = sc->pcn_ifp; KASSERT(mtx_initialized(&sc->pcn_mtx), ("pcn mutex not initialized")); /* These should only be active if attach succeeded */ if (device_is_attached(dev)) { PCN_LOCK(sc); pcn_reset(sc); pcn_stop(sc); PCN_UNLOCK(sc); callout_drain(&sc->pcn_stat_callout); ether_ifdetach(ifp); } if (sc->pcn_miibus) device_delete_child(dev, sc->pcn_miibus); bus_generic_detach(dev); if (sc->pcn_intrhand) bus_teardown_intr(dev, sc->pcn_irq, sc->pcn_intrhand); if (sc->pcn_irq) bus_release_resource(dev, SYS_RES_IRQ, 0, sc->pcn_irq); if (sc->pcn_res) bus_release_resource(dev, PCN_RES, PCN_RID, sc->pcn_res); if (ifp) if_free(ifp); if (sc->pcn_ldata) { contigfree(sc->pcn_ldata, sizeof(struct pcn_list_data), M_DEVBUF); } mtx_destroy(&sc->pcn_mtx); return(0); } /* * Initialize the transmit descriptors. */ static int pcn_list_tx_init(sc) struct pcn_softc *sc; { struct pcn_list_data *ld; struct pcn_ring_data *cd; int i; cd = &sc->pcn_cdata; ld = sc->pcn_ldata; for (i = 0; i < PCN_TX_LIST_CNT; i++) { cd->pcn_tx_chain[i] = NULL; ld->pcn_tx_list[i].pcn_tbaddr = 0; ld->pcn_tx_list[i].pcn_txctl = 0; ld->pcn_tx_list[i].pcn_txstat = 0; } cd->pcn_tx_prod = cd->pcn_tx_cons = cd->pcn_tx_cnt = 0; return(0); } /* * Initialize the RX descriptors and allocate mbufs for them. */ static int pcn_list_rx_init(sc) struct pcn_softc *sc; { struct pcn_ring_data *cd; int i; cd = &sc->pcn_cdata; for (i = 0; i < PCN_RX_LIST_CNT; i++) { if (pcn_newbuf(sc, i, NULL) == ENOBUFS) return(ENOBUFS); } cd->pcn_rx_prod = 0; return(0); } /* * Initialize an RX descriptor and attach an MBUF cluster. */ static int pcn_newbuf(sc, idx, m) struct pcn_softc *sc; int idx; struct mbuf *m; { struct mbuf *m_new = NULL; struct pcn_rx_desc *c; c = &sc->pcn_ldata->pcn_rx_list[idx]; if (m == NULL) { MGETHDR(m_new, M_NOWAIT, MT_DATA); if (m_new == NULL) return(ENOBUFS); if (!(MCLGET(m_new, M_NOWAIT))) { m_freem(m_new); return(ENOBUFS); } m_new->m_len = m_new->m_pkthdr.len = MCLBYTES; } else { m_new = m; m_new->m_len = m_new->m_pkthdr.len = MCLBYTES; m_new->m_data = m_new->m_ext.ext_buf; } m_adj(m_new, ETHER_ALIGN); sc->pcn_cdata.pcn_rx_chain[idx] = m_new; c->pcn_rbaddr = vtophys(mtod(m_new, caddr_t)); c->pcn_bufsz = (~(PCN_RXLEN) + 1) & PCN_RXLEN_BUFSZ; c->pcn_bufsz |= PCN_RXLEN_MBO; c->pcn_rxstat = PCN_RXSTAT_STP|PCN_RXSTAT_ENP|PCN_RXSTAT_OWN; return(0); } /* * A frame has been uploaded: pass the resulting mbuf chain up to * the higher level protocols. 
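 * The loop below walks the RX ring starting at pcn_rx_prod and only
 * touches descriptors the chip has handed back (PCN_OWN_RXDESC); on
 * error, or when no replacement mbuf can be allocated, the old
 * cluster is simply recycled into the same slot.  The driver lock
 * is dropped around if_input() so the stack runs without it held.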
*/ static void pcn_rxeof(sc) struct pcn_softc *sc; { struct mbuf *m; struct ifnet *ifp; struct pcn_rx_desc *cur_rx; int i; PCN_LOCK_ASSERT(sc); ifp = sc->pcn_ifp; i = sc->pcn_cdata.pcn_rx_prod; while(PCN_OWN_RXDESC(&sc->pcn_ldata->pcn_rx_list[i])) { cur_rx = &sc->pcn_ldata->pcn_rx_list[i]; m = sc->pcn_cdata.pcn_rx_chain[i]; sc->pcn_cdata.pcn_rx_chain[i] = NULL; /* * If an error occurs, update stats, clear the * status word and leave the mbuf cluster in place: * it should simply get re-used next time this descriptor * comes up in the ring. */ if (cur_rx->pcn_rxstat & PCN_RXSTAT_ERR) { if_inc_counter(ifp, IFCOUNTER_IERRORS, 1); pcn_newbuf(sc, i, m); PCN_INC(i, PCN_RX_LIST_CNT); continue; } if (pcn_newbuf(sc, i, NULL)) { /* Ran out of mbufs; recycle this one. */ pcn_newbuf(sc, i, m); if_inc_counter(ifp, IFCOUNTER_IERRORS, 1); PCN_INC(i, PCN_RX_LIST_CNT); continue; } PCN_INC(i, PCN_RX_LIST_CNT); /* No errors; receive the packet. */ if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1); m->m_len = m->m_pkthdr.len = cur_rx->pcn_rxlen - ETHER_CRC_LEN; m->m_pkthdr.rcvif = ifp; PCN_UNLOCK(sc); (*ifp->if_input)(ifp, m); PCN_LOCK(sc); } sc->pcn_cdata.pcn_rx_prod = i; return; } /* * A frame was downloaded to the chip. It's safe for us to clean up * the list buffers. */ static void pcn_txeof(sc) struct pcn_softc *sc; { struct pcn_tx_desc *cur_tx = NULL; struct ifnet *ifp; u_int32_t idx; ifp = sc->pcn_ifp; /* * Go through our tx list and free mbufs for those * frames that have been transmitted. */ idx = sc->pcn_cdata.pcn_tx_cons; while (idx != sc->pcn_cdata.pcn_tx_prod) { cur_tx = &sc->pcn_ldata->pcn_tx_list[idx]; if (!PCN_OWN_TXDESC(cur_tx)) break; if (!(cur_tx->pcn_txctl & PCN_TXCTL_ENP)) { sc->pcn_cdata.pcn_tx_cnt--; PCN_INC(idx, PCN_TX_LIST_CNT); continue; } if (cur_tx->pcn_txctl & PCN_TXCTL_ERR) { if_inc_counter(ifp, IFCOUNTER_OERRORS, 1); if (cur_tx->pcn_txstat & PCN_TXSTAT_EXDEF) if_inc_counter(ifp, IFCOUNTER_COLLISIONS, 1); if (cur_tx->pcn_txstat & PCN_TXSTAT_RTRY) if_inc_counter(ifp, IFCOUNTER_COLLISIONS, 1); } if_inc_counter(ifp, IFCOUNTER_COLLISIONS, cur_tx->pcn_txstat & PCN_TXSTAT_TRC); if_inc_counter(ifp, IFCOUNTER_OPACKETS, 1); if (sc->pcn_cdata.pcn_tx_chain[idx] != NULL) { m_freem(sc->pcn_cdata.pcn_tx_chain[idx]); sc->pcn_cdata.pcn_tx_chain[idx] = NULL; } sc->pcn_cdata.pcn_tx_cnt--; PCN_INC(idx, PCN_TX_LIST_CNT); } if (idx != sc->pcn_cdata.pcn_tx_cons) { /* Some buffers have been freed. */ sc->pcn_cdata.pcn_tx_cons = idx; ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; } sc->pcn_timer = (sc->pcn_cdata.pcn_tx_cnt == 0) ? 
0 : 5; return; } static void pcn_tick(xsc) void *xsc; { struct pcn_softc *sc; struct mii_data *mii; struct ifnet *ifp; sc = xsc; ifp = sc->pcn_ifp; PCN_LOCK_ASSERT(sc); mii = device_get_softc(sc->pcn_miibus); mii_tick(mii); /* link just died */ if (sc->pcn_link && !(mii->mii_media_status & IFM_ACTIVE)) sc->pcn_link = 0; /* link just came up, restart */ if (!sc->pcn_link && mii->mii_media_status & IFM_ACTIVE && IFM_SUBTYPE(mii->mii_media_active) != IFM_NONE) { sc->pcn_link++; if (ifp->if_snd.ifq_head != NULL) pcn_start_locked(ifp); } if (sc->pcn_timer > 0 && --sc->pcn_timer == 0) pcn_watchdog(sc); callout_reset(&sc->pcn_stat_callout, hz, pcn_tick, sc); return; } static void pcn_intr(arg) void *arg; { struct pcn_softc *sc; struct ifnet *ifp; u_int32_t status; sc = arg; ifp = sc->pcn_ifp; PCN_LOCK(sc); /* Suppress unwanted interrupts */ if (!(ifp->if_flags & IFF_UP)) { pcn_stop(sc); PCN_UNLOCK(sc); return; } CSR_WRITE_4(sc, PCN_IO32_RAP, PCN_CSR_CSR); while ((status = CSR_READ_4(sc, PCN_IO32_RDP)) & PCN_CSR_INTR) { CSR_WRITE_4(sc, PCN_IO32_RDP, status); if (status & PCN_CSR_RINT) pcn_rxeof(sc); if (status & PCN_CSR_TINT) pcn_txeof(sc); if (status & PCN_CSR_ERR) { pcn_init_locked(sc); break; } } if (ifp->if_snd.ifq_head != NULL) pcn_start_locked(ifp); PCN_UNLOCK(sc); return; } /* * Encapsulate an mbuf chain in a descriptor by coupling the mbuf data * pointers to the fragment pointers. */ static int pcn_encap(sc, m_head, txidx) struct pcn_softc *sc; struct mbuf *m_head; u_int32_t *txidx; { struct pcn_tx_desc *f = NULL; struct mbuf *m; int frag, cur, cnt = 0; /* * Start packing the mbufs in this chain into * the fragment pointers. Stop when we run out * of fragments or hit the end of the mbuf chain. */ m = m_head; cur = frag = *txidx; for (m = m_head; m != NULL; m = m->m_next) { if (m->m_len == 0) continue; if ((PCN_TX_LIST_CNT - (sc->pcn_cdata.pcn_tx_cnt + cnt)) < 2) return(ENOBUFS); f = &sc->pcn_ldata->pcn_tx_list[frag]; f->pcn_txctl = (~(m->m_len) + 1) & PCN_TXCTL_BUFSZ; f->pcn_txctl |= PCN_TXCTL_MBO; f->pcn_tbaddr = vtophys(mtod(m, vm_offset_t)); if (cnt == 0) f->pcn_txctl |= PCN_TXCTL_STP; else f->pcn_txctl |= PCN_TXCTL_OWN; cur = frag; PCN_INC(frag, PCN_TX_LIST_CNT); cnt++; } if (m != NULL) return(ENOBUFS); sc->pcn_cdata.pcn_tx_chain[cur] = m_head; sc->pcn_ldata->pcn_tx_list[cur].pcn_txctl |= PCN_TXCTL_ENP|PCN_TXCTL_ADD_FCS|PCN_TXCTL_MORE_LTINT; sc->pcn_ldata->pcn_tx_list[*txidx].pcn_txctl |= PCN_TXCTL_OWN; sc->pcn_cdata.pcn_tx_cnt += cnt; *txidx = frag; return(0); } /* * Main transmit routine. To avoid having to do mbuf copies, we put pointers * to the mbuf data regions directly in the transmit lists. We also save a * copy of the pointers since the transmit list fragment pointers are * physical addresses. */ static void pcn_start(ifp) struct ifnet *ifp; { struct pcn_softc *sc; sc = ifp->if_softc; PCN_LOCK(sc); pcn_start_locked(ifp); PCN_UNLOCK(sc); } static void pcn_start_locked(ifp) struct ifnet *ifp; { struct pcn_softc *sc; struct mbuf *m_head = NULL; u_int32_t idx; sc = ifp->if_softc; PCN_LOCK_ASSERT(sc); if (!sc->pcn_link) return; idx = sc->pcn_cdata.pcn_tx_prod; if (ifp->if_drv_flags & IFF_DRV_OACTIVE) return; while(sc->pcn_cdata.pcn_tx_chain[idx] == NULL) { IF_DEQUEUE(&ifp->if_snd, m_head); if (m_head == NULL) break; if (pcn_encap(sc, m_head, &idx)) { IF_PREPEND(&ifp->if_snd, m_head); ifp->if_drv_flags |= IFF_DRV_OACTIVE; break; } /* * If there's a BPF listener, bounce a copy of this frame * to him. 
*/ BPF_MTAP(ifp, m_head); } /* Transmit */ sc->pcn_cdata.pcn_tx_prod = idx; pcn_csr_write(sc, PCN_CSR_CSR, PCN_CSR_TX|PCN_CSR_INTEN); /* * Set a timeout in case the chip goes out to lunch. */ sc->pcn_timer = 5; return; } static void pcn_setfilt(ifp) struct ifnet *ifp; { struct pcn_softc *sc; sc = ifp->if_softc; /* If we want promiscuous mode, set the allframes bit. */ if (ifp->if_flags & IFF_PROMISC) { PCN_CSR_SETBIT(sc, PCN_CSR_MODE, PCN_MODE_PROMISC); } else { PCN_CSR_CLRBIT(sc, PCN_CSR_MODE, PCN_MODE_PROMISC); } /* Set the capture broadcast bit to capture broadcast frames. */ if (ifp->if_flags & IFF_BROADCAST) { PCN_CSR_CLRBIT(sc, PCN_CSR_MODE, PCN_MODE_RXNOBROAD); } else { PCN_CSR_SETBIT(sc, PCN_CSR_MODE, PCN_MODE_RXNOBROAD); } return; } static void pcn_init(xsc) void *xsc; { struct pcn_softc *sc = xsc; PCN_LOCK(sc); pcn_init_locked(sc); PCN_UNLOCK(sc); } static void pcn_init_locked(sc) struct pcn_softc *sc; { struct ifnet *ifp = sc->pcn_ifp; struct mii_data *mii = NULL; struct ifmedia_entry *ife; PCN_LOCK_ASSERT(sc); /* * Cancel pending I/O and free all RX/TX buffers. */ pcn_stop(sc); pcn_reset(sc); mii = device_get_softc(sc->pcn_miibus); ife = mii->mii_media.ifm_cur; /* Set MAC address */ pcn_csr_write(sc, PCN_CSR_PAR0, ((u_int16_t *)IF_LLADDR(sc->pcn_ifp))[0]); pcn_csr_write(sc, PCN_CSR_PAR1, ((u_int16_t *)IF_LLADDR(sc->pcn_ifp))[1]); pcn_csr_write(sc, PCN_CSR_PAR2, ((u_int16_t *)IF_LLADDR(sc->pcn_ifp))[2]); /* Init circular RX list. */ if (pcn_list_rx_init(sc) == ENOBUFS) { if_printf(ifp, "initialization failed: no " "memory for rx buffers\n"); pcn_stop(sc); return; } /* * Init tx descriptors. */ pcn_list_tx_init(sc); /* Clear PCN_MISC_ASEL so we can set the port via PCN_CSR_MODE. */ PCN_BCR_CLRBIT(sc, PCN_BCR_MISCCFG, PCN_MISC_ASEL); /* * Set up the port based on the currently selected media. * For Am79C978 we've to unconditionally set PCN_PORT_MII and * set the PHY in PCN_BCR_PHYSEL instead. */ if (sc->pcn_type != Am79C978 && IFM_INST(ife->ifm_media) == sc->pcn_inst_10bt) pcn_csr_write(sc, PCN_CSR_MODE, PCN_PORT_10BASET); else pcn_csr_write(sc, PCN_CSR_MODE, PCN_PORT_MII); /* Set up RX filter. */ pcn_setfilt(ifp); /* * Load the multicast filter. */ pcn_setmulti(sc); /* * Load the addresses of the RX and TX lists. */ pcn_csr_write(sc, PCN_CSR_RXADDR0, vtophys(&sc->pcn_ldata->pcn_rx_list[0]) & 0xFFFF); pcn_csr_write(sc, PCN_CSR_RXADDR1, (vtophys(&sc->pcn_ldata->pcn_rx_list[0]) >> 16) & 0xFFFF); pcn_csr_write(sc, PCN_CSR_TXADDR0, vtophys(&sc->pcn_ldata->pcn_tx_list[0]) & 0xFFFF); pcn_csr_write(sc, PCN_CSR_TXADDR1, (vtophys(&sc->pcn_ldata->pcn_tx_list[0]) >> 16) & 0xFFFF); /* Set the RX and TX ring sizes. */ pcn_csr_write(sc, PCN_CSR_RXRINGLEN, (~PCN_RX_LIST_CNT) + 1); pcn_csr_write(sc, PCN_CSR_TXRINGLEN, (~PCN_TX_LIST_CNT) + 1); /* We're not using the initialization block. */ pcn_csr_write(sc, PCN_CSR_IAB1, 0); /* Enable fast suspend mode. */ PCN_CSR_SETBIT(sc, PCN_CSR_EXTCTL2, PCN_EXTCTL2_FASTSPNDE); /* * Enable burst read and write. Also set the no underflow * bit. This will avoid transmit underruns in certain * conditions while still providing decent performance. */ PCN_BCR_SETBIT(sc, PCN_BCR_BUSCTL, PCN_BUSCTL_NOUFLOW| PCN_BUSCTL_BREAD|PCN_BUSCTL_BWRITE); /* Enable graceful recovery from underflow. */ PCN_CSR_SETBIT(sc, PCN_CSR_IMR, PCN_IMR_DXSUFLO); /* Enable auto-padding of short TX frames. */ PCN_CSR_SETBIT(sc, PCN_CSR_TFEAT, PCN_TFEAT_PAD_TX); /* Disable MII autoneg (we handle this ourselves). 
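 * The chip's own autonegotiation engine is turned off here
 * (PCN_MIICTL_DANAS); media selection is instead driven through the
 * mii(4) layer by the mii_mediachg() call a few lines below.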
*/ PCN_BCR_SETBIT(sc, PCN_BCR_MIICTL, PCN_MIICTL_DANAS); if (sc->pcn_type == Am79C978) /* XXX support other PHYs? */ pcn_bcr_write(sc, PCN_BCR_PHYSEL, PCN_PHYSEL_PCNET|PCN_PHY_HOMEPNA); /* Enable interrupts and start the controller running. */ pcn_csr_write(sc, PCN_CSR_CSR, PCN_CSR_INTEN|PCN_CSR_START); mii_mediachg(mii); ifp->if_drv_flags |= IFF_DRV_RUNNING; ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; callout_reset(&sc->pcn_stat_callout, hz, pcn_tick, sc); return; } /* * Set media options. */ static int pcn_ifmedia_upd(ifp) struct ifnet *ifp; { struct pcn_softc *sc; sc = ifp->if_softc; PCN_LOCK(sc); /* * At least Am79C971 with DP83840A can wedge when switching * from the internal 10baseT PHY to the external PHY without * issuing pcn_reset(). For setting the port in PCN_CSR_MODE * the PCnet chip has to be powered down or stopped anyway * and although documented otherwise it doesn't take effect * until the next initialization. */ sc->pcn_link = 0; pcn_stop(sc); pcn_reset(sc); pcn_init_locked(sc); if (ifp->if_snd.ifq_head != NULL) pcn_start_locked(ifp); PCN_UNLOCK(sc); return(0); } /* * Report current media status. */ static void pcn_ifmedia_sts(ifp, ifmr) struct ifnet *ifp; struct ifmediareq *ifmr; { struct pcn_softc *sc; struct mii_data *mii; sc = ifp->if_softc; mii = device_get_softc(sc->pcn_miibus); PCN_LOCK(sc); mii_pollstat(mii); ifmr->ifm_active = mii->mii_media_active; ifmr->ifm_status = mii->mii_media_status; PCN_UNLOCK(sc); return; } static int pcn_ioctl(ifp, command, data) struct ifnet *ifp; u_long command; caddr_t data; { struct pcn_softc *sc = ifp->if_softc; struct ifreq *ifr = (struct ifreq *) data; struct mii_data *mii = NULL; int error = 0; switch(command) { case SIOCSIFFLAGS: PCN_LOCK(sc); if (ifp->if_flags & IFF_UP) { if (ifp->if_drv_flags & IFF_DRV_RUNNING && ifp->if_flags & IFF_PROMISC && !(sc->pcn_if_flags & IFF_PROMISC)) { PCN_CSR_SETBIT(sc, PCN_CSR_EXTCTL1, PCN_EXTCTL1_SPND); pcn_setfilt(ifp); PCN_CSR_CLRBIT(sc, PCN_CSR_EXTCTL1, PCN_EXTCTL1_SPND); pcn_csr_write(sc, PCN_CSR_CSR, PCN_CSR_INTEN|PCN_CSR_START); } else if (ifp->if_drv_flags & IFF_DRV_RUNNING && !(ifp->if_flags & IFF_PROMISC) && sc->pcn_if_flags & IFF_PROMISC) { PCN_CSR_SETBIT(sc, PCN_CSR_EXTCTL1, PCN_EXTCTL1_SPND); pcn_setfilt(ifp); PCN_CSR_CLRBIT(sc, PCN_CSR_EXTCTL1, PCN_EXTCTL1_SPND); pcn_csr_write(sc, PCN_CSR_CSR, PCN_CSR_INTEN|PCN_CSR_START); } else if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) pcn_init_locked(sc); } else { if (ifp->if_drv_flags & IFF_DRV_RUNNING) pcn_stop(sc); } sc->pcn_if_flags = ifp->if_flags; PCN_UNLOCK(sc); error = 0; break; case SIOCADDMULTI: case SIOCDELMULTI: PCN_LOCK(sc); pcn_setmulti(sc); PCN_UNLOCK(sc); error = 0; break; case SIOCGIFMEDIA: case SIOCSIFMEDIA: mii = device_get_softc(sc->pcn_miibus); error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command); break; default: error = ether_ioctl(ifp, command, data); break; } return(error); } static void pcn_watchdog(struct pcn_softc *sc) { struct ifnet *ifp; PCN_LOCK_ASSERT(sc); ifp = sc->pcn_ifp; if_inc_counter(ifp, IFCOUNTER_OERRORS, 1); if_printf(ifp, "watchdog timeout\n"); pcn_stop(sc); pcn_reset(sc); pcn_init_locked(sc); if (ifp->if_snd.ifq_head != NULL) pcn_start_locked(ifp); } /* * Stop the adapter and free any mbufs allocated to the * RX and TX lists. 
*/ static void pcn_stop(struct pcn_softc *sc) { int i; struct ifnet *ifp; PCN_LOCK_ASSERT(sc); ifp = sc->pcn_ifp; sc->pcn_timer = 0; callout_stop(&sc->pcn_stat_callout); /* Turn off interrupts */ PCN_CSR_CLRBIT(sc, PCN_CSR_CSR, PCN_CSR_INTEN); /* Stop adapter */ PCN_CSR_SETBIT(sc, PCN_CSR_CSR, PCN_CSR_STOP); sc->pcn_link = 0; /* * Free data in the RX lists. */ for (i = 0; i < PCN_RX_LIST_CNT; i++) { if (sc->pcn_cdata.pcn_rx_chain[i] != NULL) { m_freem(sc->pcn_cdata.pcn_rx_chain[i]); sc->pcn_cdata.pcn_rx_chain[i] = NULL; } } bzero((char *)&sc->pcn_ldata->pcn_rx_list, sizeof(sc->pcn_ldata->pcn_rx_list)); /* * Free the TX list buffers. */ for (i = 0; i < PCN_TX_LIST_CNT; i++) { if (sc->pcn_cdata.pcn_tx_chain[i] != NULL) { m_freem(sc->pcn_cdata.pcn_tx_chain[i]); sc->pcn_cdata.pcn_tx_chain[i] = NULL; } } bzero((char *)&sc->pcn_ldata->pcn_tx_list, sizeof(sc->pcn_ldata->pcn_tx_list)); ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE); return; } /* * Stop all chip I/O so that the kernel's probe routines don't * get confused by errant DMAs when rebooting. */ static int pcn_shutdown(device_t dev) { struct pcn_softc *sc; sc = device_get_softc(dev); PCN_LOCK(sc); pcn_reset(sc); pcn_stop(sc); PCN_UNLOCK(sc); return 0; } Index: head/sys/dev/ral/if_ral_pci.c =================================================================== --- head/sys/dev/ral/if_ral_pci.c (revision 338948) +++ head/sys/dev/ral/if_ral_pci.c (revision 338949) @@ -1,321 +1,323 @@ /*- * Copyright (c) 2005, 2006 * Damien Bergamini * * Permission to use, copy, modify, and distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ #include __FBSDID("$FreeBSD$"); /* * PCI/Cardbus front-end for the Ralink RT2560/RT2561/RT2561S/RT2661 driver. 
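 *
 * The front-end only handles PCI resources and interrupts; based on
 * the PCI device ID it selects one of the ral_rt2560/rt2661/rt2860
 * ops vectors below and forwards attach, detach, suspend, resume,
 * shutdown and intr through it.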
*/ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include MODULE_DEPEND(ral, pci, 1, 1, 1); MODULE_DEPEND(ral, firmware, 1, 1, 1); MODULE_DEPEND(ral, wlan, 1, 1, 1); MODULE_DEPEND(ral, wlan_amrr, 1, 1, 1); static int ral_msi_disable; TUNABLE_INT("hw.ral.msi_disable", &ral_msi_disable); struct ral_pci_ident { uint16_t vendor; uint16_t device; const char *name; }; static const struct ral_pci_ident ral_pci_ids[] = { { 0x1432, 0x7708, "Edimax RT2860" }, { 0x1432, 0x7711, "Edimax RT3591" }, { 0x1432, 0x7722, "Edimax RT3591" }, { 0x1432, 0x7727, "Edimax RT2860" }, { 0x1432, 0x7728, "Edimax RT2860" }, { 0x1432, 0x7738, "Edimax RT2860" }, { 0x1432, 0x7748, "Edimax RT2860" }, { 0x1432, 0x7758, "Edimax RT2860" }, { 0x1432, 0x7768, "Edimax RT2860" }, { 0x1462, 0x891a, "MSI RT3090" }, { 0x1814, 0x0201, "Ralink Technology RT2560" }, { 0x1814, 0x0301, "Ralink Technology RT2561S" }, { 0x1814, 0x0302, "Ralink Technology RT2561" }, { 0x1814, 0x0401, "Ralink Technology RT2661" }, { 0x1814, 0x0601, "Ralink Technology RT2860" }, { 0x1814, 0x0681, "Ralink Technology RT2890" }, { 0x1814, 0x0701, "Ralink Technology RT2760" }, { 0x1814, 0x0781, "Ralink Technology RT2790" }, { 0x1814, 0x3060, "Ralink Technology RT3060" }, { 0x1814, 0x3062, "Ralink Technology RT3062" }, { 0x1814, 0x3090, "Ralink Technology RT3090" }, { 0x1814, 0x3091, "Ralink Technology RT3091" }, { 0x1814, 0x3092, "Ralink Technology RT3092" }, { 0x1814, 0x3390, "Ralink Technology RT3390" }, { 0x1814, 0x3562, "Ralink Technology RT3562" }, { 0x1814, 0x3592, "Ralink Technology RT3592" }, { 0x1814, 0x3593, "Ralink Technology RT3593" }, { 0x1814, 0x5360, "Ralink Technology RT5390" }, { 0x1814, 0x5362, "Ralink Technology RT5392" }, { 0x1814, 0x5390, "Ralink Technology RT5390" }, { 0x1814, 0x5392, "Ralink Technology RT5392" }, { 0x1814, 0x539a, "Ralink Technology RT5390" }, { 0x1814, 0x539f, "Ralink Technology RT5390" }, { 0x1a3b, 0x1059, "AWT RT2890" }, { 0, 0, NULL } }; static const struct ral_opns { int (*attach)(device_t, int); int (*detach)(void *); void (*shutdown)(void *); void (*suspend)(void *); void (*resume)(void *); void (*intr)(void *); } ral_rt2560_opns = { rt2560_attach, rt2560_detach, rt2560_stop, rt2560_stop, rt2560_resume, rt2560_intr }, ral_rt2661_opns = { rt2661_attach, rt2661_detach, rt2661_shutdown, rt2661_suspend, rt2661_resume, rt2661_intr }, ral_rt2860_opns = { rt2860_attach, rt2860_detach, rt2860_shutdown, rt2860_suspend, rt2860_resume, rt2860_intr }; struct ral_pci_softc { union { struct rt2560_softc sc_rt2560; struct rt2661_softc sc_rt2661; struct rt2860_softc sc_rt2860; } u; const struct ral_opns *sc_opns; struct resource *irq; struct resource *mem; void *sc_ih; }; static int ral_pci_probe(device_t); static int ral_pci_attach(device_t); static int ral_pci_detach(device_t); static int ral_pci_shutdown(device_t); static int ral_pci_suspend(device_t); static int ral_pci_resume(device_t); static device_method_t ral_pci_methods[] = { /* Device interface */ DEVMETHOD(device_probe, ral_pci_probe), DEVMETHOD(device_attach, ral_pci_attach), DEVMETHOD(device_detach, ral_pci_detach), DEVMETHOD(device_shutdown, ral_pci_shutdown), DEVMETHOD(device_suspend, ral_pci_suspend), DEVMETHOD(device_resume, ral_pci_resume), DEVMETHOD_END }; static driver_t ral_pci_driver = { "ral", ral_pci_methods, sizeof (struct ral_pci_softc) }; static devclass_t ral_devclass; DRIVER_MODULE(ral, 
pci, ral_pci_driver, ral_devclass, NULL, NULL); +MODULE_PNP_INFO("U16:vendor; U16:device; D:#", pci, ral, ral_pci_ids, + nitems(ral_pci_ids) - 1); static int ral_pci_probe(device_t dev) { const struct ral_pci_ident *ident; for (ident = ral_pci_ids; ident->name != NULL; ident++) { if (pci_get_vendor(dev) == ident->vendor && pci_get_device(dev) == ident->device) { device_set_desc(dev, ident->name); return (BUS_PROBE_DEFAULT); } } return ENXIO; } static int ral_pci_attach(device_t dev) { struct ral_pci_softc *psc = device_get_softc(dev); struct rt2560_softc *sc = &psc->u.sc_rt2560; int count, error, rid; pci_enable_busmaster(dev); switch (pci_get_device(dev)) { case 0x0201: psc->sc_opns = &ral_rt2560_opns; break; case 0x0301: case 0x0302: case 0x0401: psc->sc_opns = &ral_rt2661_opns; break; default: psc->sc_opns = &ral_rt2860_opns; break; } rid = PCIR_BAR(0); psc->mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid, RF_ACTIVE); if (psc->mem == NULL) { device_printf(dev, "could not allocate memory resource\n"); return ENXIO; } sc->sc_st = rman_get_bustag(psc->mem); sc->sc_sh = rman_get_bushandle(psc->mem); sc->sc_invalid = 1; rid = 0; if (ral_msi_disable == 0) { count = 1; if (pci_alloc_msi(dev, &count) == 0) rid = 1; } psc->irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, RF_ACTIVE | (rid != 0 ? 0 : RF_SHAREABLE)); if (psc->irq == NULL) { device_printf(dev, "could not allocate interrupt resource\n"); pci_release_msi(dev); bus_release_resource(dev, SYS_RES_MEMORY, rman_get_rid(psc->mem), psc->mem); return ENXIO; } error = (*psc->sc_opns->attach)(dev, pci_get_device(dev)); if (error != 0) { (void)ral_pci_detach(dev); return error; } /* * Hook our interrupt after all initialization is complete. */ error = bus_setup_intr(dev, psc->irq, INTR_TYPE_NET | INTR_MPSAFE, NULL, psc->sc_opns->intr, psc, &psc->sc_ih); if (error != 0) { device_printf(dev, "could not set up interrupt\n"); (void)ral_pci_detach(dev); return error; } sc->sc_invalid = 0; return 0; } static int ral_pci_detach(device_t dev) { struct ral_pci_softc *psc = device_get_softc(dev); struct rt2560_softc *sc = &psc->u.sc_rt2560; /* check if device was removed */ sc->sc_invalid = !bus_child_present(dev); if (psc->sc_ih != NULL) bus_teardown_intr(dev, psc->irq, psc->sc_ih); (*psc->sc_opns->detach)(psc); bus_generic_detach(dev); bus_release_resource(dev, SYS_RES_IRQ, rman_get_rid(psc->irq), psc->irq); pci_release_msi(dev); bus_release_resource(dev, SYS_RES_MEMORY, rman_get_rid(psc->mem), psc->mem); return 0; } static int ral_pci_shutdown(device_t dev) { struct ral_pci_softc *psc = device_get_softc(dev); (*psc->sc_opns->shutdown)(psc); return 0; } static int ral_pci_suspend(device_t dev) { struct ral_pci_softc *psc = device_get_softc(dev); (*psc->sc_opns->suspend)(psc); return 0; } static int ral_pci_resume(device_t dev) { struct ral_pci_softc *psc = device_get_softc(dev); (*psc->sc_opns->resume)(psc); return 0; } Index: head/sys/dev/rl/if_rl.c =================================================================== --- head/sys/dev/rl/if_rl.c (revision 338948) +++ head/sys/dev/rl/if_rl.c (revision 338949) @@ -1,2124 +1,2126 @@ /*- * Copyright (c) 1997, 1998 * Bill Paul . All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. 
Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * This product includes software developed by Bill Paul. * 4. Neither the name of the author nor the names of any co-contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF * THE POSSIBILITY OF SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); /* * RealTek 8129/8139 PCI NIC driver * * Supports several extremely cheap PCI 10/100 adapters based on * the RealTek chipset. Datasheets can be obtained from * www.realtek.com.tw. * * Written by Bill Paul * Electrical Engineering Department * Columbia University, New York City */ /* * The RealTek 8139 PCI NIC redefines the meaning of 'low end.' This is * probably the worst PCI ethernet controller ever made, with the possible * exception of the FEAST chip made by SMC. The 8139 supports bus-master * DMA, but it has a terrible interface that nullifies any performance * gains that bus-master DMA usually offers. * * For transmission, the chip offers a series of four TX descriptor * registers. Each transmit frame must be in a contiguous buffer, aligned * on a longword (32-bit) boundary. This means we almost always have to * do mbuf copies in order to transmit a frame, except in the unlikely * case where a) the packet fits into a single mbuf, and b) the packet * is 32-bit aligned within the mbuf's data area. The presence of only * four descriptor registers means that we can never have more than four * packets queued for transmission at any one time. * * Reception is not much better. The driver has to allocate a single large * buffer area (up to 64K in size) into which the chip will DMA received * frames. Because we don't know where within this region received packets * will begin or end, we have no choice but to copy data from the buffer * area into mbufs in order to pass the packets up to the higher protocol * levels. * * It's impossible given this rotten design to really achieve decent * performance at 100Mbps, unless you happen to have a 400Mhz PII or * some equally overmuscled CPU to drive it. * * On the bright side, the 8139 does have a built-in PHY, although * rather than using an MDIO serial interface like most other NICs, the * PHY registers are directly accessible through the 8139's register * space. The 8139 supports autonegotiation, as well as a 64-bit multicast * filter. * * The 8129 chip is an older version of the 8139 that uses an external PHY * chip. 
The 8129 has a serial MDIO interface for accessing the MII where * the 8139 lets you directly access the on-board PHY registers. We need * to select which interface to use depending on the chip type. */ #ifdef HAVE_KERNEL_OPTION_HEADERS #include "opt_device_polling.h" #endif #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include MODULE_DEPEND(rl, pci, 1, 1, 1); MODULE_DEPEND(rl, ether, 1, 1, 1); MODULE_DEPEND(rl, miibus, 1, 1, 1); /* "device miibus" required. See GENERIC if you get errors here. */ #include "miibus_if.h" #include /* * Various supported device vendors/types and their names. */ static const struct rl_type rl_devs[] = { { RT_VENDORID, RT_DEVICEID_8129, RL_8129, "RealTek 8129 10/100BaseTX" }, { RT_VENDORID, RT_DEVICEID_8139, RL_8139, "RealTek 8139 10/100BaseTX" }, { RT_VENDORID, RT_DEVICEID_8139D, RL_8139, "RealTek 8139 10/100BaseTX" }, { RT_VENDORID, RT_DEVICEID_8138, RL_8139, "RealTek 8139 10/100BaseTX CardBus" }, { RT_VENDORID, RT_DEVICEID_8100, RL_8139, "RealTek 8100 10/100BaseTX" }, { ACCTON_VENDORID, ACCTON_DEVICEID_5030, RL_8139, "Accton MPX 5030/5038 10/100BaseTX" }, { DELTA_VENDORID, DELTA_DEVICEID_8139, RL_8139, "Delta Electronics 8139 10/100BaseTX" }, { ADDTRON_VENDORID, ADDTRON_DEVICEID_8139, RL_8139, "Addtron Technology 8139 10/100BaseTX" }, { DLINK_VENDORID, DLINK_DEVICEID_520TX_REVC1, RL_8139, "D-Link DFE-520TX (rev. C1) 10/100BaseTX" }, { DLINK_VENDORID, DLINK_DEVICEID_530TXPLUS, RL_8139, "D-Link DFE-530TX+ 10/100BaseTX" }, { DLINK_VENDORID, DLINK_DEVICEID_690TXD, RL_8139, "D-Link DFE-690TXD 10/100BaseTX" }, { NORTEL_VENDORID, ACCTON_DEVICEID_5030, RL_8139, "Nortel Networks 10/100BaseTX" }, { COREGA_VENDORID, COREGA_DEVICEID_FETHERCBTXD, RL_8139, "Corega FEther CB-TXD" }, { COREGA_VENDORID, COREGA_DEVICEID_FETHERIICBTXD, RL_8139, "Corega FEtherII CB-TXD" }, { PEPPERCON_VENDORID, PEPPERCON_DEVICEID_ROLF, RL_8139, "Peppercon AG ROL-F" }, { PLANEX_VENDORID, PLANEX_DEVICEID_FNW3603TX, RL_8139, "Planex FNW-3603-TX" }, { PLANEX_VENDORID, PLANEX_DEVICEID_FNW3800TX, RL_8139, "Planex FNW-3800-TX" }, { CP_VENDORID, RT_DEVICEID_8139, RL_8139, "Compaq HNE-300" }, { LEVEL1_VENDORID, LEVEL1_DEVICEID_FPC0106TX, RL_8139, "LevelOne FPC-0106TX" }, { EDIMAX_VENDORID, EDIMAX_DEVICEID_EP4103DL, RL_8139, "Edimax EP-4103DL CardBus" } }; static int rl_attach(device_t); static int rl_detach(device_t); static void rl_dmamap_cb(void *, bus_dma_segment_t *, int, int); static int rl_dma_alloc(struct rl_softc *); static void rl_dma_free(struct rl_softc *); static void rl_eeprom_putbyte(struct rl_softc *, int); static void rl_eeprom_getword(struct rl_softc *, int, uint16_t *); static int rl_encap(struct rl_softc *, struct mbuf **); static int rl_list_tx_init(struct rl_softc *); static int rl_list_rx_init(struct rl_softc *); static int rl_ifmedia_upd(struct ifnet *); static void rl_ifmedia_sts(struct ifnet *, struct ifmediareq *); static int rl_ioctl(struct ifnet *, u_long, caddr_t); static void rl_intr(void *); static void rl_init(void *); static void rl_init_locked(struct rl_softc *sc); static int rl_miibus_readreg(device_t, int, int); static void rl_miibus_statchg(device_t); static int rl_miibus_writereg(device_t, int, int, int); #ifdef DEVICE_POLLING static int rl_poll(struct ifnet *ifp, enum poll_cmd cmd, int count); static int rl_poll_locked(struct ifnet *ifp, enum poll_cmd cmd, int count); #endif 
static int rl_probe(device_t); static void rl_read_eeprom(struct rl_softc *, uint8_t *, int, int, int); static void rl_reset(struct rl_softc *); static int rl_resume(device_t); static int rl_rxeof(struct rl_softc *); static void rl_rxfilter(struct rl_softc *); static int rl_shutdown(device_t); static void rl_start(struct ifnet *); static void rl_start_locked(struct ifnet *); static void rl_stop(struct rl_softc *); static int rl_suspend(device_t); static void rl_tick(void *); static void rl_txeof(struct rl_softc *); static void rl_watchdog(struct rl_softc *); static void rl_setwol(struct rl_softc *); static void rl_clrwol(struct rl_softc *); /* * MII bit-bang glue */ static uint32_t rl_mii_bitbang_read(device_t); static void rl_mii_bitbang_write(device_t, uint32_t); static const struct mii_bitbang_ops rl_mii_bitbang_ops = { rl_mii_bitbang_read, rl_mii_bitbang_write, { RL_MII_DATAOUT, /* MII_BIT_MDO */ RL_MII_DATAIN, /* MII_BIT_MDI */ RL_MII_CLK, /* MII_BIT_MDC */ RL_MII_DIR, /* MII_BIT_DIR_HOST_PHY */ 0, /* MII_BIT_DIR_PHY_HOST */ } }; static device_method_t rl_methods[] = { /* Device interface */ DEVMETHOD(device_probe, rl_probe), DEVMETHOD(device_attach, rl_attach), DEVMETHOD(device_detach, rl_detach), DEVMETHOD(device_suspend, rl_suspend), DEVMETHOD(device_resume, rl_resume), DEVMETHOD(device_shutdown, rl_shutdown), /* MII interface */ DEVMETHOD(miibus_readreg, rl_miibus_readreg), DEVMETHOD(miibus_writereg, rl_miibus_writereg), DEVMETHOD(miibus_statchg, rl_miibus_statchg), DEVMETHOD_END }; static driver_t rl_driver = { "rl", rl_methods, sizeof(struct rl_softc) }; static devclass_t rl_devclass; DRIVER_MODULE(rl, pci, rl_driver, rl_devclass, 0, 0); +MODULE_PNP_INFO("U16:vendor; U16:device", pci, rl, rl_devs, + nitems(rl_devs) - 1); DRIVER_MODULE(rl, cardbus, rl_driver, rl_devclass, 0, 0); DRIVER_MODULE(miibus, rl, miibus_driver, miibus_devclass, 0, 0); #define EE_SET(x) \ CSR_WRITE_1(sc, RL_EECMD, \ CSR_READ_1(sc, RL_EECMD) | x) #define EE_CLR(x) \ CSR_WRITE_1(sc, RL_EECMD, \ CSR_READ_1(sc, RL_EECMD) & ~x) /* * Send a read command and address to the EEPROM, check for ACK. */ static void rl_eeprom_putbyte(struct rl_softc *sc, int addr) { int d, i; d = addr | sc->rl_eecmd_read; /* * Feed in each bit and strobe the clock. */ for (i = 0x400; i; i >>= 1) { if (d & i) { EE_SET(RL_EE_DATAIN); } else { EE_CLR(RL_EE_DATAIN); } DELAY(100); EE_SET(RL_EE_CLK); DELAY(150); EE_CLR(RL_EE_CLK); DELAY(100); } } /* * Read a word of data stored in the EEPROM at address 'addr.' */ static void rl_eeprom_getword(struct rl_softc *sc, int addr, uint16_t *dest) { int i; uint16_t word = 0; /* Enter EEPROM access mode. */ CSR_WRITE_1(sc, RL_EECMD, RL_EEMODE_PROGRAM|RL_EE_SEL); /* * Send address of word we want to read. */ rl_eeprom_putbyte(sc, addr); CSR_WRITE_1(sc, RL_EECMD, RL_EEMODE_PROGRAM|RL_EE_SEL); /* * Start reading bits from EEPROM. */ for (i = 0x8000; i; i >>= 1) { EE_SET(RL_EE_CLK); DELAY(100); if (CSR_READ_1(sc, RL_EECMD) & RL_EE_DATAOUT) word |= i; EE_CLR(RL_EE_CLK); DELAY(100); } /* Turn off EEPROM access mode. */ CSR_WRITE_1(sc, RL_EECMD, RL_EEMODE_OFF); *dest = word; } /* * Read a sequence of words from the EEPROM. */ static void rl_read_eeprom(struct rl_softc *sc, uint8_t *dest, int off, int cnt, int swap) { int i; uint16_t word = 0, *ptr; for (i = 0; i < cnt; i++) { rl_eeprom_getword(sc, off + i, &word); ptr = (uint16_t *)(dest + (i * 2)); if (swap) *ptr = ntohs(word); else *ptr = word; } } /* * Read the MII serial port for the MII bit-bang module. 
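 * rl_mii_bitbang_read() and rl_mii_bitbang_write() only expose the
 * RL_MII register; the shared mii_bitbang_readreg()/_writereg()
 * helpers do the actual MDIO clocking using the bit assignments in
 * rl_mii_bitbang_ops.  This path serves the 8129's external PHY;
 * the 8139's internal PHY is reached through dedicated registers
 * instead (see rl_miibus_readreg() below).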
*/ static uint32_t rl_mii_bitbang_read(device_t dev) { struct rl_softc *sc; uint32_t val; sc = device_get_softc(dev); val = CSR_READ_1(sc, RL_MII); CSR_BARRIER(sc, RL_MII, 1, BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE); return (val); } /* * Write the MII serial port for the MII bit-bang module. */ static void rl_mii_bitbang_write(device_t dev, uint32_t val) { struct rl_softc *sc; sc = device_get_softc(dev); CSR_WRITE_1(sc, RL_MII, val); CSR_BARRIER(sc, RL_MII, 1, BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE); } static int rl_miibus_readreg(device_t dev, int phy, int reg) { struct rl_softc *sc; uint16_t rl8139_reg; sc = device_get_softc(dev); if (sc->rl_type == RL_8139) { switch (reg) { case MII_BMCR: rl8139_reg = RL_BMCR; break; case MII_BMSR: rl8139_reg = RL_BMSR; break; case MII_ANAR: rl8139_reg = RL_ANAR; break; case MII_ANER: rl8139_reg = RL_ANER; break; case MII_ANLPAR: rl8139_reg = RL_LPAR; break; case MII_PHYIDR1: case MII_PHYIDR2: return (0); /* * Allow the rlphy driver to read the media status * register. If we have a link partner which does not * support NWAY, this is the register which will tell * us the results of parallel detection. */ case RL_MEDIASTAT: return (CSR_READ_1(sc, RL_MEDIASTAT)); default: device_printf(sc->rl_dev, "bad phy register\n"); return (0); } return (CSR_READ_2(sc, rl8139_reg)); } return (mii_bitbang_readreg(dev, &rl_mii_bitbang_ops, phy, reg)); } static int rl_miibus_writereg(device_t dev, int phy, int reg, int data) { struct rl_softc *sc; uint16_t rl8139_reg; sc = device_get_softc(dev); if (sc->rl_type == RL_8139) { switch (reg) { case MII_BMCR: rl8139_reg = RL_BMCR; break; case MII_BMSR: rl8139_reg = RL_BMSR; break; case MII_ANAR: rl8139_reg = RL_ANAR; break; case MII_ANER: rl8139_reg = RL_ANER; break; case MII_ANLPAR: rl8139_reg = RL_LPAR; break; case MII_PHYIDR1: case MII_PHYIDR2: return (0); break; default: device_printf(sc->rl_dev, "bad phy register\n"); return (0); } CSR_WRITE_2(sc, rl8139_reg, data); return (0); } mii_bitbang_writereg(dev, &rl_mii_bitbang_ops, phy, reg, data); return (0); } static void rl_miibus_statchg(device_t dev) { struct rl_softc *sc; struct ifnet *ifp; struct mii_data *mii; sc = device_get_softc(dev); mii = device_get_softc(sc->rl_miibus); ifp = sc->rl_ifp; if (mii == NULL || ifp == NULL || (ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) return; sc->rl_flags &= ~RL_FLAG_LINK; if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID)) == (IFM_ACTIVE | IFM_AVALID)) { switch (IFM_SUBTYPE(mii->mii_media_active)) { case IFM_10_T: case IFM_100_TX: sc->rl_flags |= RL_FLAG_LINK; break; default: break; } } /* * RealTek controllers do not provide any interface to * Tx/Rx MACs for resolved speed, duplex and flow-control * parameters. */ } /* * Program the 64-bit multicast hash filter. */ static void rl_rxfilter(struct rl_softc *sc) { struct ifnet *ifp = sc->rl_ifp; int h = 0; uint32_t hashes[2] = { 0, 0 }; struct ifmultiaddr *ifma; uint32_t rxfilt; RL_LOCK_ASSERT(sc); rxfilt = CSR_READ_4(sc, RL_RXCFG); rxfilt &= ~(RL_RXCFG_RX_ALLPHYS | RL_RXCFG_RX_BROAD | RL_RXCFG_RX_MULTI); /* Always accept frames destined for this host. */ rxfilt |= RL_RXCFG_RX_INDIV; /* Set capture broadcast bit to capture broadcast frames. */ if (ifp->if_flags & IFF_BROADCAST) rxfilt |= RL_RXCFG_RX_BROAD; if (ifp->if_flags & IFF_ALLMULTI || ifp->if_flags & IFF_PROMISC) { rxfilt |= RL_RXCFG_RX_MULTI; if (ifp->if_flags & IFF_PROMISC) rxfilt |= RL_RXCFG_RX_ALLPHYS; hashes[0] = 0xFFFFFFFF; hashes[1] = 0xFFFFFFFF; } else { /* Now program new ones. 
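 * Each multicast address is hashed with ether_crc32_be() and the
 * top six bits of the CRC (the ">> 26") pick one of 64 filter bits,
 * split across the two 32-bit MAR registers: h < 32 lands in
 * hashes[0] (RL_MAR0), h >= 32 in hashes[1] (RL_MAR4).  For
 * example, h == 37 sets bit 5 of RL_MAR4.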
*/ if_maddr_rlock(ifp); CK_STAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) { if (ifma->ifma_addr->sa_family != AF_LINK) continue; h = ether_crc32_be(LLADDR((struct sockaddr_dl *) ifma->ifma_addr), ETHER_ADDR_LEN) >> 26; if (h < 32) hashes[0] |= (1 << h); else hashes[1] |= (1 << (h - 32)); } if_maddr_runlock(ifp); if (hashes[0] != 0 || hashes[1] != 0) rxfilt |= RL_RXCFG_RX_MULTI; } CSR_WRITE_4(sc, RL_MAR0, hashes[0]); CSR_WRITE_4(sc, RL_MAR4, hashes[1]); CSR_WRITE_4(sc, RL_RXCFG, rxfilt); } static void rl_reset(struct rl_softc *sc) { int i; RL_LOCK_ASSERT(sc); CSR_WRITE_1(sc, RL_COMMAND, RL_CMD_RESET); for (i = 0; i < RL_TIMEOUT; i++) { DELAY(10); if (!(CSR_READ_1(sc, RL_COMMAND) & RL_CMD_RESET)) break; } if (i == RL_TIMEOUT) device_printf(sc->rl_dev, "reset never completed!\n"); } /* * Probe for a RealTek 8129/8139 chip. Check the PCI vendor and device * IDs against our list and return a device name if we find a match. */ static int rl_probe(device_t dev) { const struct rl_type *t; uint16_t devid, revid, vendor; int i; vendor = pci_get_vendor(dev); devid = pci_get_device(dev); revid = pci_get_revid(dev); if (vendor == RT_VENDORID && devid == RT_DEVICEID_8139) { if (revid == 0x20) { /* 8139C+, let re(4) take care of this device. */ return (ENXIO); } } t = rl_devs; for (i = 0; i < nitems(rl_devs); i++, t++) { if (vendor == t->rl_vid && devid == t->rl_did) { device_set_desc(dev, t->rl_name); return (BUS_PROBE_DEFAULT); } } return (ENXIO); } struct rl_dmamap_arg { bus_addr_t rl_busaddr; }; static void rl_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error) { struct rl_dmamap_arg *ctx; if (error != 0) return; KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs)); ctx = (struct rl_dmamap_arg *)arg; ctx->rl_busaddr = segs[0].ds_addr; } /* * Attach the interface. Allocate softc structures, do ifmedia * setup and ethernet/BPF attach. */ static int rl_attach(device_t dev) { uint8_t eaddr[ETHER_ADDR_LEN]; uint16_t as[3]; struct ifnet *ifp; struct rl_softc *sc; const struct rl_type *t; struct sysctl_ctx_list *ctx; struct sysctl_oid_list *children; int error = 0, hwrev, i, phy, pmc, rid; int prefer_iomap, unit; uint16_t rl_did = 0; char tn[32]; sc = device_get_softc(dev); unit = device_get_unit(dev); sc->rl_dev = dev; sc->rl_twister_enable = 0; snprintf(tn, sizeof(tn), "dev.rl.%d.twister_enable", unit); TUNABLE_INT_FETCH(tn, &sc->rl_twister_enable); ctx = device_get_sysctl_ctx(sc->rl_dev); children = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->rl_dev)); SYSCTL_ADD_INT(ctx, children, OID_AUTO, "twister_enable", CTLFLAG_RD, &sc->rl_twister_enable, 0, ""); mtx_init(&sc->rl_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK, MTX_DEF); callout_init_mtx(&sc->rl_stat_callout, &sc->rl_mtx, 0); pci_enable_busmaster(dev); /* * Map control/status registers. * Default to using PIO access for this driver. On SMP systems, * there appear to be problems with memory mapped mode: it looks * like doing too many memory mapped access back to back in rapid * succession can hang the bus. I'm inclined to blame this on * crummy design/construction on the part of RealTek. Memory * mapped mode does appear to work on uniprocessor systems though. 
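 * The default may be overridden with the loader tunable
 * dev.rl.%d.prefer_iomap; setting it to 0 selects memory mapped
 * register access instead.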
*/ prefer_iomap = 1; snprintf(tn, sizeof(tn), "dev.rl.%d.prefer_iomap", unit); TUNABLE_INT_FETCH(tn, &prefer_iomap); if (prefer_iomap) { sc->rl_res_id = PCIR_BAR(0); sc->rl_res_type = SYS_RES_IOPORT; sc->rl_res = bus_alloc_resource_any(dev, sc->rl_res_type, &sc->rl_res_id, RF_ACTIVE); } if (prefer_iomap == 0 || sc->rl_res == NULL) { sc->rl_res_id = PCIR_BAR(1); sc->rl_res_type = SYS_RES_MEMORY; sc->rl_res = bus_alloc_resource_any(dev, sc->rl_res_type, &sc->rl_res_id, RF_ACTIVE); } if (sc->rl_res == NULL) { device_printf(dev, "couldn't map ports/memory\n"); error = ENXIO; goto fail; } #ifdef notdef /* * Detect the Realtek 8139B. For some reason, this chip is very * unstable when left to autoselect the media * The best workaround is to set the device to the required * media type or to set it to the 10 Meg speed. */ if ((rman_get_end(sc->rl_res) - rman_get_start(sc->rl_res)) == 0xFF) device_printf(dev, "Realtek 8139B detected. Warning, this may be unstable in autoselect mode\n"); #endif sc->rl_btag = rman_get_bustag(sc->rl_res); sc->rl_bhandle = rman_get_bushandle(sc->rl_res); /* Allocate interrupt */ rid = 0; sc->rl_irq[0] = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, RF_SHAREABLE | RF_ACTIVE); if (sc->rl_irq[0] == NULL) { device_printf(dev, "couldn't map interrupt\n"); error = ENXIO; goto fail; } sc->rl_cfg0 = RL_8139_CFG0; sc->rl_cfg1 = RL_8139_CFG1; sc->rl_cfg2 = 0; sc->rl_cfg3 = RL_8139_CFG3; sc->rl_cfg4 = RL_8139_CFG4; sc->rl_cfg5 = RL_8139_CFG5; /* * Reset the adapter. Only take the lock here as it's needed in * order to call rl_reset(). */ RL_LOCK(sc); rl_reset(sc); RL_UNLOCK(sc); sc->rl_eecmd_read = RL_EECMD_READ_6BIT; rl_read_eeprom(sc, (uint8_t *)&rl_did, 0, 1, 0); if (rl_did != 0x8129) sc->rl_eecmd_read = RL_EECMD_READ_8BIT; /* * Get station address from the EEPROM. */ rl_read_eeprom(sc, (uint8_t *)as, RL_EE_EADDR, 3, 0); for (i = 0; i < 3; i++) { eaddr[(i * 2) + 0] = as[i] & 0xff; eaddr[(i * 2) + 1] = as[i] >> 8; } /* * Now read the exact device type from the EEPROM to find * out if it's an 8129 or 8139. */ rl_read_eeprom(sc, (uint8_t *)&rl_did, RL_EE_PCI_DID, 1, 0); t = rl_devs; sc->rl_type = 0; while(t->rl_name != NULL) { if (rl_did == t->rl_did) { sc->rl_type = t->rl_basetype; break; } t++; } if (sc->rl_type == 0) { device_printf(dev, "unknown device ID: %x assuming 8139\n", rl_did); sc->rl_type = RL_8139; /* * Read RL_IDR register to get ethernet address as accessing * EEPROM may not extract correct address. */ for (i = 0; i < ETHER_ADDR_LEN; i++) eaddr[i] = CSR_READ_1(sc, RL_IDR0 + i); } if ((error = rl_dma_alloc(sc)) != 0) goto fail; ifp = sc->rl_ifp = if_alloc(IFT_ETHER); if (ifp == NULL) { device_printf(dev, "can not if_alloc()\n"); error = ENOSPC; goto fail; } #define RL_PHYAD_INTERNAL 0 /* Do MII setup */ phy = MII_PHY_ANY; if (sc->rl_type == RL_8139) phy = RL_PHYAD_INTERNAL; error = mii_attach(dev, &sc->rl_miibus, ifp, rl_ifmedia_upd, rl_ifmedia_sts, BMSR_DEFCAPMASK, phy, MII_OFFSET_ANY, 0); if (error != 0) { device_printf(dev, "attaching PHYs failed\n"); goto fail; } ifp->if_softc = sc; if_initname(ifp, device_get_name(dev), device_get_unit(dev)); ifp->if_mtu = ETHERMTU; ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; ifp->if_ioctl = rl_ioctl; ifp->if_start = rl_start; ifp->if_init = rl_init; ifp->if_capabilities = IFCAP_VLAN_MTU; /* Check WOL for RTL8139B or newer controllers. 
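 * WOL is only advertised when the device exposes a PCI power management
 * capability and the hardware revision is one of the 8139B-or-newer
 * parts handled below.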
*/ if (sc->rl_type == RL_8139 && pci_find_cap(sc->rl_dev, PCIY_PMG, &pmc) == 0) { hwrev = CSR_READ_4(sc, RL_TXCFG) & RL_TXCFG_HWREV; switch (hwrev) { case RL_HWREV_8139B: case RL_HWREV_8130: case RL_HWREV_8139C: case RL_HWREV_8139D: case RL_HWREV_8101: case RL_HWREV_8100: ifp->if_capabilities |= IFCAP_WOL; /* Disable WOL. */ rl_clrwol(sc); break; default: break; } } ifp->if_capenable = ifp->if_capabilities; ifp->if_capenable &= ~(IFCAP_WOL_UCAST | IFCAP_WOL_MCAST); #ifdef DEVICE_POLLING ifp->if_capabilities |= IFCAP_POLLING; #endif IFQ_SET_MAXLEN(&ifp->if_snd, ifqmaxlen); ifp->if_snd.ifq_drv_maxlen = ifqmaxlen; IFQ_SET_READY(&ifp->if_snd); /* * Call MI attach routine. */ ether_ifattach(ifp, eaddr); /* Hook interrupt last to avoid having to lock softc */ error = bus_setup_intr(dev, sc->rl_irq[0], INTR_TYPE_NET | INTR_MPSAFE, NULL, rl_intr, sc, &sc->rl_intrhand[0]); if (error) { device_printf(sc->rl_dev, "couldn't set up irq\n"); ether_ifdetach(ifp); } fail: if (error) rl_detach(dev); return (error); } /* * Shutdown hardware and free up resources. This can be called any * time after the mutex has been initialized. It is called in both * the error case in attach and the normal detach case so it needs * to be careful about only freeing resources that have actually been * allocated. */ static int rl_detach(device_t dev) { struct rl_softc *sc; struct ifnet *ifp; sc = device_get_softc(dev); ifp = sc->rl_ifp; KASSERT(mtx_initialized(&sc->rl_mtx), ("rl mutex not initialized")); #ifdef DEVICE_POLLING if (ifp->if_capenable & IFCAP_POLLING) ether_poll_deregister(ifp); #endif /* These should only be active if attach succeeded */ if (device_is_attached(dev)) { RL_LOCK(sc); rl_stop(sc); RL_UNLOCK(sc); callout_drain(&sc->rl_stat_callout); ether_ifdetach(ifp); } #if 0 sc->suspended = 1; #endif if (sc->rl_miibus) device_delete_child(dev, sc->rl_miibus); bus_generic_detach(dev); if (sc->rl_intrhand[0]) bus_teardown_intr(dev, sc->rl_irq[0], sc->rl_intrhand[0]); if (sc->rl_irq[0]) bus_release_resource(dev, SYS_RES_IRQ, 0, sc->rl_irq[0]); if (sc->rl_res) bus_release_resource(dev, sc->rl_res_type, sc->rl_res_id, sc->rl_res); if (ifp) if_free(ifp); rl_dma_free(sc); mtx_destroy(&sc->rl_mtx); return (0); } static int rl_dma_alloc(struct rl_softc *sc) { struct rl_dmamap_arg ctx; int error, i; /* * Allocate the parent bus DMA tag appropriate for PCI. */ error = bus_dma_tag_create(bus_get_dma_tag(sc->rl_dev), /* parent */ 1, 0, /* alignment, boundary */ BUS_SPACE_MAXADDR_32BIT, /* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filter, filterarg */ BUS_SPACE_MAXSIZE_32BIT, 0, /* maxsize, nsegments */ BUS_SPACE_MAXSIZE_32BIT, /* maxsegsize */ 0, /* flags */ NULL, NULL, /* lockfunc, lockarg */ &sc->rl_parent_tag); if (error) { device_printf(sc->rl_dev, "failed to create parent DMA tag.\n"); goto fail; } /* Create DMA tag for Rx memory block. */ error = bus_dma_tag_create(sc->rl_parent_tag, /* parent */ RL_RX_8139_BUF_ALIGN, 0, /* alignment, boundary */ BUS_SPACE_MAXADDR, /* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filter, filterarg */ RL_RXBUFLEN + RL_RX_8139_BUF_GUARD_SZ, 1, /* maxsize,nsegments */ RL_RXBUFLEN + RL_RX_8139_BUF_GUARD_SZ, /* maxsegsize */ 0, /* flags */ NULL, NULL, /* lockfunc, lockarg */ &sc->rl_cdata.rl_rx_tag); if (error) { device_printf(sc->rl_dev, "failed to create Rx memory block DMA tag.\n"); goto fail; } /* Create DMA tag for Tx buffer. 
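 * Each Tx slot gets its own DMA map from this tag; a frame is always
 * loaded as a single segment of at most MCLBYTES, aligned to
 * RL_TX_8139_BUF_ALIGN.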
 */
    error = bus_dma_tag_create(sc->rl_parent_tag,   /* parent */
        RL_TX_8139_BUF_ALIGN, 0,    /* alignment, boundary */
        BUS_SPACE_MAXADDR,          /* lowaddr */
        BUS_SPACE_MAXADDR,          /* highaddr */
        NULL, NULL,                 /* filter, filterarg */
        MCLBYTES, 1,                /* maxsize, nsegments */
        MCLBYTES,                   /* maxsegsize */
        0,                          /* flags */
        NULL, NULL,                 /* lockfunc, lockarg */
        &sc->rl_cdata.rl_tx_tag);
    if (error) {
        device_printf(sc->rl_dev, "failed to create Tx DMA tag.\n");
        goto fail;
    }

    /*
     * Allocate DMA'able memory and load DMA map for Rx memory block.
     */
    error = bus_dmamem_alloc(sc->rl_cdata.rl_rx_tag,
        (void **)&sc->rl_cdata.rl_rx_buf, BUS_DMA_WAITOK |
        BUS_DMA_COHERENT | BUS_DMA_ZERO, &sc->rl_cdata.rl_rx_dmamap);
    if (error != 0) {
        device_printf(sc->rl_dev,
            "failed to allocate Rx DMA memory block.\n");
        goto fail;
    }
    ctx.rl_busaddr = 0;
    error = bus_dmamap_load(sc->rl_cdata.rl_rx_tag,
        sc->rl_cdata.rl_rx_dmamap, sc->rl_cdata.rl_rx_buf,
        RL_RXBUFLEN + RL_RX_8139_BUF_GUARD_SZ, rl_dmamap_cb, &ctx,
        BUS_DMA_NOWAIT);
    if (error != 0 || ctx.rl_busaddr == 0) {
        device_printf(sc->rl_dev,
            "could not load Rx DMA memory block.\n");
        goto fail;
    }
    sc->rl_cdata.rl_rx_buf_paddr = ctx.rl_busaddr;

    /* Create DMA maps for Tx buffers. */
    for (i = 0; i < RL_TX_LIST_CNT; i++) {
        sc->rl_cdata.rl_tx_chain[i] = NULL;
        sc->rl_cdata.rl_tx_dmamap[i] = NULL;
        error = bus_dmamap_create(sc->rl_cdata.rl_tx_tag, 0,
            &sc->rl_cdata.rl_tx_dmamap[i]);
        if (error != 0) {
            device_printf(sc->rl_dev,
                "could not create Tx dmamap.\n");
            goto fail;
        }
    }

    /* Leave a few bytes before the start of the RX ring buffer. */
    sc->rl_cdata.rl_rx_buf_ptr = sc->rl_cdata.rl_rx_buf;
    sc->rl_cdata.rl_rx_buf += RL_RX_8139_BUF_RESERVE;

fail:
    return (error);
}

static void
rl_dma_free(struct rl_softc *sc)
{
    int i;

    /* Rx memory block. */
    if (sc->rl_cdata.rl_rx_tag != NULL) {
        if (sc->rl_cdata.rl_rx_buf_paddr != 0)
            bus_dmamap_unload(sc->rl_cdata.rl_rx_tag,
                sc->rl_cdata.rl_rx_dmamap);
        if (sc->rl_cdata.rl_rx_buf_ptr != NULL)
            bus_dmamem_free(sc->rl_cdata.rl_rx_tag,
                sc->rl_cdata.rl_rx_buf_ptr,
                sc->rl_cdata.rl_rx_dmamap);
        sc->rl_cdata.rl_rx_buf_ptr = NULL;
        sc->rl_cdata.rl_rx_buf = NULL;
        sc->rl_cdata.rl_rx_buf_paddr = 0;
        bus_dma_tag_destroy(sc->rl_cdata.rl_rx_tag);
        sc->rl_cdata.rl_rx_tag = NULL;
    }
    /* Tx buffers. */
    if (sc->rl_cdata.rl_tx_tag != NULL) {
        for (i = 0; i < RL_TX_LIST_CNT; i++) {
            if (sc->rl_cdata.rl_tx_dmamap[i] != NULL) {
                bus_dmamap_destroy(
                    sc->rl_cdata.rl_tx_tag,
                    sc->rl_cdata.rl_tx_dmamap[i]);
                sc->rl_cdata.rl_tx_dmamap[i] = NULL;
            }
        }
        bus_dma_tag_destroy(sc->rl_cdata.rl_tx_tag);
        sc->rl_cdata.rl_tx_tag = NULL;
    }
    if (sc->rl_parent_tag != NULL) {
        bus_dma_tag_destroy(sc->rl_parent_tag);
        sc->rl_parent_tag = NULL;
    }
}

/*
 * Initialize the transmit descriptors.
 */
static int
rl_list_tx_init(struct rl_softc *sc)
{
    struct rl_chain_data *cd;
    int i;

    RL_LOCK_ASSERT(sc);

    cd = &sc->rl_cdata;
    for (i = 0; i < RL_TX_LIST_CNT; i++) {
        cd->rl_tx_chain[i] = NULL;
        CSR_WRITE_4(sc,
            RL_TXADDR0 + (i * sizeof(uint32_t)), 0x0000000);
    }

    sc->rl_cdata.cur_tx = 0;
    sc->rl_cdata.last_tx = 0;

    return (0);
}

static int
rl_list_rx_init(struct rl_softc *sc)
{

    RL_LOCK_ASSERT(sc);

    bzero(sc->rl_cdata.rl_rx_buf_ptr,
        RL_RXBUFLEN + RL_RX_8139_BUF_GUARD_SZ);
    bus_dmamap_sync(sc->rl_cdata.rl_rx_tag, sc->rl_cdata.rl_rx_dmamap,
        BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

    return (0);
}

/*
 * A frame has been uploaded: pass the resulting mbuf chain up to
 * the higher level protocols.
 *
 * You know there's something wrong with a PCI bus-master chip design
 * when you have to use m_devget().
* * The receive operation is badly documented in the datasheet, so I'll * attempt to document it here. The driver provides a buffer area and * places its base address in the RX buffer start address register. * The chip then begins copying frames into the RX buffer. Each frame * is preceded by a 32-bit RX status word which specifies the length * of the frame and certain other status bits. Each frame (starting with * the status word) is also 32-bit aligned. The frame length is in the * first 16 bits of the status word; the lower 15 bits correspond with * the 'rx status register' mentioned in the datasheet. * * Note: to make the Alpha happy, the frame payload needs to be aligned * on a 32-bit boundary. To achieve this, we pass RL_ETHER_ALIGN (2 bytes) * as the offset argument to m_devget(). */ static int rl_rxeof(struct rl_softc *sc) { struct mbuf *m; struct ifnet *ifp = sc->rl_ifp; uint8_t *rxbufpos; int total_len = 0; int wrap = 0; int rx_npkts = 0; uint32_t rxstat; uint16_t cur_rx; uint16_t limit; uint16_t max_bytes, rx_bytes = 0; RL_LOCK_ASSERT(sc); bus_dmamap_sync(sc->rl_cdata.rl_rx_tag, sc->rl_cdata.rl_rx_dmamap, BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); cur_rx = (CSR_READ_2(sc, RL_CURRXADDR) + 16) % RL_RXBUFLEN; /* Do not try to read past this point. */ limit = CSR_READ_2(sc, RL_CURRXBUF) % RL_RXBUFLEN; if (limit < cur_rx) max_bytes = (RL_RXBUFLEN - cur_rx) + limit; else max_bytes = limit - cur_rx; while((CSR_READ_1(sc, RL_COMMAND) & RL_CMD_EMPTY_RXBUF) == 0) { #ifdef DEVICE_POLLING if (ifp->if_capenable & IFCAP_POLLING) { if (sc->rxcycles <= 0) break; sc->rxcycles--; } #endif rxbufpos = sc->rl_cdata.rl_rx_buf + cur_rx; rxstat = le32toh(*(uint32_t *)rxbufpos); /* * Here's a totally undocumented fact for you. When the * RealTek chip is in the process of copying a packet into * RAM for you, the length will be 0xfff0. If you spot a * packet header with this value, you need to stop. The * datasheet makes absolutely no mention of this and * RealTek should be shot for this. */ total_len = rxstat >> 16; if (total_len == RL_RXSTAT_UNFINISHED) break; if (!(rxstat & RL_RXSTAT_RXOK) || total_len < ETHER_MIN_LEN || total_len > ETHER_MAX_LEN + ETHER_VLAN_ENCAP_LEN) { if_inc_counter(ifp, IFCOUNTER_IERRORS, 1); ifp->if_drv_flags &= ~IFF_DRV_RUNNING; rl_init_locked(sc); return (rx_npkts); } /* No errors; receive the packet. */ rx_bytes += total_len + 4; /* * XXX The RealTek chip includes the CRC with every * received frame, and there's no way to turn this * behavior off (at least, I can't find anything in * the manual that explains how to do it) so we have * to trim off the CRC manually. */ total_len -= ETHER_CRC_LEN; /* * Avoid trying to read more bytes than we know * the chip has prepared for us. */ if (rx_bytes > max_bytes) break; rxbufpos = sc->rl_cdata.rl_rx_buf + ((cur_rx + sizeof(uint32_t)) % RL_RXBUFLEN); if (rxbufpos == (sc->rl_cdata.rl_rx_buf + RL_RXBUFLEN)) rxbufpos = sc->rl_cdata.rl_rx_buf; wrap = (sc->rl_cdata.rl_rx_buf + RL_RXBUFLEN) - rxbufpos; if (total_len > wrap) { m = m_devget(rxbufpos, total_len, RL_ETHER_ALIGN, ifp, NULL); if (m != NULL) m_copyback(m, wrap, total_len - wrap, sc->rl_cdata.rl_rx_buf); cur_rx = (total_len - wrap + ETHER_CRC_LEN); } else { m = m_devget(rxbufpos, total_len, RL_ETHER_ALIGN, ifp, NULL); cur_rx += total_len + 4 + ETHER_CRC_LEN; } /* Round up to 32-bit boundary. 
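 * The driver keeps RL_CURRXADDR 16 bytes behind the actual read pointer,
 * mirroring the 16 byte offset that was added when the register was read
 * at the top of this function.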
*/ cur_rx = (cur_rx + 3) & ~3; CSR_WRITE_2(sc, RL_CURRXADDR, cur_rx - 16); if (m == NULL) { if_inc_counter(ifp, IFCOUNTER_IQDROPS, 1); continue; } if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1); RL_UNLOCK(sc); (*ifp->if_input)(ifp, m); RL_LOCK(sc); rx_npkts++; } /* No need to sync Rx memory block as we didn't modify it. */ return (rx_npkts); } /* * A frame was downloaded to the chip. It's safe for us to clean up * the list buffers. */ static void rl_txeof(struct rl_softc *sc) { struct ifnet *ifp = sc->rl_ifp; uint32_t txstat; RL_LOCK_ASSERT(sc); /* * Go through our tx list and free mbufs for those * frames that have been uploaded. */ do { if (RL_LAST_TXMBUF(sc) == NULL) break; txstat = CSR_READ_4(sc, RL_LAST_TXSTAT(sc)); if (!(txstat & (RL_TXSTAT_TX_OK| RL_TXSTAT_TX_UNDERRUN|RL_TXSTAT_TXABRT))) break; if_inc_counter(ifp, IFCOUNTER_COLLISIONS, (txstat & RL_TXSTAT_COLLCNT) >> 24); bus_dmamap_sync(sc->rl_cdata.rl_tx_tag, RL_LAST_DMAMAP(sc), BUS_DMASYNC_POSTWRITE); bus_dmamap_unload(sc->rl_cdata.rl_tx_tag, RL_LAST_DMAMAP(sc)); m_freem(RL_LAST_TXMBUF(sc)); RL_LAST_TXMBUF(sc) = NULL; /* * If there was a transmit underrun, bump the TX threshold. * Make sure not to overflow the 63 * 32byte we can address * with the 6 available bit. */ if ((txstat & RL_TXSTAT_TX_UNDERRUN) && (sc->rl_txthresh < 2016)) sc->rl_txthresh += 32; if (txstat & RL_TXSTAT_TX_OK) if_inc_counter(ifp, IFCOUNTER_OPACKETS, 1); else { int oldthresh; if_inc_counter(ifp, IFCOUNTER_OERRORS, 1); if ((txstat & RL_TXSTAT_TXABRT) || (txstat & RL_TXSTAT_OUTOFWIN)) CSR_WRITE_4(sc, RL_TXCFG, RL_TXCFG_CONFIG); oldthresh = sc->rl_txthresh; /* error recovery */ ifp->if_drv_flags &= ~IFF_DRV_RUNNING; rl_init_locked(sc); /* restore original threshold */ sc->rl_txthresh = oldthresh; return; } RL_INC(sc->rl_cdata.last_tx); ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; } while (sc->rl_cdata.last_tx != sc->rl_cdata.cur_tx); if (RL_LAST_TXMBUF(sc) == NULL) sc->rl_watchdog_timer = 0; } static void rl_twister_update(struct rl_softc *sc) { uint16_t linktest; /* * Table provided by RealTek (Kinston ) for * Linux driver. Values undocumented otherwise. */ static const uint32_t param[4][4] = { {0xcb39de43, 0xcb39ce43, 0xfb38de03, 0xcb38de43}, {0xcb39de43, 0xcb39ce43, 0xcb39ce83, 0xcb39ce83}, {0xcb39de43, 0xcb39ce43, 0xcb39ce83, 0xcb39ce83}, {0xbb39de43, 0xbb39ce43, 0xbb39ce83, 0xbb39ce83} }; /* * Tune the so-called twister registers of the RTL8139. These * are used to compensate for impedance mismatches. The * method for tuning these registers is undocumented and the * following procedure is collected from public sources. */ switch (sc->rl_twister) { case CHK_LINK: /* * If we have a sufficient link, then we can proceed in * the state machine to the next stage. If not, then * disable further tuning after writing sane defaults. */ if (CSR_READ_2(sc, RL_CSCFG) & RL_CSCFG_LINK_OK) { CSR_WRITE_2(sc, RL_CSCFG, RL_CSCFG_LINK_DOWN_OFF_CMD); sc->rl_twister = FIND_ROW; } else { CSR_WRITE_2(sc, RL_CSCFG, RL_CSCFG_LINK_DOWN_CMD); CSR_WRITE_4(sc, RL_NWAYTST, RL_NWAYTST_CBL_TEST); CSR_WRITE_4(sc, RL_PARA78, RL_PARA78_DEF); CSR_WRITE_4(sc, RL_PARA7C, RL_PARA7C_DEF); sc->rl_twister = DONE; } break; case FIND_ROW: /* * Read how long it took to see the echo to find the tuning * row to use. 
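 * The RL_CSCFG status bits indicate which row of the parameter table
 * above should be used for the attached cable.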
*/ linktest = CSR_READ_2(sc, RL_CSCFG) & RL_CSCFG_STATUS; if (linktest == RL_CSCFG_ROW3) sc->rl_twist_row = 3; else if (linktest == RL_CSCFG_ROW2) sc->rl_twist_row = 2; else if (linktest == RL_CSCFG_ROW1) sc->rl_twist_row = 1; else sc->rl_twist_row = 0; sc->rl_twist_col = 0; sc->rl_twister = SET_PARAM; break; case SET_PARAM: if (sc->rl_twist_col == 0) CSR_WRITE_4(sc, RL_NWAYTST, RL_NWAYTST_RESET); CSR_WRITE_4(sc, RL_PARA7C, param[sc->rl_twist_row][sc->rl_twist_col]); if (++sc->rl_twist_col == 4) { if (sc->rl_twist_row == 3) sc->rl_twister = RECHK_LONG; else sc->rl_twister = DONE; } break; case RECHK_LONG: /* * For long cables, we have to double check to make sure we * don't mistune. */ linktest = CSR_READ_2(sc, RL_CSCFG) & RL_CSCFG_STATUS; if (linktest == RL_CSCFG_ROW3) sc->rl_twister = DONE; else { CSR_WRITE_4(sc, RL_PARA7C, RL_PARA7C_RETUNE); sc->rl_twister = RETUNE; } break; case RETUNE: /* Retune for a shorter cable (try column 2) */ CSR_WRITE_4(sc, RL_NWAYTST, RL_NWAYTST_CBL_TEST); CSR_WRITE_4(sc, RL_PARA78, RL_PARA78_DEF); CSR_WRITE_4(sc, RL_PARA7C, RL_PARA7C_DEF); CSR_WRITE_4(sc, RL_NWAYTST, RL_NWAYTST_RESET); sc->rl_twist_row--; sc->rl_twist_col = 0; sc->rl_twister = SET_PARAM; break; case DONE: break; } } static void rl_tick(void *xsc) { struct rl_softc *sc = xsc; struct mii_data *mii; int ticks; RL_LOCK_ASSERT(sc); /* * If we're doing the twister cable calibration, then we need to defer * watchdog timeouts. This is a no-op in normal operations, but * can falsely trigger when the cable calibration takes a while and * there was traffic ready to go when rl was started. * * We don't defer mii_tick since that updates the mii status, which * helps the twister process, at least according to similar patches * for the Linux driver I found online while doing the fixes. Worst * case is a few extra mii reads during calibration. */ mii = device_get_softc(sc->rl_miibus); mii_tick(mii); if ((sc->rl_flags & RL_FLAG_LINK) == 0) rl_miibus_statchg(sc->rl_dev); if (sc->rl_twister_enable) { if (sc->rl_twister == DONE) rl_watchdog(sc); else rl_twister_update(sc); if (sc->rl_twister == DONE) ticks = hz; else ticks = hz / 10; } else { rl_watchdog(sc); ticks = hz; } callout_reset(&sc->rl_stat_callout, ticks, rl_tick, sc); } #ifdef DEVICE_POLLING static int rl_poll(struct ifnet *ifp, enum poll_cmd cmd, int count) { struct rl_softc *sc = ifp->if_softc; int rx_npkts = 0; RL_LOCK(sc); if (ifp->if_drv_flags & IFF_DRV_RUNNING) rx_npkts = rl_poll_locked(ifp, cmd, count); RL_UNLOCK(sc); return (rx_npkts); } static int rl_poll_locked(struct ifnet *ifp, enum poll_cmd cmd, int count) { struct rl_softc *sc = ifp->if_softc; int rx_npkts; RL_LOCK_ASSERT(sc); sc->rxcycles = count; rx_npkts = rl_rxeof(sc); rl_txeof(sc); if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) rl_start_locked(ifp); if (cmd == POLL_AND_CHECK_STATUS) { uint16_t status; /* We should also check the status register. */ status = CSR_READ_2(sc, RL_ISR); if (status == 0xffff) return (rx_npkts); if (status != 0) CSR_WRITE_2(sc, RL_ISR, status); /* XXX We should check behaviour on receiver stalls. 
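 * For now only RL_ISR_SYSTEM_ERR is acted upon, by reinitializing the
 * chip.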
*/ if (status & RL_ISR_SYSTEM_ERR) { ifp->if_drv_flags &= ~IFF_DRV_RUNNING; rl_init_locked(sc); } } return (rx_npkts); } #endif /* DEVICE_POLLING */ static void rl_intr(void *arg) { struct rl_softc *sc = arg; struct ifnet *ifp = sc->rl_ifp; uint16_t status; int count; RL_LOCK(sc); if (sc->suspended) goto done_locked; #ifdef DEVICE_POLLING if (ifp->if_capenable & IFCAP_POLLING) goto done_locked; #endif if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) goto done_locked2; status = CSR_READ_2(sc, RL_ISR); if (status == 0xffff || (status & RL_INTRS) == 0) goto done_locked; /* * Ours, disable further interrupts. */ CSR_WRITE_2(sc, RL_IMR, 0); for (count = 16; count > 0; count--) { CSR_WRITE_2(sc, RL_ISR, status); if (ifp->if_drv_flags & IFF_DRV_RUNNING) { if (status & (RL_ISR_RX_OK | RL_ISR_RX_ERR)) rl_rxeof(sc); if (status & (RL_ISR_TX_OK | RL_ISR_TX_ERR)) rl_txeof(sc); if (status & RL_ISR_SYSTEM_ERR) { ifp->if_drv_flags &= ~IFF_DRV_RUNNING; rl_init_locked(sc); RL_UNLOCK(sc); return; } } status = CSR_READ_2(sc, RL_ISR); /* If the card has gone away, the read returns 0xffff. */ if (status == 0xffff || (status & RL_INTRS) == 0) break; } if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) rl_start_locked(ifp); done_locked2: if (ifp->if_drv_flags & IFF_DRV_RUNNING) CSR_WRITE_2(sc, RL_IMR, RL_INTRS); done_locked: RL_UNLOCK(sc); } /* * Encapsulate an mbuf chain in a descriptor by coupling the mbuf data * pointers to the fragment pointers. */ static int rl_encap(struct rl_softc *sc, struct mbuf **m_head) { struct mbuf *m; bus_dma_segment_t txsegs[1]; int error, nsegs, padlen; RL_LOCK_ASSERT(sc); m = *m_head; padlen = 0; /* * Hardware doesn't auto-pad, so we have to make sure * pad short frames out to the minimum frame length. */ if (m->m_pkthdr.len < RL_MIN_FRAMELEN) padlen = RL_MIN_FRAMELEN - m->m_pkthdr.len; /* * The RealTek is brain damaged and wants longword-aligned * TX buffers, plus we can only have one fragment buffer * per packet. We have to copy pretty much all the time. */ if (m->m_next != NULL || (mtod(m, uintptr_t) & 3) != 0 || (padlen > 0 && M_TRAILINGSPACE(m) < padlen)) { m = m_defrag(*m_head, M_NOWAIT); if (m == NULL) { m_freem(*m_head); *m_head = NULL; return (ENOMEM); } } *m_head = m; if (padlen > 0) { /* * Make security-conscious people happy: zero out the * bytes in the pad area, since we don't know what * this mbuf cluster buffer's previous user might * have left in it. */ bzero(mtod(m, char *) + m->m_pkthdr.len, padlen); m->m_pkthdr.len += padlen; m->m_len = m->m_pkthdr.len; } error = bus_dmamap_load_mbuf_sg(sc->rl_cdata.rl_tx_tag, RL_CUR_DMAMAP(sc), m, txsegs, &nsegs, 0); if (error != 0) return (error); if (nsegs == 0) { m_freem(*m_head); *m_head = NULL; return (EIO); } RL_CUR_TXMBUF(sc) = m; bus_dmamap_sync(sc->rl_cdata.rl_tx_tag, RL_CUR_DMAMAP(sc), BUS_DMASYNC_PREWRITE); CSR_WRITE_4(sc, RL_CUR_TXADDR(sc), RL_ADDR_LO(txsegs[0].ds_addr)); return (0); } /* * Main transmit routine. 
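 * rl_start() only takes the driver lock and defers to rl_start_locked(),
 * which dequeues frames, maps them with rl_encap() and kicks off
 * transmission by writing the length and threshold into the current
 * TX status register.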
*/ static void rl_start(struct ifnet *ifp) { struct rl_softc *sc = ifp->if_softc; RL_LOCK(sc); rl_start_locked(ifp); RL_UNLOCK(sc); } static void rl_start_locked(struct ifnet *ifp) { struct rl_softc *sc = ifp->if_softc; struct mbuf *m_head = NULL; RL_LOCK_ASSERT(sc); if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) != IFF_DRV_RUNNING || (sc->rl_flags & RL_FLAG_LINK) == 0) return; while (RL_CUR_TXMBUF(sc) == NULL) { IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head); if (m_head == NULL) break; if (rl_encap(sc, &m_head)) { if (m_head == NULL) break; IFQ_DRV_PREPEND(&ifp->if_snd, m_head); ifp->if_drv_flags |= IFF_DRV_OACTIVE; break; } /* Pass a copy of this mbuf chain to the bpf subsystem. */ BPF_MTAP(ifp, RL_CUR_TXMBUF(sc)); /* Transmit the frame. */ CSR_WRITE_4(sc, RL_CUR_TXSTAT(sc), RL_TXTHRESH(sc->rl_txthresh) | RL_CUR_TXMBUF(sc)->m_pkthdr.len); RL_INC(sc->rl_cdata.cur_tx); /* Set a timeout in case the chip goes out to lunch. */ sc->rl_watchdog_timer = 5; } /* * We broke out of the loop because all our TX slots are * full. Mark the NIC as busy until it drains some of the * packets from the queue. */ if (RL_CUR_TXMBUF(sc) != NULL) ifp->if_drv_flags |= IFF_DRV_OACTIVE; } static void rl_init(void *xsc) { struct rl_softc *sc = xsc; RL_LOCK(sc); rl_init_locked(sc); RL_UNLOCK(sc); } static void rl_init_locked(struct rl_softc *sc) { struct ifnet *ifp = sc->rl_ifp; struct mii_data *mii; uint32_t eaddr[2]; RL_LOCK_ASSERT(sc); mii = device_get_softc(sc->rl_miibus); if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) return; /* * Cancel pending I/O and free all RX/TX buffers. */ rl_stop(sc); rl_reset(sc); if (sc->rl_twister_enable) { /* * Reset twister register tuning state. The twister * registers and their tuning are undocumented, but * are necessary to cope with bad links. rl_twister = * DONE here will disable this entirely. */ sc->rl_twister = CHK_LINK; } /* * Init our MAC address. Even though the chipset * documentation doesn't mention it, we need to enter "Config * register write enable" mode to modify the ID registers. */ CSR_WRITE_1(sc, RL_EECMD, RL_EEMODE_WRITECFG); bzero(eaddr, sizeof(eaddr)); bcopy(IF_LLADDR(sc->rl_ifp), eaddr, ETHER_ADDR_LEN); CSR_WRITE_STREAM_4(sc, RL_IDR0, eaddr[0]); CSR_WRITE_STREAM_4(sc, RL_IDR4, eaddr[1]); CSR_WRITE_1(sc, RL_EECMD, RL_EEMODE_OFF); /* Init the RX memory block pointer register. */ CSR_WRITE_4(sc, RL_RXADDR, sc->rl_cdata.rl_rx_buf_paddr + RL_RX_8139_BUF_RESERVE); /* Init TX descriptors. */ rl_list_tx_init(sc); /* Init Rx memory block. */ rl_list_rx_init(sc); /* * Enable transmit and receive. */ CSR_WRITE_1(sc, RL_COMMAND, RL_CMD_TX_ENB|RL_CMD_RX_ENB); /* * Set the initial TX and RX configuration. */ CSR_WRITE_4(sc, RL_TXCFG, RL_TXCFG_CONFIG); CSR_WRITE_4(sc, RL_RXCFG, RL_RXCFG_CONFIG); /* Set RX filter. */ rl_rxfilter(sc); #ifdef DEVICE_POLLING /* Disable interrupts if we are polling. */ if (ifp->if_capenable & IFCAP_POLLING) CSR_WRITE_2(sc, RL_IMR, 0); else #endif /* Enable interrupts. */ CSR_WRITE_2(sc, RL_IMR, RL_INTRS); /* Set initial TX threshold */ sc->rl_txthresh = RL_TX_THRESH_INIT; /* Start RX/TX process. */ CSR_WRITE_4(sc, RL_MISSEDPKT, 0); /* Enable receiver and transmitter. */ CSR_WRITE_1(sc, RL_COMMAND, RL_CMD_TX_ENB|RL_CMD_RX_ENB); sc->rl_flags &= ~RL_FLAG_LINK; mii_mediachg(mii); CSR_WRITE_1(sc, sc->rl_cfg1, RL_CFG1_DRVLOAD|RL_CFG1_FULLDUPLEX); ifp->if_drv_flags |= IFF_DRV_RUNNING; ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; callout_reset(&sc->rl_stat_callout, hz, rl_tick, sc); } /* * Set media options. 
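 * Media changes are simply delegated to the MII layer via mii_mediachg().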
*/ static int rl_ifmedia_upd(struct ifnet *ifp) { struct rl_softc *sc = ifp->if_softc; struct mii_data *mii; mii = device_get_softc(sc->rl_miibus); RL_LOCK(sc); mii_mediachg(mii); RL_UNLOCK(sc); return (0); } /* * Report current media status. */ static void rl_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr) { struct rl_softc *sc = ifp->if_softc; struct mii_data *mii; mii = device_get_softc(sc->rl_miibus); RL_LOCK(sc); mii_pollstat(mii); ifmr->ifm_active = mii->mii_media_active; ifmr->ifm_status = mii->mii_media_status; RL_UNLOCK(sc); } static int rl_ioctl(struct ifnet *ifp, u_long command, caddr_t data) { struct ifreq *ifr = (struct ifreq *)data; struct mii_data *mii; struct rl_softc *sc = ifp->if_softc; int error = 0, mask; switch (command) { case SIOCSIFFLAGS: RL_LOCK(sc); if (ifp->if_flags & IFF_UP) { if (ifp->if_drv_flags & IFF_DRV_RUNNING && ((ifp->if_flags ^ sc->rl_if_flags) & (IFF_PROMISC | IFF_ALLMULTI))) rl_rxfilter(sc); else rl_init_locked(sc); } else if (ifp->if_drv_flags & IFF_DRV_RUNNING) rl_stop(sc); sc->rl_if_flags = ifp->if_flags; RL_UNLOCK(sc); break; case SIOCADDMULTI: case SIOCDELMULTI: RL_LOCK(sc); rl_rxfilter(sc); RL_UNLOCK(sc); break; case SIOCGIFMEDIA: case SIOCSIFMEDIA: mii = device_get_softc(sc->rl_miibus); error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, command); break; case SIOCSIFCAP: mask = ifr->ifr_reqcap ^ ifp->if_capenable; #ifdef DEVICE_POLLING if (ifr->ifr_reqcap & IFCAP_POLLING && !(ifp->if_capenable & IFCAP_POLLING)) { error = ether_poll_register(rl_poll, ifp); if (error) return(error); RL_LOCK(sc); /* Disable interrupts */ CSR_WRITE_2(sc, RL_IMR, 0x0000); ifp->if_capenable |= IFCAP_POLLING; RL_UNLOCK(sc); return (error); } if (!(ifr->ifr_reqcap & IFCAP_POLLING) && ifp->if_capenable & IFCAP_POLLING) { error = ether_poll_deregister(ifp); /* Enable interrupts. */ RL_LOCK(sc); CSR_WRITE_2(sc, RL_IMR, RL_INTRS); ifp->if_capenable &= ~IFCAP_POLLING; RL_UNLOCK(sc); return (error); } #endif /* DEVICE_POLLING */ if ((mask & IFCAP_WOL) != 0 && (ifp->if_capabilities & IFCAP_WOL) != 0) { if ((mask & IFCAP_WOL_UCAST) != 0) ifp->if_capenable ^= IFCAP_WOL_UCAST; if ((mask & IFCAP_WOL_MCAST) != 0) ifp->if_capenable ^= IFCAP_WOL_MCAST; if ((mask & IFCAP_WOL_MAGIC) != 0) ifp->if_capenable ^= IFCAP_WOL_MAGIC; } break; default: error = ether_ioctl(ifp, command, data); break; } return (error); } static void rl_watchdog(struct rl_softc *sc) { RL_LOCK_ASSERT(sc); if (sc->rl_watchdog_timer == 0 || --sc->rl_watchdog_timer >0) return; device_printf(sc->rl_dev, "watchdog timeout\n"); if_inc_counter(sc->rl_ifp, IFCOUNTER_OERRORS, 1); rl_txeof(sc); rl_rxeof(sc); sc->rl_ifp->if_drv_flags &= ~IFF_DRV_RUNNING; rl_init_locked(sc); } /* * Stop the adapter and free any mbufs allocated to the * RX and TX lists. */ static void rl_stop(struct rl_softc *sc) { int i; struct ifnet *ifp = sc->rl_ifp; RL_LOCK_ASSERT(sc); sc->rl_watchdog_timer = 0; callout_stop(&sc->rl_stat_callout); ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE); sc->rl_flags &= ~RL_FLAG_LINK; CSR_WRITE_1(sc, RL_COMMAND, 0x00); CSR_WRITE_2(sc, RL_IMR, 0x0000); for (i = 0; i < RL_TIMEOUT; i++) { DELAY(10); if ((CSR_READ_1(sc, RL_COMMAND) & (RL_CMD_RX_ENB | RL_CMD_TX_ENB)) == 0) break; } if (i == RL_TIMEOUT) device_printf(sc->rl_dev, "Unable to stop Tx/Rx MAC\n"); /* * Free the TX list buffers. 
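 * Any mbuf still attached to a Tx slot is synced, unloaded and freed,
 * and the corresponding TX address register is cleared.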
*/ for (i = 0; i < RL_TX_LIST_CNT; i++) { if (sc->rl_cdata.rl_tx_chain[i] != NULL) { bus_dmamap_sync(sc->rl_cdata.rl_tx_tag, sc->rl_cdata.rl_tx_dmamap[i], BUS_DMASYNC_POSTWRITE); bus_dmamap_unload(sc->rl_cdata.rl_tx_tag, sc->rl_cdata.rl_tx_dmamap[i]); m_freem(sc->rl_cdata.rl_tx_chain[i]); sc->rl_cdata.rl_tx_chain[i] = NULL; CSR_WRITE_4(sc, RL_TXADDR0 + (i * sizeof(uint32_t)), 0x0000000); } } } /* * Device suspend routine. Stop the interface and save some PCI * settings in case the BIOS doesn't restore them properly on * resume. */ static int rl_suspend(device_t dev) { struct rl_softc *sc; sc = device_get_softc(dev); RL_LOCK(sc); rl_stop(sc); rl_setwol(sc); sc->suspended = 1; RL_UNLOCK(sc); return (0); } /* * Device resume routine. Restore some PCI settings in case the BIOS * doesn't, re-enable busmastering, and restart the interface if * appropriate. */ static int rl_resume(device_t dev) { struct rl_softc *sc; struct ifnet *ifp; int pmc; uint16_t pmstat; sc = device_get_softc(dev); ifp = sc->rl_ifp; RL_LOCK(sc); if ((ifp->if_capabilities & IFCAP_WOL) != 0 && pci_find_cap(sc->rl_dev, PCIY_PMG, &pmc) == 0) { /* Disable PME and clear PME status. */ pmstat = pci_read_config(sc->rl_dev, pmc + PCIR_POWER_STATUS, 2); if ((pmstat & PCIM_PSTAT_PMEENABLE) != 0) { pmstat &= ~PCIM_PSTAT_PMEENABLE; pci_write_config(sc->rl_dev, pmc + PCIR_POWER_STATUS, pmstat, 2); } /* * Clear WOL matching such that normal Rx filtering * wouldn't interfere with WOL patterns. */ rl_clrwol(sc); } /* reinitialize interface if necessary */ if (ifp->if_flags & IFF_UP) rl_init_locked(sc); sc->suspended = 0; RL_UNLOCK(sc); return (0); } /* * Stop all chip I/O so that the kernel's probe routines don't * get confused by errant DMAs when rebooting. */ static int rl_shutdown(device_t dev) { struct rl_softc *sc; sc = device_get_softc(dev); RL_LOCK(sc); rl_stop(sc); /* * Mark interface as down since otherwise we will panic if * interrupt comes in later on, which can happen in some * cases. */ sc->rl_ifp->if_flags &= ~IFF_UP; rl_setwol(sc); RL_UNLOCK(sc); return (0); } static void rl_setwol(struct rl_softc *sc) { struct ifnet *ifp; int pmc; uint16_t pmstat; uint8_t v; RL_LOCK_ASSERT(sc); ifp = sc->rl_ifp; if ((ifp->if_capabilities & IFCAP_WOL) == 0) return; if (pci_find_cap(sc->rl_dev, PCIY_PMG, &pmc) != 0) return; /* Enable config register write. */ CSR_WRITE_1(sc, RL_EECMD, RL_EE_MODE); /* Enable PME. */ v = CSR_READ_1(sc, sc->rl_cfg1); v &= ~RL_CFG1_PME; if ((ifp->if_capenable & IFCAP_WOL) != 0) v |= RL_CFG1_PME; CSR_WRITE_1(sc, sc->rl_cfg1, v); v = CSR_READ_1(sc, sc->rl_cfg3); v &= ~(RL_CFG3_WOL_LINK | RL_CFG3_WOL_MAGIC); if ((ifp->if_capenable & IFCAP_WOL_MAGIC) != 0) v |= RL_CFG3_WOL_MAGIC; CSR_WRITE_1(sc, sc->rl_cfg3, v); v = CSR_READ_1(sc, sc->rl_cfg5); v &= ~(RL_CFG5_WOL_BCAST | RL_CFG5_WOL_MCAST | RL_CFG5_WOL_UCAST); v &= ~RL_CFG5_WOL_LANWAKE; if ((ifp->if_capenable & IFCAP_WOL_UCAST) != 0) v |= RL_CFG5_WOL_UCAST; if ((ifp->if_capenable & IFCAP_WOL_MCAST) != 0) v |= RL_CFG5_WOL_MCAST | RL_CFG5_WOL_BCAST; if ((ifp->if_capenable & IFCAP_WOL) != 0) v |= RL_CFG5_WOL_LANWAKE; CSR_WRITE_1(sc, sc->rl_cfg5, v); /* Config register write done. */ CSR_WRITE_1(sc, RL_EECMD, RL_EEMODE_OFF); /* Request PME if WOL is requested. 
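 * PME# is only armed in the PCI power management status register when at
 * least one WOL capability is enabled on the interface.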
*/ pmstat = pci_read_config(sc->rl_dev, pmc + PCIR_POWER_STATUS, 2); pmstat &= ~(PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE); if ((ifp->if_capenable & IFCAP_WOL) != 0) pmstat |= PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE; pci_write_config(sc->rl_dev, pmc + PCIR_POWER_STATUS, pmstat, 2); } static void rl_clrwol(struct rl_softc *sc) { struct ifnet *ifp; uint8_t v; ifp = sc->rl_ifp; if ((ifp->if_capabilities & IFCAP_WOL) == 0) return; /* Enable config register write. */ CSR_WRITE_1(sc, RL_EECMD, RL_EE_MODE); v = CSR_READ_1(sc, sc->rl_cfg3); v &= ~(RL_CFG3_WOL_LINK | RL_CFG3_WOL_MAGIC); CSR_WRITE_1(sc, sc->rl_cfg3, v); /* Config register write done. */ CSR_WRITE_1(sc, RL_EECMD, RL_EEMODE_OFF); v = CSR_READ_1(sc, sc->rl_cfg5); v &= ~(RL_CFG5_WOL_BCAST | RL_CFG5_WOL_MCAST | RL_CFG5_WOL_UCAST); v &= ~RL_CFG5_WOL_LANWAKE; CSR_WRITE_1(sc, sc->rl_cfg5, v); }