diff --git a/sys/arm/allwinner/aw_mmc.c b/sys/arm/allwinner/aw_mmc.c index 634afdfdfd46..9f61e1560658 100644 --- a/sys/arm/allwinner/aw_mmc.c +++ b/sys/arm/allwinner/aw_mmc.c @@ -1,1520 +1,1514 @@ /*- * SPDX-License-Identifier: BSD-2-Clause * * Copyright (c) 2018 Emmanuel Vadot * Copyright (c) 2013 Alexander Fedorov * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "opt_mmccam.h" #ifdef MMCCAM #include #include #include #include #include #include #include "mmc_sim_if.h" #endif #include "mmc_pwrseq_if.h" #define AW_MMC_MEMRES 0 #define AW_MMC_IRQRES 1 #define AW_MMC_RESSZ 2 #define AW_MMC_DMA_SEGS (PAGE_SIZE / sizeof(struct aw_mmc_dma_desc)) #define AW_MMC_DMA_DESC_SIZE (sizeof(struct aw_mmc_dma_desc) * AW_MMC_DMA_SEGS) #define AW_MMC_DMA_FTRGLEVEL 0x20070008 #define AW_MMC_RESET_RETRY 1000 #define CARD_ID_FREQUENCY 400000 struct aw_mmc_conf { uint32_t dma_xferlen; bool mask_data0; bool can_calibrate; bool new_timing; }; static const struct aw_mmc_conf a10_mmc_conf = { .dma_xferlen = 0x2000, }; static const struct aw_mmc_conf a13_mmc_conf = { .dma_xferlen = 0x10000, }; static const struct aw_mmc_conf a64_mmc_conf = { .dma_xferlen = 0x10000, .mask_data0 = true, .can_calibrate = true, .new_timing = true, }; static const struct aw_mmc_conf a64_emmc_conf = { .dma_xferlen = 0x2000, .can_calibrate = true, }; static struct ofw_compat_data compat_data[] = { {"allwinner,sun4i-a10-mmc", (uintptr_t)&a10_mmc_conf}, {"allwinner,sun5i-a13-mmc", (uintptr_t)&a13_mmc_conf}, {"allwinner,sun7i-a20-mmc", (uintptr_t)&a13_mmc_conf}, {"allwinner,sun50i-a64-mmc", (uintptr_t)&a64_mmc_conf}, {"allwinner,sun50i-a64-emmc", (uintptr_t)&a64_emmc_conf}, {NULL, 0} }; struct aw_mmc_softc { device_t aw_dev; clk_t aw_clk_ahb; clk_t aw_clk_mmc; hwreset_t aw_rst_ahb; int aw_bus_busy; int aw_resid; int aw_timeout; struct callout aw_timeoutc; struct mmc_host aw_host; struct mmc_helper mmc_helper; #ifdef MMCCAM union ccb * ccb; struct mmc_sim mmc_sim; #else struct mmc_request * aw_req; #endif struct mtx aw_mtx; struct resource * aw_res[AW_MMC_RESSZ]; struct aw_mmc_conf * aw_mmc_conf; uint32_t aw_intr; 
uint32_t aw_intr_wait; void * aw_intrhand; unsigned int aw_clock; device_t child; /* Fields required for DMA access. */ bus_addr_t aw_dma_desc_phys; bus_dmamap_t aw_dma_map; bus_dma_tag_t aw_dma_tag; void * aw_dma_desc; bus_dmamap_t aw_dma_buf_map; bus_dma_tag_t aw_dma_buf_tag; int aw_dma_map_err; }; static struct resource_spec aw_mmc_res_spec[] = { { SYS_RES_MEMORY, 0, RF_ACTIVE }, { SYS_RES_IRQ, 0, RF_ACTIVE | RF_SHAREABLE }, { -1, 0, 0 } }; static int aw_mmc_probe(device_t); static int aw_mmc_attach(device_t); static int aw_mmc_detach(device_t); static int aw_mmc_setup_dma(struct aw_mmc_softc *); static void aw_mmc_teardown_dma(struct aw_mmc_softc *sc); static int aw_mmc_reset(struct aw_mmc_softc *); static int aw_mmc_init(struct aw_mmc_softc *); static void aw_mmc_intr(void *); static int aw_mmc_update_clock(struct aw_mmc_softc *, uint32_t); static void aw_mmc_helper_cd_handler(device_t, bool); static void aw_mmc_print_error(uint32_t); static int aw_mmc_update_ios(device_t, device_t); static int aw_mmc_request(device_t, device_t, struct mmc_request *); #ifndef MMCCAM static int aw_mmc_get_ro(device_t, device_t); static int aw_mmc_acquire_host(device_t, device_t); static int aw_mmc_release_host(device_t, device_t); #endif #define AW_MMC_LOCK(_sc) mtx_lock(&(_sc)->aw_mtx) #define AW_MMC_UNLOCK(_sc) mtx_unlock(&(_sc)->aw_mtx) #define AW_MMC_READ_4(_sc, _reg) \ bus_read_4((_sc)->aw_res[AW_MMC_MEMRES], _reg) #define AW_MMC_WRITE_4(_sc, _reg, _value) \ bus_write_4((_sc)->aw_res[AW_MMC_MEMRES], _reg, _value) SYSCTL_NODE(_hw, OID_AUTO, aw_mmc, CTLFLAG_RD | CTLFLAG_MPSAFE, 0, "aw_mmc driver"); static int aw_mmc_debug = 0; SYSCTL_INT(_hw_aw_mmc, OID_AUTO, debug, CTLFLAG_RWTUN, &aw_mmc_debug, 0, "Debug level bit0=card changes bit1=ios changes, bit2=interrupts, bit3=commands"); #define AW_MMC_DEBUG_CARD 0x1 #define AW_MMC_DEBUG_IOS 0x2 #define AW_MMC_DEBUG_INT 0x4 #define AW_MMC_DEBUG_CMD 0x8 #ifdef MMCCAM static int aw_mmc_get_tran_settings(device_t dev, struct ccb_trans_settings_mmc *cts) { struct aw_mmc_softc *sc; sc = device_get_softc(dev); cts->host_ocr = sc->aw_host.host_ocr; cts->host_f_min = sc->aw_host.f_min; cts->host_f_max = sc->aw_host.f_max; cts->host_caps = sc->aw_host.caps; cts->host_max_data = (sc->aw_mmc_conf->dma_xferlen * AW_MMC_DMA_SEGS) / MMC_SECTOR_SIZE; memcpy(&cts->ios, &sc->aw_host.ios, sizeof(struct mmc_ios)); return (0); } static int aw_mmc_set_tran_settings(device_t dev, struct ccb_trans_settings_mmc *cts) { struct aw_mmc_softc *sc; struct mmc_ios *ios; struct mmc_ios *new_ios; sc = device_get_softc(dev); ios = &sc->aw_host.ios; new_ios = &cts->ios; /* Update only requested fields */ if (cts->ios_valid & MMC_CLK) { ios->clock = new_ios->clock; if (__predict_false(aw_mmc_debug & AW_MMC_DEBUG_IOS)) device_printf(sc->aw_dev, "Clock => %d\n", ios->clock); } if (cts->ios_valid & MMC_VDD) { ios->vdd = new_ios->vdd; if (__predict_false(aw_mmc_debug & AW_MMC_DEBUG_IOS)) device_printf(sc->aw_dev, "VDD => %d\n", ios->vdd); } if (cts->ios_valid & MMC_CS) { ios->chip_select = new_ios->chip_select; if (__predict_false(aw_mmc_debug & AW_MMC_DEBUG_IOS)) device_printf(sc->aw_dev, "CS => %d\n", ios->chip_select); } if (cts->ios_valid & MMC_BW) { ios->bus_width = new_ios->bus_width; if (__predict_false(aw_mmc_debug & AW_MMC_DEBUG_IOS)) device_printf(sc->aw_dev, "Bus width => %d\n", ios->bus_width); } if (cts->ios_valid & MMC_PM) { ios->power_mode = new_ios->power_mode; if (__predict_false(aw_mmc_debug & AW_MMC_DEBUG_IOS)) device_printf(sc->aw_dev, "Power mode => %d\n", ios->power_mode); 
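		/*
		 * Editor's sketch (not part of the driver): each MMC_* bit in
		 * cts->ios_valid gates exactly one field copy in this function,
		 * so a MMCCAM caller that only wants a new bus clock would fill
		 * in just that field and set just that bit, e.g.:
		 *
		 *	cts.ios.clock = 400000;
		 *	cts.ios_valid = MMC_CLK;
		 *
		 * leaving every other ios field untouched on the host side.
		 */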
} if (cts->ios_valid & MMC_BT) { ios->timing = new_ios->timing; if (__predict_false(aw_mmc_debug & AW_MMC_DEBUG_IOS)) device_printf(sc->aw_dev, "Timing => %d\n", ios->timing); } if (cts->ios_valid & MMC_BM) { ios->bus_mode = new_ios->bus_mode; if (__predict_false(aw_mmc_debug & AW_MMC_DEBUG_IOS)) device_printf(sc->aw_dev, "Bus mode => %d\n", ios->bus_mode); } return (aw_mmc_update_ios(sc->aw_dev, NULL)); } static int aw_mmc_cam_request(device_t dev, union ccb *ccb) { struct aw_mmc_softc *sc; struct ccb_mmcio *mmcio; sc = device_get_softc(dev); mmcio = &ccb->mmcio; AW_MMC_LOCK(sc); if (__predict_false(aw_mmc_debug & AW_MMC_DEBUG_CMD)) { device_printf(sc->aw_dev, "CMD%u arg %#x flags %#x dlen %u dflags %#x\n", mmcio->cmd.opcode, mmcio->cmd.arg, mmcio->cmd.flags, mmcio->cmd.data != NULL ? (unsigned int) mmcio->cmd.data->len : 0, mmcio->cmd.data != NULL ? mmcio->cmd.data->flags: 0); } if (mmcio->cmd.data != NULL) { if (mmcio->cmd.data->len == 0 || mmcio->cmd.data->flags == 0) panic("data->len = %d, data->flags = %d -- something is b0rked", (int)mmcio->cmd.data->len, mmcio->cmd.data->flags); } if (sc->ccb != NULL) { device_printf(sc->aw_dev, "Controller still has an active command\n"); return (EBUSY); } sc->ccb = ccb; /* aw_mmc_request locks again */ AW_MMC_UNLOCK(sc); aw_mmc_request(sc->aw_dev, NULL, NULL); return (0); } static void aw_mmc_cam_poll(device_t dev) { struct aw_mmc_softc *sc; sc = device_get_softc(dev); aw_mmc_intr(sc); } #endif /* MMCCAM */ static void aw_mmc_helper_cd_handler(device_t dev, bool present) { struct aw_mmc_softc *sc; sc = device_get_softc(dev); #ifdef MMCCAM mmc_cam_sim_discover(&sc->mmc_sim); #else AW_MMC_LOCK(sc); if (present) { if (sc->child == NULL) { if (__predict_false(aw_mmc_debug & AW_MMC_DEBUG_CARD)) device_printf(sc->aw_dev, "Card inserted\n"); sc->child = device_add_child(sc->aw_dev, "mmc", DEVICE_UNIT_ANY); AW_MMC_UNLOCK(sc); if (sc->child) { device_set_ivars(sc->child, sc); (void)device_probe_and_attach(sc->child); } } else AW_MMC_UNLOCK(sc); } else { /* Card isn't present, detach if necessary */ if (sc->child != NULL) { if (__predict_false(aw_mmc_debug & AW_MMC_DEBUG_CARD)) device_printf(sc->aw_dev, "Card removed\n"); AW_MMC_UNLOCK(sc); device_delete_child(sc->aw_dev, sc->child); sc->child = NULL; } else AW_MMC_UNLOCK(sc); } #endif /* MMCCAM */ } static int aw_mmc_probe(device_t dev) { if (!ofw_bus_status_okay(dev)) return (ENXIO); if (ofw_bus_search_compatible(dev, compat_data)->ocd_data == 0) return (ENXIO); device_set_desc(dev, "Allwinner Integrated MMC/SD controller"); return (BUS_PROBE_DEFAULT); } static int aw_mmc_attach(device_t dev) { struct aw_mmc_softc *sc; struct sysctl_ctx_list *ctx; struct sysctl_oid_list *tree; int error; sc = device_get_softc(dev); sc->aw_dev = dev; sc->aw_mmc_conf = (struct aw_mmc_conf *)ofw_bus_search_compatible(dev, compat_data)->ocd_data; #ifndef MMCCAM sc->aw_req = NULL; #endif if (bus_alloc_resources(dev, aw_mmc_res_spec, sc->aw_res) != 0) { device_printf(dev, "cannot allocate device resources\n"); return (ENXIO); } if (bus_setup_intr(dev, sc->aw_res[AW_MMC_IRQRES], INTR_TYPE_NET | INTR_MPSAFE, NULL, aw_mmc_intr, sc, &sc->aw_intrhand)) { bus_release_resources(dev, aw_mmc_res_spec, sc->aw_res); device_printf(dev, "cannot setup interrupt handler\n"); return (ENXIO); } mtx_init(&sc->aw_mtx, device_get_nameunit(sc->aw_dev), "aw_mmc", MTX_DEF); callout_init_mtx(&sc->aw_timeoutc, &sc->aw_mtx, 0); /* De-assert reset */ if (hwreset_get_by_ofw_name(dev, 0, "ahb", &sc->aw_rst_ahb) == 0) { error = 
hwreset_deassert(sc->aw_rst_ahb); if (error != 0) { device_printf(dev, "cannot de-assert reset\n"); goto fail; } } /* Activate the module clock. */ error = clk_get_by_ofw_name(dev, 0, "ahb", &sc->aw_clk_ahb); if (error != 0) { device_printf(dev, "cannot get ahb clock\n"); goto fail; } error = clk_enable(sc->aw_clk_ahb); if (error != 0) { device_printf(dev, "cannot enable ahb clock\n"); goto fail; } error = clk_get_by_ofw_name(dev, 0, "mmc", &sc->aw_clk_mmc); if (error != 0) { device_printf(dev, "cannot get mmc clock\n"); goto fail; } error = clk_set_freq(sc->aw_clk_mmc, CARD_ID_FREQUENCY, CLK_SET_ROUND_DOWN); if (error != 0) { device_printf(dev, "cannot init mmc clock\n"); goto fail; } error = clk_enable(sc->aw_clk_mmc); if (error != 0) { device_printf(dev, "cannot enable mmc clock\n"); goto fail; } sc->aw_timeout = 10; ctx = device_get_sysctl_ctx(dev); tree = SYSCTL_CHILDREN(device_get_sysctl_tree(dev)); SYSCTL_ADD_INT(ctx, tree, OID_AUTO, "req_timeout", CTLFLAG_RW, &sc->aw_timeout, 0, "Request timeout in seconds"); /* Soft Reset controller. */ if (aw_mmc_reset(sc) != 0) { device_printf(dev, "cannot reset the controller\n"); goto fail; } if (aw_mmc_setup_dma(sc) != 0) { device_printf(sc->aw_dev, "Couldn't setup DMA!\n"); goto fail; } /* Set some defaults for freq and supported mode */ sc->aw_host.f_min = 400000; sc->aw_host.f_max = 52000000; sc->aw_host.host_ocr = MMC_OCR_320_330 | MMC_OCR_330_340; sc->aw_host.caps |= MMC_CAP_HSPEED | MMC_CAP_SIGNALING_330; mmc_fdt_parse(dev, 0, &sc->mmc_helper, &sc->aw_host); mmc_fdt_gpio_setup(dev, 0, &sc->mmc_helper, aw_mmc_helper_cd_handler); #ifdef MMCCAM sc->ccb = NULL; if (mmc_cam_sim_alloc(dev, "aw_mmc", &sc->mmc_sim) != 0) { device_printf(dev, "cannot alloc cam sim\n"); goto fail; } #endif /* MMCCAM */ return (0); fail: callout_drain(&sc->aw_timeoutc); mtx_destroy(&sc->aw_mtx); bus_teardown_intr(dev, sc->aw_res[AW_MMC_IRQRES], sc->aw_intrhand); bus_release_resources(dev, aw_mmc_res_spec, sc->aw_res); return (ENXIO); } static int aw_mmc_detach(device_t dev) { struct aw_mmc_softc *sc; - device_t d; sc = device_get_softc(dev); clk_disable(sc->aw_clk_mmc); clk_disable(sc->aw_clk_ahb); hwreset_assert(sc->aw_rst_ahb); mmc_fdt_gpio_teardown(&sc->mmc_helper); callout_drain(&sc->aw_timeoutc); - AW_MMC_LOCK(sc); - d = sc->child; - sc->child = NULL; - AW_MMC_UNLOCK(sc); - if (d != NULL) - device_delete_child(sc->aw_dev, d); + device_delete_children(sc->aw_dev); aw_mmc_teardown_dma(sc); mtx_destroy(&sc->aw_mtx); bus_teardown_intr(dev, sc->aw_res[AW_MMC_IRQRES], sc->aw_intrhand); bus_release_resources(dev, aw_mmc_res_spec, sc->aw_res); #ifdef MMCCAM mmc_cam_sim_free(&sc->mmc_sim); #endif return (0); } static void aw_dma_desc_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int err) { struct aw_mmc_softc *sc; sc = (struct aw_mmc_softc *)arg; if (err) { sc->aw_dma_map_err = err; return; } sc->aw_dma_desc_phys = segs[0].ds_addr; } static int aw_mmc_setup_dma(struct aw_mmc_softc *sc) { int error; /* Allocate the DMA descriptor memory. 
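 *
 * A back-of-envelope sizing sketch (assumptions of this note, not new
 * code: 4 KiB pages, and the four-uint32_t descriptor implied by
 * aw_dma_cb() below, i.e. 16 bytes each):
 *
 *	AW_MMC_DMA_SEGS      = PAGE_SIZE / sizeof(desc) = 4096 / 16 = 256
 *	AW_MMC_DMA_DESC_SIZE = 16 * 256 = one full page
 *	largest transfer     = dma_xferlen * AW_MMC_DMA_SEGS
 *	                     = 0x10000 * 256 = 16 MiB for the a64 config
 *
 * which is why the tag below uses a single segment of exactly
 * AW_MMC_DMA_DESC_SIZE: the whole descriptor chain lives in one
 * physically contiguous page whose address is reported back through
 * aw_dma_desc_cb().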
*/ error = bus_dma_tag_create( bus_get_dma_tag(sc->aw_dev), /* parent */ AW_MMC_DMA_ALIGN, 0, /* align, boundary */ BUS_SPACE_MAXADDR_32BIT, /* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filter, filterarg*/ AW_MMC_DMA_DESC_SIZE, 1, /* maxsize, nsegment */ AW_MMC_DMA_DESC_SIZE, /* maxsegsize */ 0, /* flags */ NULL, NULL, /* lock, lockarg*/ &sc->aw_dma_tag); if (error) return (error); error = bus_dmamem_alloc(sc->aw_dma_tag, &sc->aw_dma_desc, BUS_DMA_COHERENT | BUS_DMA_WAITOK | BUS_DMA_ZERO, &sc->aw_dma_map); if (error) return (error); error = bus_dmamap_load(sc->aw_dma_tag, sc->aw_dma_map, sc->aw_dma_desc, AW_MMC_DMA_DESC_SIZE, aw_dma_desc_cb, sc, 0); if (error) return (error); if (sc->aw_dma_map_err) return (sc->aw_dma_map_err); /* Create the DMA map for data transfers. */ error = bus_dma_tag_create( bus_get_dma_tag(sc->aw_dev), /* parent */ AW_MMC_DMA_ALIGN, 0, /* align, boundary */ BUS_SPACE_MAXADDR_32BIT, /* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filter, filterarg*/ sc->aw_mmc_conf->dma_xferlen * AW_MMC_DMA_SEGS, AW_MMC_DMA_SEGS, /* maxsize, nsegments */ sc->aw_mmc_conf->dma_xferlen, /* maxsegsize */ BUS_DMA_ALLOCNOW, /* flags */ NULL, NULL, /* lock, lockarg*/ &sc->aw_dma_buf_tag); if (error) return (error); error = bus_dmamap_create(sc->aw_dma_buf_tag, 0, &sc->aw_dma_buf_map); if (error) return (error); return (0); } static void aw_mmc_teardown_dma(struct aw_mmc_softc *sc) { bus_dmamap_unload(sc->aw_dma_tag, sc->aw_dma_map); bus_dmamem_free(sc->aw_dma_tag, sc->aw_dma_desc, sc->aw_dma_map); if (bus_dma_tag_destroy(sc->aw_dma_tag) != 0) device_printf(sc->aw_dev, "Cannot destroy the dma tag\n"); bus_dmamap_unload(sc->aw_dma_buf_tag, sc->aw_dma_buf_map); bus_dmamap_destroy(sc->aw_dma_buf_tag, sc->aw_dma_buf_map); if (bus_dma_tag_destroy(sc->aw_dma_buf_tag) != 0) device_printf(sc->aw_dev, "Cannot destroy the dma buf tag\n"); } static void aw_dma_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int err) { int i; struct aw_mmc_dma_desc *dma_desc; struct aw_mmc_softc *sc; sc = (struct aw_mmc_softc *)arg; sc->aw_dma_map_err = err; if (err) return; dma_desc = sc->aw_dma_desc; for (i = 0; i < nsegs; i++) { if (segs[i].ds_len == sc->aw_mmc_conf->dma_xferlen) dma_desc[i].buf_size = 0; /* Size of 0 indicates max len */ else dma_desc[i].buf_size = segs[i].ds_len; dma_desc[i].buf_addr = segs[i].ds_addr; dma_desc[i].config = AW_MMC_DMA_CONFIG_CH | AW_MMC_DMA_CONFIG_OWN | AW_MMC_DMA_CONFIG_DIC; dma_desc[i].next = sc->aw_dma_desc_phys + ((i + 1) * sizeof(struct aw_mmc_dma_desc)); } dma_desc[0].config |= AW_MMC_DMA_CONFIG_FD; dma_desc[nsegs - 1].config |= AW_MMC_DMA_CONFIG_LD | AW_MMC_DMA_CONFIG_ER; dma_desc[nsegs - 1].config &= ~AW_MMC_DMA_CONFIG_DIC; dma_desc[nsegs - 1].next = 0; } static int aw_mmc_prepare_dma(struct aw_mmc_softc *sc) { bus_dmasync_op_t sync_op; int error; struct mmc_command *cmd; uint32_t val; #ifdef MMCCAM cmd = &sc->ccb->mmcio.cmd; #else cmd = sc->aw_req->cmd; #endif if (cmd->data->len > (sc->aw_mmc_conf->dma_xferlen * AW_MMC_DMA_SEGS)) return (EFBIG); error = bus_dmamap_load(sc->aw_dma_buf_tag, sc->aw_dma_buf_map, cmd->data->data, cmd->data->len, aw_dma_cb, sc, 0); if (error) return (error); if (sc->aw_dma_map_err) return (sc->aw_dma_map_err); if (cmd->data->flags & MMC_DATA_WRITE) sync_op = BUS_DMASYNC_PREWRITE; else sync_op = BUS_DMASYNC_PREREAD; bus_dmamap_sync(sc->aw_dma_buf_tag, sc->aw_dma_buf_map, sync_op); bus_dmamap_sync(sc->aw_dma_tag, sc->aw_dma_map, BUS_DMASYNC_PREWRITE); /* Enable DMA */ val = AW_MMC_READ_4(sc, AW_MMC_GCTL); val &=
~AW_MMC_GCTL_FIFO_AC_MOD; val |= AW_MMC_GCTL_DMA_ENB; AW_MMC_WRITE_4(sc, AW_MMC_GCTL, val); /* Reset DMA */ val |= AW_MMC_GCTL_DMA_RST; AW_MMC_WRITE_4(sc, AW_MMC_GCTL, val); AW_MMC_WRITE_4(sc, AW_MMC_DMAC, AW_MMC_DMAC_IDMAC_SOFT_RST); AW_MMC_WRITE_4(sc, AW_MMC_DMAC, AW_MMC_DMAC_IDMAC_IDMA_ON | AW_MMC_DMAC_IDMAC_FIX_BURST); /* Enable RX or TX DMA interrupt */ val = AW_MMC_READ_4(sc, AW_MMC_IDIE); if (cmd->data->flags & MMC_DATA_WRITE) val |= AW_MMC_IDST_TX_INT; else val |= AW_MMC_IDST_RX_INT; AW_MMC_WRITE_4(sc, AW_MMC_IDIE, val); /* Set DMA descriptor list address */ AW_MMC_WRITE_4(sc, AW_MMC_DLBA, sc->aw_dma_desc_phys); /* FIFO trigger level */ AW_MMC_WRITE_4(sc, AW_MMC_FWLR, AW_MMC_DMA_FTRGLEVEL); return (0); } static int aw_mmc_reset(struct aw_mmc_softc *sc) { uint32_t reg; int timeout; reg = AW_MMC_READ_4(sc, AW_MMC_GCTL); reg |= AW_MMC_GCTL_RESET; AW_MMC_WRITE_4(sc, AW_MMC_GCTL, reg); timeout = AW_MMC_RESET_RETRY; while (--timeout > 0) { if ((AW_MMC_READ_4(sc, AW_MMC_GCTL) & AW_MMC_GCTL_RESET) == 0) break; DELAY(100); } if (timeout == 0) return (ETIMEDOUT); return (0); } static int aw_mmc_init(struct aw_mmc_softc *sc) { uint32_t reg; int ret; ret = aw_mmc_reset(sc); if (ret != 0) return (ret); /* Set the timeout. */ AW_MMC_WRITE_4(sc, AW_MMC_TMOR, AW_MMC_TMOR_DTO_LMT_SHIFT(AW_MMC_TMOR_DTO_LMT_MASK) | AW_MMC_TMOR_RTO_LMT_SHIFT(AW_MMC_TMOR_RTO_LMT_MASK)); /* Unmask interrupts. */ AW_MMC_WRITE_4(sc, AW_MMC_IMKR, 0); /* Clear pending interrupts. */ AW_MMC_WRITE_4(sc, AW_MMC_RISR, 0xffffffff); /* Debug register, undocumented */ AW_MMC_WRITE_4(sc, AW_MMC_DBGC, 0xdeb); /* Function select register */ AW_MMC_WRITE_4(sc, AW_MMC_FUNS, 0xceaa0000); AW_MMC_WRITE_4(sc, AW_MMC_IDST, 0xffffffff); /* Enable interrupts and disable AHB access. */ reg = AW_MMC_READ_4(sc, AW_MMC_GCTL); reg |= AW_MMC_GCTL_INT_ENB; reg &= ~AW_MMC_GCTL_FIFO_AC_MOD; reg &= ~AW_MMC_GCTL_WAIT_MEM_ACCESS; AW_MMC_WRITE_4(sc, AW_MMC_GCTL, reg); return (0); } static void aw_mmc_req_done(struct aw_mmc_softc *sc) { struct mmc_command *cmd; #ifdef MMCCAM union ccb *ccb; #else struct mmc_request *req; #endif uint32_t val, mask; int retry; #ifdef MMCCAM ccb = sc->ccb; cmd = &ccb->mmcio.cmd; #else cmd = sc->aw_req->cmd; #endif if (__predict_false(aw_mmc_debug & AW_MMC_DEBUG_CMD)) { device_printf(sc->aw_dev, "%s: cmd %d err %d\n", __func__, cmd->opcode, cmd->error); } if (cmd->error != MMC_ERR_NONE) { /* Reset the FIFO and DMA engines. */ mask = AW_MMC_GCTL_FIFO_RST | AW_MMC_GCTL_DMA_RST; val = AW_MMC_READ_4(sc, AW_MMC_GCTL); AW_MMC_WRITE_4(sc, AW_MMC_GCTL, val | mask); retry = AW_MMC_RESET_RETRY; while (--retry > 0) { if ((AW_MMC_READ_4(sc, AW_MMC_GCTL) & AW_MMC_GCTL_RESET) == 0) break; DELAY(100); } if (retry == 0) device_printf(sc->aw_dev, "timeout resetting DMA/FIFO\n"); aw_mmc_update_clock(sc, 1); } if (!dumping) callout_stop(&sc->aw_timeoutc); sc->aw_intr = 0; sc->aw_resid = 0; sc->aw_dma_map_err = 0; sc->aw_intr_wait = 0; #ifdef MMCCAM sc->ccb = NULL; ccb->ccb_h.status = (ccb->mmcio.cmd.error == 0 ?
CAM_REQ_CMP : CAM_REQ_CMP_ERR); xpt_done(ccb); #else req = sc->aw_req; sc->aw_req = NULL; req->done(req); #endif } static void aw_mmc_req_ok(struct aw_mmc_softc *sc) { int timeout; struct mmc_command *cmd; uint32_t status; timeout = 1000; while (--timeout > 0) { status = AW_MMC_READ_4(sc, AW_MMC_STAR); if ((status & AW_MMC_STAR_CARD_BUSY) == 0) break; DELAY(1000); } #ifdef MMCCAM cmd = &sc->ccb->mmcio.cmd; #else cmd = sc->aw_req->cmd; #endif if (timeout == 0) { cmd->error = MMC_ERR_FAILED; aw_mmc_req_done(sc); return; } if (cmd->flags & MMC_RSP_PRESENT) { if (cmd->flags & MMC_RSP_136) { cmd->resp[0] = AW_MMC_READ_4(sc, AW_MMC_RESP3); cmd->resp[1] = AW_MMC_READ_4(sc, AW_MMC_RESP2); cmd->resp[2] = AW_MMC_READ_4(sc, AW_MMC_RESP1); cmd->resp[3] = AW_MMC_READ_4(sc, AW_MMC_RESP0); } else cmd->resp[0] = AW_MMC_READ_4(sc, AW_MMC_RESP0); } /* All data has been transferred ? */ if (cmd->data != NULL && (sc->aw_resid << 2) < cmd->data->len) cmd->error = MMC_ERR_FAILED; aw_mmc_req_done(sc); } static inline void set_mmc_error(struct aw_mmc_softc *sc, int error_code) { #ifdef MMCCAM sc->ccb->mmcio.cmd.error = error_code; #else sc->aw_req->cmd->error = error_code; #endif } static void aw_mmc_timeout(void *arg) { struct aw_mmc_softc *sc; sc = (struct aw_mmc_softc *)arg; #ifdef MMCCAM if (sc->ccb != NULL) { #else if (sc->aw_req != NULL) { #endif device_printf(sc->aw_dev, "controller timeout\n"); set_mmc_error(sc, MMC_ERR_TIMEOUT); aw_mmc_req_done(sc); } else device_printf(sc->aw_dev, "Spurious timeout - no active request\n"); } static void aw_mmc_print_error(uint32_t err) { if(err & AW_MMC_INT_RESP_ERR) printf("AW_MMC_INT_RESP_ERR "); if (err & AW_MMC_INT_RESP_CRC_ERR) printf("AW_MMC_INT_RESP_CRC_ERR "); if (err & AW_MMC_INT_DATA_CRC_ERR) printf("AW_MMC_INT_DATA_CRC_ERR "); if (err & AW_MMC_INT_RESP_TIMEOUT) printf("AW_MMC_INT_RESP_TIMEOUT "); if (err & AW_MMC_INT_FIFO_RUN_ERR) printf("AW_MMC_INT_FIFO_RUN_ERR "); if (err & AW_MMC_INT_CMD_BUSY) printf("AW_MMC_INT_CMD_BUSY "); if (err & AW_MMC_INT_DATA_START_ERR) printf("AW_MMC_INT_DATA_START_ERR "); if (err & AW_MMC_INT_DATA_END_BIT_ERR) printf("AW_MMC_INT_DATA_END_BIT_ERR"); printf("\n"); } static void aw_mmc_intr(void *arg) { bus_dmasync_op_t sync_op; struct aw_mmc_softc *sc; struct mmc_data *data; uint32_t idst, imask, rint; sc = (struct aw_mmc_softc *)arg; AW_MMC_LOCK(sc); rint = AW_MMC_READ_4(sc, AW_MMC_RISR); idst = AW_MMC_READ_4(sc, AW_MMC_IDST); imask = AW_MMC_READ_4(sc, AW_MMC_IMKR); if (idst == 0 && imask == 0 && rint == 0) { AW_MMC_UNLOCK(sc); return; } if (__predict_false(aw_mmc_debug & AW_MMC_DEBUG_INT)) { device_printf(sc->aw_dev, "idst: %#x, imask: %#x, rint: %#x\n", idst, imask, rint); } #ifdef MMCCAM if (sc->ccb == NULL) { #else if (sc->aw_req == NULL) { #endif device_printf(sc->aw_dev, "Spurious interrupt - no active request, rint: 0x%08X\n", rint); aw_mmc_print_error(rint); goto end; } if (rint & AW_MMC_INT_ERR_BIT) { if (__predict_false(aw_mmc_debug & AW_MMC_DEBUG_INT)) { device_printf(sc->aw_dev, "error rint: 0x%08X\n", rint); aw_mmc_print_error(rint); } if (rint & AW_MMC_INT_RESP_TIMEOUT) set_mmc_error(sc, MMC_ERR_TIMEOUT); else set_mmc_error(sc, MMC_ERR_FAILED); aw_mmc_req_done(sc); goto end; } if (idst & AW_MMC_IDST_ERROR) { if (__predict_false(aw_mmc_debug & AW_MMC_DEBUG_INT)) device_printf(sc->aw_dev, "error idst: 0x%08x\n", idst); set_mmc_error(sc, MMC_ERR_FAILED); aw_mmc_req_done(sc); goto end; } sc->aw_intr |= rint; #ifdef MMCCAM data = sc->ccb->mmcio.cmd.data; #else data = sc->aw_req->cmd->data; #endif if (data != NULL && (idst 
& AW_MMC_IDST_COMPLETE) != 0) { if (data->flags & MMC_DATA_WRITE) sync_op = BUS_DMASYNC_POSTWRITE; else sync_op = BUS_DMASYNC_POSTREAD; bus_dmamap_sync(sc->aw_dma_buf_tag, sc->aw_dma_buf_map, sync_op); bus_dmamap_sync(sc->aw_dma_tag, sc->aw_dma_map, BUS_DMASYNC_POSTWRITE); bus_dmamap_unload(sc->aw_dma_buf_tag, sc->aw_dma_buf_map); sc->aw_resid = data->len >> 2; } if ((sc->aw_intr & sc->aw_intr_wait) == sc->aw_intr_wait) aw_mmc_req_ok(sc); end: AW_MMC_WRITE_4(sc, AW_MMC_IDST, idst); AW_MMC_WRITE_4(sc, AW_MMC_RISR, rint); AW_MMC_UNLOCK(sc); } static int aw_mmc_request(device_t bus, device_t child, struct mmc_request *req) { int blksz; struct aw_mmc_softc *sc; struct mmc_command *cmd; uint32_t cmdreg, imask; int err; sc = device_get_softc(bus); AW_MMC_LOCK(sc); #ifdef MMCCAM KASSERT(req == NULL, ("req should be NULL in MMCCAM case!")); /* * For MMCCAM, sc->ccb has been NULL-checked and populated * by aw_mmc_cam_request() already. */ cmd = &sc->ccb->mmcio.cmd; #else if (sc->aw_req) { AW_MMC_UNLOCK(sc); return (EBUSY); } sc->aw_req = req; cmd = req->cmd; if (__predict_false(aw_mmc_debug & AW_MMC_DEBUG_CMD)) { device_printf(sc->aw_dev, "CMD%u arg %#x flags %#x dlen %u dflags %#x\n", cmd->opcode, cmd->arg, cmd->flags, cmd->data != NULL ? (unsigned int)cmd->data->len : 0, cmd->data != NULL ? cmd->data->flags: 0); } #endif cmdreg = AW_MMC_CMDR_LOAD; imask = AW_MMC_INT_ERR_BIT; sc->aw_intr_wait = 0; sc->aw_intr = 0; sc->aw_resid = 0; cmd->error = MMC_ERR_NONE; if (cmd->opcode == MMC_GO_IDLE_STATE) cmdreg |= AW_MMC_CMDR_SEND_INIT_SEQ; if (cmd->flags & MMC_RSP_PRESENT) cmdreg |= AW_MMC_CMDR_RESP_RCV; if (cmd->flags & MMC_RSP_136) cmdreg |= AW_MMC_CMDR_LONG_RESP; if (cmd->flags & MMC_RSP_CRC) cmdreg |= AW_MMC_CMDR_CHK_RESP_CRC; if (cmd->data) { cmdreg |= AW_MMC_CMDR_DATA_TRANS | AW_MMC_CMDR_WAIT_PRE_OVER; if (cmd->data->flags & MMC_DATA_MULTI) { cmdreg |= AW_MMC_CMDR_STOP_CMD_FLAG; imask |= AW_MMC_INT_AUTO_STOP_DONE; sc->aw_intr_wait |= AW_MMC_INT_AUTO_STOP_DONE; } else { sc->aw_intr_wait |= AW_MMC_INT_DATA_OVER; imask |= AW_MMC_INT_DATA_OVER; } if (cmd->data->flags & MMC_DATA_WRITE) cmdreg |= AW_MMC_CMDR_DIR_WRITE; #ifdef MMCCAM if (cmd->data->flags & MMC_DATA_BLOCK_SIZE) { AW_MMC_WRITE_4(sc, AW_MMC_BKSR, cmd->data->block_size); AW_MMC_WRITE_4(sc, AW_MMC_BYCR, cmd->data->len); } else #endif { blksz = min(cmd->data->len, MMC_SECTOR_SIZE); AW_MMC_WRITE_4(sc, AW_MMC_BKSR, blksz); AW_MMC_WRITE_4(sc, AW_MMC_BYCR, cmd->data->len); } } else { imask |= AW_MMC_INT_CMD_DONE; } /* Enable the interrupts we are interested in */ AW_MMC_WRITE_4(sc, AW_MMC_IMKR, imask); AW_MMC_WRITE_4(sc, AW_MMC_RISR, 0xffffffff); /* Enable auto stop if needed */ AW_MMC_WRITE_4(sc, AW_MMC_A12A, cmdreg & AW_MMC_CMDR_STOP_CMD_FLAG ? 
0 : 0xffff); /* Write the command argument */ AW_MMC_WRITE_4(sc, AW_MMC_CAGR, cmd->arg); /* * If there is no data, start the request immediately; otherwise * prepare the DMA transfer first and then start the request. */ if (cmd->data == NULL) { AW_MMC_WRITE_4(sc, AW_MMC_CMDR, cmdreg | cmd->opcode); } else { err = aw_mmc_prepare_dma(sc); if (err != 0) device_printf(sc->aw_dev, "prepare_dma failed: %d\n", err); AW_MMC_WRITE_4(sc, AW_MMC_CMDR, cmdreg | cmd->opcode); } if (!dumping) { callout_reset(&sc->aw_timeoutc, sc->aw_timeout * hz, aw_mmc_timeout, sc); } AW_MMC_UNLOCK(sc); return (0); } static int aw_mmc_read_ivar(device_t bus, device_t child, int which, uintptr_t *result) { struct aw_mmc_softc *sc; sc = device_get_softc(bus); switch (which) { default: return (EINVAL); case MMCBR_IVAR_BUS_MODE: *(int *)result = sc->aw_host.ios.bus_mode; break; case MMCBR_IVAR_BUS_WIDTH: *(int *)result = sc->aw_host.ios.bus_width; break; case MMCBR_IVAR_CHIP_SELECT: *(int *)result = sc->aw_host.ios.chip_select; break; case MMCBR_IVAR_CLOCK: *(int *)result = sc->aw_host.ios.clock; break; case MMCBR_IVAR_F_MIN: *(int *)result = sc->aw_host.f_min; break; case MMCBR_IVAR_F_MAX: *(int *)result = sc->aw_host.f_max; break; case MMCBR_IVAR_HOST_OCR: *(int *)result = sc->aw_host.host_ocr; break; case MMCBR_IVAR_MODE: *(int *)result = sc->aw_host.mode; break; case MMCBR_IVAR_OCR: *(int *)result = sc->aw_host.ocr; break; case MMCBR_IVAR_POWER_MODE: *(int *)result = sc->aw_host.ios.power_mode; break; case MMCBR_IVAR_VDD: *(int *)result = sc->aw_host.ios.vdd; break; case MMCBR_IVAR_VCCQ: *(int *)result = sc->aw_host.ios.vccq; break; case MMCBR_IVAR_CAPS: *(int *)result = sc->aw_host.caps; break; case MMCBR_IVAR_TIMING: *(int *)result = sc->aw_host.ios.timing; break; case MMCBR_IVAR_MAX_DATA: *(int *)result = (sc->aw_mmc_conf->dma_xferlen * AW_MMC_DMA_SEGS) / MMC_SECTOR_SIZE; break; case MMCBR_IVAR_RETUNE_REQ: *(int *)result = retune_req_none; break; } return (0); } static int aw_mmc_write_ivar(device_t bus, device_t child, int which, uintptr_t value) { struct aw_mmc_softc *sc; sc = device_get_softc(bus); switch (which) { default: return (EINVAL); case MMCBR_IVAR_BUS_MODE: sc->aw_host.ios.bus_mode = value; break; case MMCBR_IVAR_BUS_WIDTH: sc->aw_host.ios.bus_width = value; break; case MMCBR_IVAR_CHIP_SELECT: sc->aw_host.ios.chip_select = value; break; case MMCBR_IVAR_CLOCK: sc->aw_host.ios.clock = value; break; case MMCBR_IVAR_MODE: sc->aw_host.mode = value; break; case MMCBR_IVAR_OCR: sc->aw_host.ocr = value; break; case MMCBR_IVAR_POWER_MODE: sc->aw_host.ios.power_mode = value; break; case MMCBR_IVAR_VDD: sc->aw_host.ios.vdd = value; break; case MMCBR_IVAR_VCCQ: sc->aw_host.ios.vccq = value; break; case MMCBR_IVAR_TIMING: sc->aw_host.ios.timing = value; break; /* These are read-only */ case MMCBR_IVAR_CAPS: case MMCBR_IVAR_HOST_OCR: case MMCBR_IVAR_F_MIN: case MMCBR_IVAR_F_MAX: case MMCBR_IVAR_MAX_DATA: return (EINVAL); } return (0); } static int aw_mmc_update_clock(struct aw_mmc_softc *sc, uint32_t clkon) { uint32_t reg; int retry; reg = AW_MMC_READ_4(sc, AW_MMC_CKCR); reg &= ~(AW_MMC_CKCR_ENB | AW_MMC_CKCR_LOW_POWER | AW_MMC_CKCR_MASK_DATA0); if (clkon) reg |= AW_MMC_CKCR_ENB; if (sc->aw_mmc_conf->mask_data0) reg |= AW_MMC_CKCR_MASK_DATA0; AW_MMC_WRITE_4(sc, AW_MMC_CKCR, reg); reg = AW_MMC_CMDR_LOAD | AW_MMC_CMDR_PRG_CLK | AW_MMC_CMDR_WAIT_PRE_OVER; AW_MMC_WRITE_4(sc, AW_MMC_CMDR, reg); retry = 0xfffff; while (reg & AW_MMC_CMDR_LOAD && --retry > 0) { reg = AW_MMC_READ_4(sc, AW_MMC_CMDR); DELAY(10); } AW_MMC_WRITE_4(sc, AW_MMC_RISR,
0xffffffff); if (reg & AW_MMC_CMDR_LOAD) { device_printf(sc->aw_dev, "timeout updating clock\n"); return (ETIMEDOUT); } if (sc->aw_mmc_conf->mask_data0) { reg = AW_MMC_READ_4(sc, AW_MMC_CKCR); reg &= ~AW_MMC_CKCR_MASK_DATA0; AW_MMC_WRITE_4(sc, AW_MMC_CKCR, reg); } return (0); } #ifndef MMCCAM static int aw_mmc_switch_vccq(device_t bus, device_t child) { struct aw_mmc_softc *sc; int uvolt, err; sc = device_get_softc(bus); if (sc->mmc_helper.vqmmc_supply == NULL) return EOPNOTSUPP; switch (sc->aw_host.ios.vccq) { case vccq_180: uvolt = 1800000; break; case vccq_330: uvolt = 3300000; break; default: return EINVAL; } err = regulator_set_voltage(sc->mmc_helper.vqmmc_supply, uvolt, uvolt); if (err != 0) { device_printf(sc->aw_dev, "Cannot set vqmmc to %d<->%d\n", uvolt, uvolt); return (err); } return (0); } #endif static int aw_mmc_update_ios(device_t bus, device_t child) { int error; struct aw_mmc_softc *sc; struct mmc_ios *ios; unsigned int clock; uint32_t reg, div = 1; int reg_status; int rv; sc = device_get_softc(bus); ios = &sc->aw_host.ios; /* Set the bus width. */ switch (ios->bus_width) { case bus_width_1: AW_MMC_WRITE_4(sc, AW_MMC_BWDR, AW_MMC_BWDR1); break; case bus_width_4: AW_MMC_WRITE_4(sc, AW_MMC_BWDR, AW_MMC_BWDR4); break; case bus_width_8: AW_MMC_WRITE_4(sc, AW_MMC_BWDR, AW_MMC_BWDR8); break; } switch (ios->power_mode) { case power_on: break; case power_off: if (__predict_false(aw_mmc_debug & AW_MMC_DEBUG_CARD)) device_printf(sc->aw_dev, "Powering down sd/mmc\n"); if (sc->mmc_helper.vmmc_supply) { rv = regulator_status(sc->mmc_helper.vmmc_supply, ®_status); if (rv == 0 && reg_status == REGULATOR_STATUS_ENABLED) regulator_disable(sc->mmc_helper.vmmc_supply); } if (sc->mmc_helper.vqmmc_supply) { rv = regulator_status(sc->mmc_helper.vqmmc_supply, ®_status); if (rv == 0 && reg_status == REGULATOR_STATUS_ENABLED) regulator_disable(sc->mmc_helper.vqmmc_supply); } if (sc->mmc_helper.mmc_pwrseq) MMC_PWRSEQ_SET_POWER(sc->mmc_helper.mmc_pwrseq, false); aw_mmc_reset(sc); break; case power_up: if (__predict_false(aw_mmc_debug & AW_MMC_DEBUG_CARD)) device_printf(sc->aw_dev, "Powering up sd/mmc\n"); if (sc->mmc_helper.vmmc_supply) { rv = regulator_status(sc->mmc_helper.vmmc_supply, ®_status); if (rv == 0 && reg_status != REGULATOR_STATUS_ENABLED) regulator_enable(sc->mmc_helper.vmmc_supply); } if (sc->mmc_helper.vqmmc_supply) { rv = regulator_status(sc->mmc_helper.vqmmc_supply, ®_status); if (rv == 0 && reg_status != REGULATOR_STATUS_ENABLED) regulator_enable(sc->mmc_helper.vqmmc_supply); } if (sc->mmc_helper.mmc_pwrseq) MMC_PWRSEQ_SET_POWER(sc->mmc_helper.mmc_pwrseq, true); aw_mmc_init(sc); break; }; /* Enable ddr mode if needed */ reg = AW_MMC_READ_4(sc, AW_MMC_GCTL); if (ios->timing == bus_timing_uhs_ddr50 || ios->timing == bus_timing_mmc_ddr52) reg |= AW_MMC_GCTL_DDR_MOD_SEL; else reg &= ~AW_MMC_GCTL_DDR_MOD_SEL; AW_MMC_WRITE_4(sc, AW_MMC_GCTL, reg); if (ios->clock && ios->clock != sc->aw_clock) { sc->aw_clock = clock = ios->clock; /* Disable clock */ error = aw_mmc_update_clock(sc, 0); if (error != 0) return (error); if (ios->timing == bus_timing_mmc_ddr52 && (sc->aw_mmc_conf->new_timing || ios->bus_width == bus_width_8)) { div = 2; clock <<= 1; } /* Reset the divider. */ reg = AW_MMC_READ_4(sc, AW_MMC_CKCR); reg &= ~AW_MMC_CKCR_DIV; reg |= div - 1; AW_MMC_WRITE_4(sc, AW_MMC_CKCR, reg); /* New timing mode if needed */ if (sc->aw_mmc_conf->new_timing) { reg = AW_MMC_READ_4(sc, AW_MMC_NTSR); reg |= AW_MMC_NTSR_MODE_SELECT; AW_MMC_WRITE_4(sc, AW_MMC_NTSR, reg); } /* Set the MMC clock. 
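 *
 * Hedged reading of the sequence below: the module clock is gated
 * (clk_disable), reprogrammed (clk_set_freq, rounding down), then
 * re-enabled, and only afterwards is the card-side clock turned back
 * on via aw_mmc_update_clock(sc, 1).  Because DDR52 sets div = 2 and
 * doubles "clock" above, a requested 52 MHz bus clock programs a
 * 104 MHz module clock that the internal divider halves back to
 * 52 MHz at the pins.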
*/ error = clk_disable(sc->aw_clk_mmc); if (error != 0 && bootverbose) device_printf(sc->aw_dev, "failed to disable mmc clock: %d\n", error); error = clk_set_freq(sc->aw_clk_mmc, clock, CLK_SET_ROUND_DOWN); if (error != 0) { device_printf(sc->aw_dev, "failed to set frequency to %u Hz: %d\n", clock, error); return (error); } error = clk_enable(sc->aw_clk_mmc); if (error != 0 && bootverbose) device_printf(sc->aw_dev, "failed to re-enable mmc clock: %d\n", error); if (sc->aw_mmc_conf->can_calibrate) AW_MMC_WRITE_4(sc, AW_MMC_SAMP_DL, AW_MMC_SAMP_DL_SW_EN); /* Enable clock. */ error = aw_mmc_update_clock(sc, 1); if (error != 0) return (error); } return (0); } #ifndef MMCCAM static int aw_mmc_get_ro(device_t bus, device_t child) { struct aw_mmc_softc *sc; sc = device_get_softc(bus); return (mmc_fdt_gpio_get_readonly(&sc->mmc_helper)); } static int aw_mmc_acquire_host(device_t bus, device_t child) { struct aw_mmc_softc *sc; int error; sc = device_get_softc(bus); AW_MMC_LOCK(sc); while (sc->aw_bus_busy) { error = msleep(sc, &sc->aw_mtx, PCATCH, "mmchw", 0); if (error != 0) { AW_MMC_UNLOCK(sc); return (error); } } sc->aw_bus_busy++; AW_MMC_UNLOCK(sc); return (0); } static int aw_mmc_release_host(device_t bus, device_t child) { struct aw_mmc_softc *sc; sc = device_get_softc(bus); AW_MMC_LOCK(sc); sc->aw_bus_busy--; wakeup(sc); AW_MMC_UNLOCK(sc); return (0); } #endif static device_method_t aw_mmc_methods[] = { /* Device interface */ DEVMETHOD(device_probe, aw_mmc_probe), DEVMETHOD(device_attach, aw_mmc_attach), DEVMETHOD(device_detach, aw_mmc_detach), /* Bus interface */ DEVMETHOD(bus_read_ivar, aw_mmc_read_ivar), DEVMETHOD(bus_write_ivar, aw_mmc_write_ivar), DEVMETHOD(bus_add_child, bus_generic_add_child), #ifndef MMCCAM /* MMC bridge interface */ DEVMETHOD(mmcbr_update_ios, aw_mmc_update_ios), DEVMETHOD(mmcbr_request, aw_mmc_request), DEVMETHOD(mmcbr_get_ro, aw_mmc_get_ro), DEVMETHOD(mmcbr_switch_vccq, aw_mmc_switch_vccq), DEVMETHOD(mmcbr_acquire_host, aw_mmc_acquire_host), DEVMETHOD(mmcbr_release_host, aw_mmc_release_host), #endif #ifdef MMCCAM /* MMCCAM interface */ DEVMETHOD(mmc_sim_get_tran_settings, aw_mmc_get_tran_settings), DEVMETHOD(mmc_sim_set_tran_settings, aw_mmc_set_tran_settings), DEVMETHOD(mmc_sim_cam_request, aw_mmc_cam_request), DEVMETHOD(mmc_sim_cam_poll, aw_mmc_cam_poll), #endif DEVMETHOD_END }; static driver_t aw_mmc_driver = { "aw_mmc", aw_mmc_methods, sizeof(struct aw_mmc_softc), }; DRIVER_MODULE(aw_mmc, simplebus, aw_mmc_driver, NULL, NULL); #ifndef MMCCAM MMC_DECLARE_BRIDGE(aw_mmc); #endif SIMPLEBUS_PNP_INFO(compat_data); diff --git a/sys/arm64/broadcom/genet/if_genet.c b/sys/arm64/broadcom/genet/if_genet.c index ccc35fe841df..0602f076b257 100644 --- a/sys/arm64/broadcom/genet/if_genet.c +++ b/sys/arm64/broadcom/genet/if_genet.c @@ -1,1829 +1,1826 @@ /*- * Copyright (c) 2020 Michael J Karels * Copyright (c) 2016, 2020 Jared McNeill * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. 
* * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ /* * RPi4 (BCM 2711) Gigabit Ethernet ("GENET") controller * * This driver is derived in large part from bcmgenet.c from NetBSD by * Jared McNeill. Parts of the structure and other common code in * this driver have been copied from if_awg.c for the Allwinner EMAC, * also by Jared McNeill. */ #include "opt_device_polling.h" #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #define __BIT(_x) (1 << (_x)) #include "if_genetreg.h" #include #include #include #include #include #include #include "syscon_if.h" #include "miibus_if.h" #include "gpio_if.h" #define RD4(sc, reg) bus_read_4((sc)->res[_RES_MAC], (reg)) #define WR4(sc, reg, val) bus_write_4((sc)->res[_RES_MAC], (reg), (val)) #define GEN_LOCK(sc) mtx_lock(&(sc)->mtx) #define GEN_UNLOCK(sc) mtx_unlock(&(sc)->mtx) #define GEN_ASSERT_LOCKED(sc) mtx_assert(&(sc)->mtx, MA_OWNED) #define GEN_ASSERT_UNLOCKED(sc) mtx_assert(&(sc)->mtx, MA_NOTOWNED) #define TX_DESC_COUNT GENET_DMA_DESC_COUNT #define RX_DESC_COUNT GENET_DMA_DESC_COUNT #define TX_NEXT(n, count) (((n) + 1) & ((count) - 1)) #define RX_NEXT(n, count) (((n) + 1) & ((count) - 1)) #define TX_MAX_SEGS 20 static SYSCTL_NODE(_hw, OID_AUTO, genet, CTLFLAG_RD | CTLFLAG_MPSAFE, 0, "genet driver parameters"); /* Maximum number of mbufs to pass per call to if_input */ static int gen_rx_batch = 16 /* RX_BATCH_DEFAULT */; SYSCTL_INT(_hw_genet, OID_AUTO, rx_batch, CTLFLAG_RDTUN, &gen_rx_batch, 0, "max mbufs per call to if_input"); TUNABLE_INT("hw.gen.rx_batch", &gen_rx_batch); /* old name/interface */ /* * Transmitting packets with only an Ethernet header in the first mbuf * fails. Examples include reflected ICMPv6 packets, e.g. echo replies; * forwarded IPv6/TCP packets; and forwarded IPv4/TCP packets that use NAT * with IPFW. Pulling up the sizes of ether_header + ip6_hdr + icmp6_hdr * seems to work for both ICMPv6 and TCP over IPv6, as well as the IPv4/TCP * case. */ static int gen_tx_hdr_min = 56; /* ether_header + ip6_hdr + icmp6_hdr */ SYSCTL_INT(_hw_genet, OID_AUTO, tx_hdr_min, CTLFLAG_RW, &gen_tx_hdr_min, 0, "header to add to packets with ether header only"); static struct ofw_compat_data compat_data[] = { { "brcm,genet-v1", 1 }, { "brcm,genet-v2", 2 }, { "brcm,genet-v3", 3 }, { "brcm,genet-v4", 4 }, { "brcm,genet-v5", 5 }, { "brcm,bcm2711-genet-v5", 5 }, { NULL, 0 } }; enum { _RES_MAC, /* what to call this? 
*/ _RES_IRQ1, _RES_IRQ2, _RES_NITEMS }; static struct resource_spec gen_spec[] = { { SYS_RES_MEMORY, 0, RF_ACTIVE }, { SYS_RES_IRQ, 0, RF_ACTIVE }, { SYS_RES_IRQ, 1, RF_ACTIVE }, { -1, 0 } }; /* structure per ring entry */ struct gen_ring_ent { bus_dmamap_t map; struct mbuf *mbuf; }; struct tx_queue { int hwindex; /* hardware index */ int nentries; u_int queued; /* or avail? */ u_int cur; u_int next; u_int prod_idx; u_int cons_idx; struct gen_ring_ent *entries; }; struct rx_queue { int hwindex; /* hardware index */ int nentries; u_int cur; u_int prod_idx; u_int cons_idx; struct gen_ring_ent *entries; }; struct gen_softc { struct resource *res[_RES_NITEMS]; struct mtx mtx; if_t ifp; device_t dev; device_t miibus; mii_contype_t phy_mode; struct callout stat_ch; struct task link_task; void *ih; void *ih2; int type; int if_flags; int link; bus_dma_tag_t tx_buf_tag; /* * The genet chip has multiple queues for transmit and receive. * This driver uses only one (queue 16, the default), but is cast * with multiple rings. The additional rings are used for different * priorities. */ #define DEF_TXQUEUE 0 #define NTXQUEUE 1 struct tx_queue tx_queue[NTXQUEUE]; struct gen_ring_ent tx_ring_ent[TX_DESC_COUNT]; /* ring entries */ bus_dma_tag_t rx_buf_tag; #define DEF_RXQUEUE 0 #define NRXQUEUE 1 struct rx_queue rx_queue[NRXQUEUE]; struct gen_ring_ent rx_ring_ent[RX_DESC_COUNT]; /* ring entries */ }; static void gen_init(void *softc); static void gen_start(if_t ifp); static void gen_destroy(struct gen_softc *sc); static int gen_encap(struct gen_softc *sc, struct mbuf **mp); static int gen_parse_tx(struct mbuf *m, int csum_flags); static int gen_ioctl(if_t ifp, u_long cmd, caddr_t data); static int gen_get_phy_mode(device_t dev); static bool gen_get_eaddr(device_t dev, struct ether_addr *eaddr); static void gen_set_enaddr(struct gen_softc *sc); static void gen_setup_rxfilter(struct gen_softc *sc); static void gen_reset(struct gen_softc *sc); static void gen_enable(struct gen_softc *sc); static void gen_dma_disable(struct gen_softc *sc); static int gen_bus_dma_init(struct gen_softc *sc); static void gen_bus_dma_teardown(struct gen_softc *sc); static void gen_enable_intr(struct gen_softc *sc); static void gen_init_txrings(struct gen_softc *sc); static void gen_init_rxrings(struct gen_softc *sc); static void gen_intr(void *softc); static int gen_rxintr(struct gen_softc *sc, struct rx_queue *q); static void gen_txintr(struct gen_softc *sc, struct tx_queue *q); static void gen_intr2(void *softc); static int gen_newbuf_rx(struct gen_softc *sc, struct rx_queue *q, int index); static int gen_mapbuf_rx(struct gen_softc *sc, struct rx_queue *q, int index, struct mbuf *m); static void gen_link_task(void *arg, int pending); static void gen_media_status(if_t ifp, struct ifmediareq *ifmr); static int gen_media_change(if_t ifp); static void gen_tick(void *softc); static int gen_probe(device_t dev) { if (!ofw_bus_status_okay(dev)) return (ENXIO); if (ofw_bus_search_compatible(dev, compat_data)->ocd_data == 0) return (ENXIO); device_set_desc(dev, "RPi4 Gigabit Ethernet"); return (BUS_PROBE_DEFAULT); } static int gen_attach(device_t dev) { struct ether_addr eaddr; struct gen_softc *sc; int major, minor, error, mii_flags; bool eaddr_found; sc = device_get_softc(dev); sc->dev = dev; sc->type = ofw_bus_search_compatible(dev, compat_data)->ocd_data; if (bus_alloc_resources(dev, gen_spec, sc->res) != 0) { device_printf(dev, "cannot allocate resources for device\n"); error = ENXIO; goto fail; } major = (RD4(sc, GENET_SYS_REV_CTRL) 
& REV_MAJOR) >> REV_MAJOR_SHIFT; if (major != REV_MAJOR_V5) { device_printf(dev, "version %d is not supported\n", major); error = ENXIO; goto fail; } minor = (RD4(sc, GENET_SYS_REV_CTRL) & REV_MINOR) >> REV_MINOR_SHIFT; device_printf(dev, "GENET version 5.%d phy 0x%04x\n", minor, RD4(sc, GENET_SYS_REV_CTRL) & REV_PHY); mtx_init(&sc->mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK, MTX_DEF); callout_init_mtx(&sc->stat_ch, &sc->mtx, 0); TASK_INIT(&sc->link_task, 0, gen_link_task, sc); error = gen_get_phy_mode(dev); if (error != 0) goto fail; bzero(&eaddr, sizeof(eaddr)); eaddr_found = gen_get_eaddr(dev, &eaddr); /* reset core */ gen_reset(sc); gen_dma_disable(sc); /* Setup DMA */ error = gen_bus_dma_init(sc); if (error != 0) { device_printf(dev, "cannot setup bus dma\n"); goto fail; } /* Setup ethernet interface */ sc->ifp = if_alloc(IFT_ETHER); if_setsoftc(sc->ifp, sc); if_initname(sc->ifp, device_get_name(dev), device_get_unit(dev)); if_setflags(sc->ifp, IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST); if_setstartfn(sc->ifp, gen_start); if_setioctlfn(sc->ifp, gen_ioctl); if_setinitfn(sc->ifp, gen_init); if_setsendqlen(sc->ifp, TX_DESC_COUNT - 1); if_setsendqready(sc->ifp); #define GEN_CSUM_FEATURES (CSUM_UDP | CSUM_TCP) if_sethwassist(sc->ifp, GEN_CSUM_FEATURES); if_setcapabilities(sc->ifp, IFCAP_VLAN_MTU | IFCAP_HWCSUM | IFCAP_HWCSUM_IPV6); if_setcapenable(sc->ifp, if_getcapabilities(sc->ifp)); /* Install interrupt handlers */ error = bus_setup_intr(dev, sc->res[_RES_IRQ1], INTR_TYPE_NET | INTR_MPSAFE, NULL, gen_intr, sc, &sc->ih); if (error != 0) { device_printf(dev, "cannot setup interrupt handler1\n"); goto fail; } error = bus_setup_intr(dev, sc->res[_RES_IRQ2], INTR_TYPE_NET | INTR_MPSAFE, NULL, gen_intr2, sc, &sc->ih2); if (error != 0) { device_printf(dev, "cannot setup interrupt handler2\n"); goto fail; } /* Attach MII driver */ mii_flags = 0; switch (sc->phy_mode) { case MII_CONTYPE_RGMII_ID: mii_flags |= MIIF_RX_DELAY | MIIF_TX_DELAY; break; case MII_CONTYPE_RGMII_RXID: mii_flags |= MIIF_RX_DELAY; break; case MII_CONTYPE_RGMII_TXID: mii_flags |= MIIF_TX_DELAY; break; default: break; } error = mii_attach(dev, &sc->miibus, sc->ifp, gen_media_change, gen_media_status, BMSR_DEFCAPMASK, MII_PHY_ANY, MII_OFFSET_ANY, mii_flags); if (error != 0) { device_printf(dev, "cannot attach PHY\n"); goto fail; } /* If address was not found, create one based on the hostid and name. */ if (eaddr_found == 0) ether_gen_addr(sc->ifp, &eaddr); /* Attach ethernet interface */ ether_ifattach(sc->ifp, eaddr.octet); fail: if (error) gen_destroy(sc); return (error); } /* Free resources after failed attach. This is not a complete detach. 
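 *
 * Note on this change: instead of hand-deleting the miibus child
 * (which mii_attach() created above), cleanup now relies on
 * device_delete_children(), which detaches and deletes every child of
 * sc->dev in one call; the same simplification is made in
 * aw_mmc_detach() earlier in this diff.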
*/ static void gen_destroy(struct gen_softc *sc) { - if (sc->miibus) { /* can't happen */ - device_delete_child(sc->dev, sc->miibus); - sc->miibus = NULL; - } + device_delete_children(sc->dev); bus_teardown_intr(sc->dev, sc->res[_RES_IRQ1], sc->ih); bus_teardown_intr(sc->dev, sc->res[_RES_IRQ2], sc->ih2); gen_bus_dma_teardown(sc); callout_drain(&sc->stat_ch); if (mtx_initialized(&sc->mtx)) mtx_destroy(&sc->mtx); bus_release_resources(sc->dev, gen_spec, sc->res); if (sc->ifp != NULL) { if_free(sc->ifp); sc->ifp = NULL; } } static int gen_get_phy_mode(device_t dev) { struct gen_softc *sc; phandle_t node; mii_contype_t type; int error = 0; sc = device_get_softc(dev); node = ofw_bus_get_node(dev); type = mii_fdt_get_contype(node); switch (type) { case MII_CONTYPE_RGMII: case MII_CONTYPE_RGMII_ID: case MII_CONTYPE_RGMII_RXID: case MII_CONTYPE_RGMII_TXID: sc->phy_mode = type; break; default: device_printf(dev, "unknown phy-mode '%s'\n", mii_fdt_contype_to_name(type)); error = ENXIO; break; } return (error); } static bool gen_get_eaddr(device_t dev, struct ether_addr *eaddr) { struct gen_softc *sc; uint32_t maclo, machi, val; phandle_t node; sc = device_get_softc(dev); node = ofw_bus_get_node(dev); if (OF_getprop(node, "mac-address", eaddr->octet, ETHER_ADDR_LEN) != -1 || OF_getprop(node, "local-mac-address", eaddr->octet, ETHER_ADDR_LEN) != -1 || OF_getprop(node, "address", eaddr->octet, ETHER_ADDR_LEN) != -1) return (true); device_printf(dev, "No Ethernet address found in fdt!\n"); maclo = machi = 0; val = RD4(sc, GENET_SYS_RBUF_FLUSH_CTRL); if ((val & GENET_SYS_RBUF_FLUSH_RESET) == 0) { maclo = htobe32(RD4(sc, GENET_UMAC_MAC0)); machi = htobe16(RD4(sc, GENET_UMAC_MAC1) & 0xffff); } if (maclo == 0 && machi == 0) { if (bootverbose) device_printf(dev, "No Ethernet address found in controller\n"); return (false); } else { eaddr->octet[0] = maclo & 0xff; eaddr->octet[1] = (maclo >> 8) & 0xff; eaddr->octet[2] = (maclo >> 16) & 0xff; eaddr->octet[3] = (maclo >> 24) & 0xff; eaddr->octet[4] = machi & 0xff; eaddr->octet[5] = (machi >> 8) & 0xff; return (true); } } static void gen_reset(struct gen_softc *sc) { uint32_t val; val = RD4(sc, GENET_SYS_RBUF_FLUSH_CTRL); val |= GENET_SYS_RBUF_FLUSH_RESET; WR4(sc, GENET_SYS_RBUF_FLUSH_CTRL, val); DELAY(10); val &= ~GENET_SYS_RBUF_FLUSH_RESET; WR4(sc, GENET_SYS_RBUF_FLUSH_CTRL, val); DELAY(10); WR4(sc, GENET_SYS_RBUF_FLUSH_CTRL, 0); DELAY(10); WR4(sc, GENET_UMAC_CMD, 0); WR4(sc, GENET_UMAC_CMD, GENET_UMAC_CMD_LCL_LOOP_EN | GENET_UMAC_CMD_SW_RESET); DELAY(10); WR4(sc, GENET_UMAC_CMD, 0); WR4(sc, GENET_UMAC_MIB_CTRL, GENET_UMAC_MIB_RESET_RUNT | GENET_UMAC_MIB_RESET_RX | GENET_UMAC_MIB_RESET_TX); WR4(sc, GENET_UMAC_MIB_CTRL, 0); } static void gen_enable(struct gen_softc *sc) { u_int val; WR4(sc, GENET_UMAC_MAX_FRAME_LEN, 1536); val = RD4(sc, GENET_RBUF_CTRL); val |= GENET_RBUF_ALIGN_2B; WR4(sc, GENET_RBUF_CTRL, val); WR4(sc, GENET_RBUF_TBUF_SIZE_CTRL, 1); /* Enable transmitter and receiver */ val = RD4(sc, GENET_UMAC_CMD); val |= GENET_UMAC_CMD_TXEN; val |= GENET_UMAC_CMD_RXEN; WR4(sc, GENET_UMAC_CMD, val); /* Enable interrupts */ gen_enable_intr(sc); WR4(sc, GENET_INTRL2_CPU_CLEAR_MASK, GENET_IRQ_TXDMA_DONE | GENET_IRQ_RXDMA_DONE); } static void gen_disable_intr(struct gen_softc *sc) { /* Disable interrupts */ WR4(sc, GENET_INTRL2_CPU_SET_MASK, 0xffffffff); WR4(sc, GENET_INTRL2_CPU_CLEAR_MASK, 0xffffffff); } static void gen_disable(struct gen_softc *sc) { uint32_t val; /* Stop receiver */ val = RD4(sc, GENET_UMAC_CMD); val &= ~GENET_UMAC_CMD_RXEN; WR4(sc, 
GENET_UMAC_CMD, val); /* Stop transmitter */ val = RD4(sc, GENET_UMAC_CMD); val &= ~GENET_UMAC_CMD_TXEN; WR4(sc, GENET_UMAC_CMD, val); /* Disable interrupts */ gen_disable_intr(sc); } static void gen_enable_offload(struct gen_softc *sc) { uint32_t check_ctrl, buf_ctrl; check_ctrl = RD4(sc, GENET_RBUF_CHECK_CTRL); buf_ctrl = RD4(sc, GENET_RBUF_CTRL); if ((if_getcapenable(sc->ifp) & IFCAP_RXCSUM) != 0) { check_ctrl |= GENET_RBUF_CHECK_CTRL_EN; buf_ctrl |= GENET_RBUF_64B_EN; } else { check_ctrl &= ~GENET_RBUF_CHECK_CTRL_EN; buf_ctrl &= ~GENET_RBUF_64B_EN; } WR4(sc, GENET_RBUF_CHECK_CTRL, check_ctrl); WR4(sc, GENET_RBUF_CTRL, buf_ctrl); buf_ctrl = RD4(sc, GENET_TBUF_CTRL); if ((if_getcapenable(sc->ifp) & (IFCAP_TXCSUM | IFCAP_TXCSUM_IPV6)) != 0) buf_ctrl |= GENET_RBUF_64B_EN; else buf_ctrl &= ~GENET_RBUF_64B_EN; WR4(sc, GENET_TBUF_CTRL, buf_ctrl); } static void gen_dma_disable(struct gen_softc *sc) { int val; val = RD4(sc, GENET_TX_DMA_CTRL); val &= ~GENET_TX_DMA_CTRL_EN; val &= ~GENET_TX_DMA_CTRL_RBUF_EN(GENET_DMA_DEFAULT_QUEUE); WR4(sc, GENET_TX_DMA_CTRL, val); val = RD4(sc, GENET_RX_DMA_CTRL); val &= ~GENET_RX_DMA_CTRL_EN; val &= ~GENET_RX_DMA_CTRL_RBUF_EN(GENET_DMA_DEFAULT_QUEUE); WR4(sc, GENET_RX_DMA_CTRL, val); } static int gen_bus_dma_init(struct gen_softc *sc) { device_t dev = sc->dev; int i, error; error = bus_dma_tag_create( bus_get_dma_tag(dev), /* Parent tag */ 4, 0, /* alignment, boundary */ BUS_SPACE_MAXADDR_40BIT, /* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filter, filterarg */ MCLBYTES, TX_MAX_SEGS, /* maxsize, nsegs */ MCLBYTES, /* maxsegsize */ 0, /* flags */ NULL, NULL, /* lockfunc, lockarg */ &sc->tx_buf_tag); if (error != 0) { device_printf(dev, "cannot create TX buffer tag\n"); return (error); } for (i = 0; i < TX_DESC_COUNT; i++) { error = bus_dmamap_create(sc->tx_buf_tag, 0, &sc->tx_ring_ent[i].map); if (error != 0) { device_printf(dev, "cannot create TX buffer map\n"); return (error); } } error = bus_dma_tag_create( bus_get_dma_tag(dev), /* Parent tag */ 4, 0, /* alignment, boundary */ BUS_SPACE_MAXADDR_40BIT, /* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filter, filterarg */ MCLBYTES, 1, /* maxsize, nsegs */ MCLBYTES, /* maxsegsize */ 0, /* flags */ NULL, NULL, /* lockfunc, lockarg */ &sc->rx_buf_tag); if (error != 0) { device_printf(dev, "cannot create RX buffer tag\n"); return (error); } for (i = 0; i < RX_DESC_COUNT; i++) { error = bus_dmamap_create(sc->rx_buf_tag, 0, &sc->rx_ring_ent[i].map); if (error != 0) { device_printf(dev, "cannot create RX buffer map\n"); return (error); } } return (0); } static void gen_bus_dma_teardown(struct gen_softc *sc) { int i, error; if (sc->tx_buf_tag != NULL) { for (i = 0; i < TX_DESC_COUNT; i++) { error = bus_dmamap_destroy(sc->tx_buf_tag, sc->tx_ring_ent[i].map); sc->tx_ring_ent[i].map = NULL; if (error) device_printf(sc->dev, "%s: bus_dmamap_destroy failed: %d\n", __func__, error); } error = bus_dma_tag_destroy(sc->tx_buf_tag); sc->tx_buf_tag = NULL; if (error) device_printf(sc->dev, "%s: bus_dma_tag_destroy failed: %d\n", __func__, error); } if (sc->rx_buf_tag != NULL) { for (i = 0; i < RX_DESC_COUNT; i++) { error = bus_dmamap_destroy(sc->rx_buf_tag, sc->rx_ring_ent[i].map); sc->rx_ring_ent[i].map = NULL; if (error) device_printf(sc->dev, "%s: bus_dmamap_destroy failed: %d\n", __func__, error); } error = bus_dma_tag_destroy(sc->rx_buf_tag); sc->rx_buf_tag = NULL; if (error) device_printf(sc->dev, "%s: bus_dma_tag_destroy failed: %d\n", __func__, error); } } static void gen_enable_intr(struct
gen_softc *sc) { WR4(sc, GENET_INTRL2_CPU_CLEAR_MASK, GENET_IRQ_TXDMA_DONE | GENET_IRQ_RXDMA_DONE); } /* * "queue" is the software queue index (0-4); "qid" is the hardware index * (0-16). "base" is the starting index in the ring array. */ static void gen_init_txring(struct gen_softc *sc, int queue, int qid, int base, int nentries) { struct tx_queue *q; uint32_t val; q = &sc->tx_queue[queue]; q->entries = &sc->tx_ring_ent[base]; q->hwindex = qid; q->nentries = nentries; /* TX ring */ q->queued = 0; q->cons_idx = q->prod_idx = 0; WR4(sc, GENET_TX_SCB_BURST_SIZE, 0x08); WR4(sc, GENET_TX_DMA_READ_PTR_LO(qid), 0); WR4(sc, GENET_TX_DMA_READ_PTR_HI(qid), 0); WR4(sc, GENET_TX_DMA_CONS_INDEX(qid), 0); WR4(sc, GENET_TX_DMA_PROD_INDEX(qid), 0); WR4(sc, GENET_TX_DMA_RING_BUF_SIZE(qid), (nentries << GENET_TX_DMA_RING_BUF_SIZE_DESC_SHIFT) | (MCLBYTES & GENET_TX_DMA_RING_BUF_SIZE_BUF_LEN_MASK)); WR4(sc, GENET_TX_DMA_START_ADDR_LO(qid), 0); WR4(sc, GENET_TX_DMA_START_ADDR_HI(qid), 0); WR4(sc, GENET_TX_DMA_END_ADDR_LO(qid), TX_DESC_COUNT * GENET_DMA_DESC_SIZE / 4 - 1); WR4(sc, GENET_TX_DMA_END_ADDR_HI(qid), 0); WR4(sc, GENET_TX_DMA_MBUF_DONE_THRES(qid), 1); WR4(sc, GENET_TX_DMA_FLOW_PERIOD(qid), 0); WR4(sc, GENET_TX_DMA_WRITE_PTR_LO(qid), 0); WR4(sc, GENET_TX_DMA_WRITE_PTR_HI(qid), 0); WR4(sc, GENET_TX_DMA_RING_CFG, __BIT(qid)); /* enable */ /* Enable transmit DMA */ val = RD4(sc, GENET_TX_DMA_CTRL); val |= GENET_TX_DMA_CTRL_EN; val |= GENET_TX_DMA_CTRL_RBUF_EN(qid); WR4(sc, GENET_TX_DMA_CTRL, val); } /* * "queue" is the software queue index (0-4); "qid" is the hardware index * (0-16). "base" is the starting index in the ring array. */ static void gen_init_rxring(struct gen_softc *sc, int queue, int qid, int base, int nentries) { struct rx_queue *q; uint32_t val; int i; q = &sc->rx_queue[queue]; q->entries = &sc->rx_ring_ent[base]; q->hwindex = qid; q->nentries = nentries; q->cons_idx = q->prod_idx = 0; WR4(sc, GENET_RX_SCB_BURST_SIZE, 0x08); WR4(sc, GENET_RX_DMA_WRITE_PTR_LO(qid), 0); WR4(sc, GENET_RX_DMA_WRITE_PTR_HI(qid), 0); WR4(sc, GENET_RX_DMA_PROD_INDEX(qid), 0); WR4(sc, GENET_RX_DMA_CONS_INDEX(qid), 0); WR4(sc, GENET_RX_DMA_RING_BUF_SIZE(qid), (nentries << GENET_RX_DMA_RING_BUF_SIZE_DESC_SHIFT) | (MCLBYTES & GENET_RX_DMA_RING_BUF_SIZE_BUF_LEN_MASK)); WR4(sc, GENET_RX_DMA_START_ADDR_LO(qid), 0); WR4(sc, GENET_RX_DMA_START_ADDR_HI(qid), 0); WR4(sc, GENET_RX_DMA_END_ADDR_LO(qid), RX_DESC_COUNT * GENET_DMA_DESC_SIZE / 4 - 1); WR4(sc, GENET_RX_DMA_END_ADDR_HI(qid), 0); WR4(sc, GENET_RX_DMA_XON_XOFF_THRES(qid), (5 << GENET_RX_DMA_XON_XOFF_THRES_LO_SHIFT) | (RX_DESC_COUNT >> 4)); WR4(sc, GENET_RX_DMA_READ_PTR_LO(qid), 0); WR4(sc, GENET_RX_DMA_READ_PTR_HI(qid), 0); WR4(sc, GENET_RX_DMA_RING_CFG, __BIT(qid)); /* enable */ /* fill ring */ for (i = 0; i < RX_DESC_COUNT; i++) gen_newbuf_rx(sc, &sc->rx_queue[DEF_RXQUEUE], i); /* Enable receive DMA */ val = RD4(sc, GENET_RX_DMA_CTRL); val |= GENET_RX_DMA_CTRL_EN; val |= GENET_RX_DMA_CTRL_RBUF_EN(qid); WR4(sc, GENET_RX_DMA_CTRL, val); } static void gen_init_txrings(struct gen_softc *sc) { int base = 0; #ifdef PRI_RINGS int i; /* init priority rings */ for (i = 0; i < PRI_RINGS; i++) { gen_init_txring(sc, i, i, base, TX_DESC_PRICOUNT); sc->tx_queue[i].queue = i; base += TX_DESC_PRICOUNT; dma_ring_conf |= 1 << i; dma_control |= DMA_RENABLE(i); } #endif /* init GENET_DMA_DEFAULT_QUEUE (16) */ gen_init_txring(sc, DEF_TXQUEUE, GENET_DMA_DEFAULT_QUEUE, base, TX_DESC_COUNT); sc->tx_queue[DEF_TXQUEUE].hwindex = GENET_DMA_DEFAULT_QUEUE; } static void 
gen_init_rxrings(struct gen_softc *sc) { int base = 0; #ifdef PRI_RINGS int i; /* init priority rings */ for (i = 0; i < PRI_RINGS; i++) { gen_init_rxring(sc, i, i, base, TX_DESC_PRICOUNT); sc->rx_queue[i].queue = i; base += TX_DESC_PRICOUNT; dma_ring_conf |= 1 << i; dma_control |= DMA_RENABLE(i); } #endif /* init GENET_DMA_DEFAULT_QUEUE (16) */ gen_init_rxring(sc, DEF_RXQUEUE, GENET_DMA_DEFAULT_QUEUE, base, RX_DESC_COUNT); sc->rx_queue[DEF_RXQUEUE].hwindex = GENET_DMA_DEFAULT_QUEUE; } static void gen_stop(struct gen_softc *sc) { int i; struct gen_ring_ent *ent; GEN_ASSERT_LOCKED(sc); callout_stop(&sc->stat_ch); if_setdrvflagbits(sc->ifp, 0, IFF_DRV_RUNNING); gen_reset(sc); gen_disable(sc); gen_dma_disable(sc); /* Clear the tx/rx ring buffer */ for (i = 0; i < TX_DESC_COUNT; i++) { ent = &sc->tx_ring_ent[i]; if (ent->mbuf != NULL) { bus_dmamap_sync(sc->tx_buf_tag, ent->map, BUS_DMASYNC_POSTWRITE); bus_dmamap_unload(sc->tx_buf_tag, ent->map); m_freem(ent->mbuf); ent->mbuf = NULL; } } for (i = 0; i < RX_DESC_COUNT; i++) { ent = &sc->rx_ring_ent[i]; if (ent->mbuf != NULL) { bus_dmamap_sync(sc->rx_buf_tag, ent->map, BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); bus_dmamap_unload(sc->rx_buf_tag, ent->map); m_freem(ent->mbuf); ent->mbuf = NULL; } } } static void gen_init_locked(struct gen_softc *sc) { struct mii_data *mii; if_t ifp; mii = device_get_softc(sc->miibus); ifp = sc->ifp; GEN_ASSERT_LOCKED(sc); if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) return; switch (sc->phy_mode) { case MII_CONTYPE_RGMII: case MII_CONTYPE_RGMII_ID: case MII_CONTYPE_RGMII_RXID: case MII_CONTYPE_RGMII_TXID: WR4(sc, GENET_SYS_PORT_CTRL, GENET_SYS_PORT_MODE_EXT_GPHY); break; default: WR4(sc, GENET_SYS_PORT_CTRL, 0); } gen_set_enaddr(sc); /* Setup RX filter */ gen_setup_rxfilter(sc); gen_init_txrings(sc); gen_init_rxrings(sc); gen_enable(sc); gen_enable_offload(sc); if_setdrvflagbits(ifp, IFF_DRV_RUNNING, IFF_DRV_OACTIVE); mii_mediachg(mii); callout_reset(&sc->stat_ch, hz, gen_tick, sc); } static void gen_init(void *softc) { struct gen_softc *sc; sc = softc; GEN_LOCK(sc); gen_init_locked(sc); GEN_UNLOCK(sc); } static uint8_t ether_broadcastaddr[] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff }; static void gen_setup_rxfilter_mdf(struct gen_softc *sc, u_int n, const uint8_t *ea) { uint32_t addr0 = (ea[0] << 8) | ea[1]; uint32_t addr1 = (ea[2] << 24) | (ea[3] << 16) | (ea[4] << 8) | ea[5]; WR4(sc, GENET_UMAC_MDF_ADDR0(n), addr0); WR4(sc, GENET_UMAC_MDF_ADDR1(n), addr1); } static u_int gen_setup_multi(void *arg, struct sockaddr_dl *sdl, u_int count) { struct gen_softc *sc = arg; /* "count + 2" to account for unicast and broadcast */ gen_setup_rxfilter_mdf(sc, count + 2, LLADDR(sdl)); return (1); /* increment to count */ } static void gen_setup_rxfilter(struct gen_softc *sc) { if_t ifp = sc->ifp; uint32_t cmd, mdf_ctrl; u_int n; GEN_ASSERT_LOCKED(sc); cmd = RD4(sc, GENET_UMAC_CMD); /* * Count the required number of hardware filters. We need one * for each multicast address, plus one for our own address and * the broadcast address. 
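 *
 * As a worked example (assuming GENET_MAX_MDF_FILTER is 17, as in the
 * GENET register definitions): with n == 3 the mdf_ctrl expression below
 * evaluates to (__BIT(17) - 1) & ~(__BIT(14) - 1) == 0x1c000, i.e. only
 * the three most significant enable bits are set, matching the MDF slots
 * 0..2 programmed through gen_setup_rxfilter_mdf().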
*/ n = if_llmaddr_count(ifp) + 2; if (n > GENET_MAX_MDF_FILTER) if_setflagbits(ifp, IFF_ALLMULTI, 0); else if_setflagbits(ifp, 0, IFF_ALLMULTI); if ((if_getflags(ifp) & (IFF_PROMISC|IFF_ALLMULTI)) != 0) { cmd |= GENET_UMAC_CMD_PROMISC; mdf_ctrl = 0; } else { cmd &= ~GENET_UMAC_CMD_PROMISC; gen_setup_rxfilter_mdf(sc, 0, ether_broadcastaddr); gen_setup_rxfilter_mdf(sc, 1, if_getlladdr(ifp)); (void) if_foreach_llmaddr(ifp, gen_setup_multi, sc); mdf_ctrl = (__BIT(GENET_MAX_MDF_FILTER) - 1) &~ (__BIT(GENET_MAX_MDF_FILTER - n) - 1); } WR4(sc, GENET_UMAC_CMD, cmd); WR4(sc, GENET_UMAC_MDF_CTRL, mdf_ctrl); } static void gen_set_enaddr(struct gen_softc *sc) { uint8_t *enaddr; uint32_t val; if_t ifp; GEN_ASSERT_LOCKED(sc); ifp = sc->ifp; /* Write our unicast address */ enaddr = if_getlladdr(ifp); /* Write hardware address */ val = enaddr[3] | (enaddr[2] << 8) | (enaddr[1] << 16) | (enaddr[0] << 24); WR4(sc, GENET_UMAC_MAC0, val); val = enaddr[5] | (enaddr[4] << 8); WR4(sc, GENET_UMAC_MAC1, val); } static void gen_start_locked(struct gen_softc *sc) { struct mbuf *m; if_t ifp; int err; GEN_ASSERT_LOCKED(sc); if (!sc->link) return; ifp = sc->ifp; if ((if_getdrvflags(ifp) & (IFF_DRV_RUNNING|IFF_DRV_OACTIVE)) != IFF_DRV_RUNNING) return; while (true) { m = if_dequeue(ifp); if (m == NULL) break; err = gen_encap(sc, &m); if (err != 0) { if (err == ENOBUFS) if_setdrvflagbits(ifp, IFF_DRV_OACTIVE, 0); else if (m == NULL) if_inc_counter(ifp, IFCOUNTER_OERRORS, 1); if (m != NULL) if_sendq_prepend(ifp, m); break; } bpf_mtap_if(ifp, m); } } static void gen_start(if_t ifp) { struct gen_softc *sc; sc = if_getsoftc(ifp); GEN_LOCK(sc); gen_start_locked(sc); GEN_UNLOCK(sc); } /* Test for any delayed checksum */ #define CSUM_DELAY_ANY (CSUM_TCP | CSUM_UDP | CSUM_IP6_TCP | CSUM_IP6_UDP) static int gen_encap(struct gen_softc *sc, struct mbuf **mp) { bus_dmamap_t map; bus_dma_segment_t segs[TX_MAX_SEGS]; int error, nsegs, cur, first, i, index, offset; uint32_t csuminfo, length_status, csum_flags = 0, csumdata; struct mbuf *m; struct statusblock *sb = NULL; struct tx_queue *q; struct gen_ring_ent *ent; GEN_ASSERT_LOCKED(sc); q = &sc->tx_queue[DEF_TXQUEUE]; if (q->queued == q->nentries) { /* tx_queue is full */ return (ENOBUFS); } m = *mp; /* * Don't attempt to send packets with only an Ethernet header in * first mbuf; see comment above with gen_tx_hdr_min. 
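 * (The checksum-offload path below prepends a struct statusblock, and
 * gen_parse_tx() must then make the link and network headers contiguous
 * with it; pulling up MIN(m_pkthdr.len, gen_tx_hdr_min) bytes here keeps
 * that copying bounded and the headers in the first mbuf.)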
*/ if (m->m_len == sizeof(struct ether_header)) { m = m_pullup(m, MIN(m->m_pkthdr.len, gen_tx_hdr_min)); if (m == NULL) { if (if_getflags(sc->ifp) & IFF_DEBUG) device_printf(sc->dev, "header pullup fail\n"); *mp = NULL; return (ENOMEM); } } if ((if_getcapenable(sc->ifp) & (IFCAP_TXCSUM | IFCAP_TXCSUM_IPV6)) != 0) { csum_flags = m->m_pkthdr.csum_flags; csumdata = m->m_pkthdr.csum_data; M_PREPEND(m, sizeof(struct statusblock), M_NOWAIT); if (m == NULL) { if (if_getflags(sc->ifp) & IFF_DEBUG) device_printf(sc->dev, "prepend fail\n"); *mp = NULL; return (ENOMEM); } offset = gen_parse_tx(m, csum_flags); sb = mtod(m, struct statusblock *); if ((csum_flags & CSUM_DELAY_ANY) != 0) { csuminfo = (offset << TXCSUM_OFF_SHIFT) | (offset + csumdata); csuminfo |= TXCSUM_LEN_VALID; if (csum_flags & (CSUM_UDP | CSUM_IP6_UDP)) csuminfo |= TXCSUM_UDP; sb->txcsuminfo = csuminfo; } else sb->txcsuminfo = 0; } *mp = m; cur = first = q->cur; ent = &q->entries[cur]; map = ent->map; error = bus_dmamap_load_mbuf_sg(sc->tx_buf_tag, map, m, segs, &nsegs, BUS_DMA_NOWAIT); if (error == EFBIG) { m = m_collapse(m, M_NOWAIT, TX_MAX_SEGS); if (m == NULL) { device_printf(sc->dev, "gen_encap: m_collapse failed\n"); m_freem(*mp); *mp = NULL; return (ENOMEM); } *mp = m; error = bus_dmamap_load_mbuf_sg(sc->tx_buf_tag, map, m, segs, &nsegs, BUS_DMA_NOWAIT); if (error != 0) { m_freem(*mp); *mp = NULL; } } if (error != 0) { device_printf(sc->dev, "gen_encap: bus_dmamap_load_mbuf_sg failed\n"); return (error); } if (nsegs == 0) { m_freem(*mp); *mp = NULL; return (EIO); } /* Remove statusblock after mapping, before possible requeue or bpf. */ if (sb != NULL) { m->m_data += sizeof(struct statusblock); m->m_len -= sizeof(struct statusblock); m->m_pkthdr.len -= sizeof(struct statusblock); } if (q->queued + nsegs > q->nentries) { bus_dmamap_unload(sc->tx_buf_tag, map); return (ENOBUFS); } bus_dmamap_sync(sc->tx_buf_tag, map, BUS_DMASYNC_PREWRITE); index = q->prod_idx & (q->nentries - 1); for (i = 0; i < nsegs; i++) { ent = &q->entries[cur]; length_status = GENET_TX_DESC_STATUS_QTAG_MASK; if (i == 0) { length_status |= GENET_TX_DESC_STATUS_SOP | GENET_TX_DESC_STATUS_CRC; if ((csum_flags & CSUM_DELAY_ANY) != 0) length_status |= GENET_TX_DESC_STATUS_CKSUM; } if (i == nsegs - 1) length_status |= GENET_TX_DESC_STATUS_EOP; length_status |= segs[i].ds_len << GENET_TX_DESC_STATUS_BUFLEN_SHIFT; WR4(sc, GENET_TX_DESC_ADDRESS_LO(index), (uint32_t)segs[i].ds_addr); WR4(sc, GENET_TX_DESC_ADDRESS_HI(index), (uint32_t)(segs[i].ds_addr >> 32)); WR4(sc, GENET_TX_DESC_STATUS(index), length_status); ++q->queued; cur = TX_NEXT(cur, q->nentries); index = TX_NEXT(index, q->nentries); } q->prod_idx += nsegs; q->prod_idx &= GENET_TX_DMA_PROD_CONS_MASK; /* We probably don't need to write the producer index on every iter */ if (nsegs != 0) WR4(sc, GENET_TX_DMA_PROD_INDEX(q->hwindex), q->prod_idx); q->cur = cur; /* Store the mbuf in the first segment's ring entry. */ q->entries[first].mbuf = m; return (0); } /* * Parse a packet to find the offset of the transport header for checksum * offload. Ensure that the link and network headers are contiguous with * the status block, or transmission fails.
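 *
 * Worked example (untagged IPv4/TCP with a 20-byte IP header): the
 * returned offset is sizeof(struct ether_header) + 20 == 34, which
 * gen_encap() encodes as (34 << TXCSUM_OFF_SHIFT) | (34 + csum_data),
 * i.e. where the L4 header starts and where the computed checksum is
 * to be stored.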
*/ static int gen_parse_tx(struct mbuf *m, int csum_flags) { int offset, off_in_m; bool copy = false, shift = false; u_char *p, *copy_p = NULL; struct mbuf *m0 = m; uint16_t ether_type; if (m->m_len == sizeof(struct statusblock)) { /* M_PREPEND placed statusblock at end; move to beginning */ m->m_data = m->m_pktdat; copy_p = mtodo(m, sizeof(struct statusblock)); m = m->m_next; off_in_m = 0; p = mtod(m, u_char *); copy = true; } else { /* * If statusblock is not at beginning of mbuf (likely), * then remember to move mbuf contents down before copying * after them. */ if ((m->m_flags & M_EXT) == 0 && m->m_data != m->m_pktdat) shift = true; p = mtodo(m, sizeof(struct statusblock)); off_in_m = sizeof(struct statusblock); } /* * If headers need to be copied contiguous to statusblock, do so. * If copying to the internal mbuf data area, and the status block * is not at the beginning of that area, shift the status block (which * is empty) and following data. */ #define COPY(size) { \ int hsize = size; \ if (copy) { \ if (shift) { \ u_char *p0; \ shift = false; \ p0 = mtodo(m0, sizeof(struct statusblock)); \ m0->m_data = m0->m_pktdat; \ bcopy(p0, mtodo(m0, sizeof(struct statusblock)),\ m0->m_len - sizeof(struct statusblock)); \ copy_p = mtodo(m0, m0->m_len); \ } \ bcopy(p, copy_p, hsize); \ m0->m_len += hsize; \ m->m_len -= hsize; \ m->m_data += hsize; \ } \ copy_p += hsize; \ } KASSERT((sizeof(struct statusblock) + sizeof(struct ether_vlan_header) + sizeof(struct ip6_hdr) <= MLEN), ("%s: mbuf too small", __func__)); if (((struct ether_header *)p)->ether_type == htons(ETHERTYPE_VLAN)) { offset = sizeof(struct ether_vlan_header); ether_type = ntohs(((struct ether_vlan_header *)p)->evl_proto); COPY(sizeof(struct ether_vlan_header)); if (m->m_len == off_in_m + sizeof(struct ether_vlan_header)) { m = m->m_next; off_in_m = 0; p = mtod(m, u_char *); copy = true; } else { off_in_m += sizeof(struct ether_vlan_header); p += sizeof(struct ether_vlan_header); } } else { offset = sizeof(struct ether_header); ether_type = ntohs(((struct ether_header *)p)->ether_type); COPY(sizeof(struct ether_header)); if (m->m_len == off_in_m + sizeof(struct ether_header)) { m = m->m_next; off_in_m = 0; p = mtod(m, u_char *); copy = true; } else { off_in_m += sizeof(struct ether_header); p += sizeof(struct ether_header); } } if (ether_type == ETHERTYPE_IP) { COPY(((struct ip *)p)->ip_hl << 2); offset += ((struct ip *)p)->ip_hl << 2; } else if (ether_type == ETHERTYPE_IPV6) { COPY(sizeof(struct ip6_hdr)); offset += sizeof(struct ip6_hdr); } else { /* * Unknown whether most other cases require moving a header; * ARP works without. However, Wake On LAN packets sent * by wake(8) via BPF need something like this. 
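 * (When "copy" is set, COPY() appends "size" header bytes to the end of
 * the first mbuf so they stay contiguous with the statusblock, shifting
 * that mbuf's contents down to m_pktdat once if needed; otherwise it
 * merely advances copy_p past headers that are already in place.)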
*/ COPY(MIN(gen_tx_hdr_min, m->m_len)); offset += MIN(gen_tx_hdr_min, m->m_len); } return (offset); #undef COPY } static void gen_intr(void *arg) { struct gen_softc *sc = arg; uint32_t val; GEN_LOCK(sc); val = RD4(sc, GENET_INTRL2_CPU_STAT); val &= ~RD4(sc, GENET_INTRL2_CPU_STAT_MASK); WR4(sc, GENET_INTRL2_CPU_CLEAR, val); if (val & GENET_IRQ_RXDMA_DONE) gen_rxintr(sc, &sc->rx_queue[DEF_RXQUEUE]); if (val & GENET_IRQ_TXDMA_DONE) { gen_txintr(sc, &sc->tx_queue[DEF_TXQUEUE]); if (!if_sendq_empty(sc->ifp)) gen_start_locked(sc); } GEN_UNLOCK(sc); } static int gen_rxintr(struct gen_softc *sc, struct rx_queue *q) { if_t ifp; struct mbuf *m, *mh, *mt; struct statusblock *sb = NULL; int error, index, len, cnt, npkt, n; uint32_t status, prod_idx, total; ifp = sc->ifp; mh = mt = NULL; cnt = 0; npkt = 0; prod_idx = RD4(sc, GENET_RX_DMA_PROD_INDEX(q->hwindex)) & GENET_RX_DMA_PROD_CONS_MASK; total = (prod_idx - q->cons_idx) & GENET_RX_DMA_PROD_CONS_MASK; index = q->cons_idx & (RX_DESC_COUNT - 1); for (n = 0; n < total; n++) { bus_dmamap_sync(sc->rx_buf_tag, q->entries[index].map, BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); bus_dmamap_unload(sc->rx_buf_tag, q->entries[index].map); m = q->entries[index].mbuf; if ((if_getcapenable(ifp) & IFCAP_RXCSUM) != 0) { sb = mtod(m, struct statusblock *); status = sb->status_buflen; } else status = RD4(sc, GENET_RX_DESC_STATUS(index)); len = (status & GENET_RX_DESC_STATUS_BUFLEN_MASK) >> GENET_RX_DESC_STATUS_BUFLEN_SHIFT; /* check for errors */ if ((status & (GENET_RX_DESC_STATUS_SOP | GENET_RX_DESC_STATUS_EOP | GENET_RX_DESC_STATUS_RX_ERROR)) != (GENET_RX_DESC_STATUS_SOP | GENET_RX_DESC_STATUS_EOP)) { if (if_getflags(ifp) & IFF_DEBUG) device_printf(sc->dev, "error/frag %x csum %x\n", status, sb != NULL ? sb->rxcsum : 0); if_inc_counter(ifp, IFCOUNTER_IERRORS, 1); continue; } error = gen_newbuf_rx(sc, q, index); if (error != 0) { if_inc_counter(ifp, IFCOUNTER_IQDROPS, 1); if (if_getflags(ifp) & IFF_DEBUG) device_printf(sc->dev, "gen_newbuf_rx %d\n", error); /* reuse previous mbuf */ (void) gen_mapbuf_rx(sc, q, index, m); continue; } if (sb != NULL) { if (status & GENET_RX_DESC_STATUS_CKSUM_OK) { /* L4 checksum checked; not sure about L3.
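 * Setting CSUM_DATA_VALID | CSUM_PSEUDO_HDR with csum_data = 0xffff is
 * the standard mbuf(9) idiom for "the hardware verified the L4 checksum,
 * pseudo-header included", so the stack skips software verification.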
*/ m->m_pkthdr.csum_flags = CSUM_DATA_VALID | CSUM_PSEUDO_HDR; m->m_pkthdr.csum_data = 0xffff; } m->m_data += sizeof(struct statusblock); m->m_len -= sizeof(struct statusblock); len -= sizeof(struct statusblock); } if (len > ETHER_ALIGN) { m_adj(m, ETHER_ALIGN); len -= ETHER_ALIGN; } m->m_pkthdr.rcvif = ifp; m->m_pkthdr.len = len; m->m_len = len; if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1); m->m_nextpkt = NULL; if (mh == NULL) mh = m; else mt->m_nextpkt = m; mt = m; ++cnt; ++npkt; index = RX_NEXT(index, q->nentries); q->cons_idx = (q->cons_idx + 1) & GENET_RX_DMA_PROD_CONS_MASK; WR4(sc, GENET_RX_DMA_CONS_INDEX(q->hwindex), q->cons_idx); if (cnt == gen_rx_batch) { GEN_UNLOCK(sc); if_input(ifp, mh); GEN_LOCK(sc); mh = mt = NULL; cnt = 0; } } if (mh != NULL) { GEN_UNLOCK(sc); if_input(ifp, mh); GEN_LOCK(sc); } return (npkt); } static void gen_txintr(struct gen_softc *sc, struct tx_queue *q) { uint32_t cons_idx, total; struct gen_ring_ent *ent; if_t ifp; int i, prog; GEN_ASSERT_LOCKED(sc); ifp = sc->ifp; cons_idx = RD4(sc, GENET_TX_DMA_CONS_INDEX(q->hwindex)) & GENET_TX_DMA_PROD_CONS_MASK; total = (cons_idx - q->cons_idx) & GENET_TX_DMA_PROD_CONS_MASK; prog = 0; for (i = q->next; q->queued > 0 && total > 0; i = TX_NEXT(i, q->nentries), total--) { /* XXX check for errors */ ent = &q->entries[i]; if (ent->mbuf != NULL) { bus_dmamap_sync(sc->tx_buf_tag, ent->map, BUS_DMASYNC_POSTWRITE); bus_dmamap_unload(sc->tx_buf_tag, ent->map); m_freem(ent->mbuf); ent->mbuf = NULL; if_inc_counter(ifp, IFCOUNTER_OPACKETS, 1); } prog++; --q->queued; } if (prog > 0) { q->next = i; if_setdrvflagbits(ifp, 0, IFF_DRV_OACTIVE); } q->cons_idx = cons_idx; } static void gen_intr2(void *arg) { struct gen_softc *sc = arg; device_printf(sc->dev, "gen_intr2\n"); } static int gen_newbuf_rx(struct gen_softc *sc, struct rx_queue *q, int index) { struct mbuf *m; m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR); if (m == NULL) return (ENOBUFS); m->m_pkthdr.len = m->m_len = m->m_ext.ext_size; m_adj(m, ETHER_ALIGN); return (gen_mapbuf_rx(sc, q, index, m)); } static int gen_mapbuf_rx(struct gen_softc *sc, struct rx_queue *q, int index, struct mbuf *m) { bus_dma_segment_t seg; bus_dmamap_t map; int nsegs; map = q->entries[index].map; if (bus_dmamap_load_mbuf_sg(sc->rx_buf_tag, map, m, &seg, &nsegs, BUS_DMA_NOWAIT) != 0) { m_freem(m); return (ENOBUFS); } bus_dmamap_sync(sc->rx_buf_tag, map, BUS_DMASYNC_PREREAD); q->entries[index].mbuf = m; WR4(sc, GENET_RX_DESC_ADDRESS_LO(index), (uint32_t)seg.ds_addr); WR4(sc, GENET_RX_DESC_ADDRESS_HI(index), (uint32_t)(seg.ds_addr >> 32)); return (0); } static int gen_ioctl(if_t ifp, u_long cmd, caddr_t data) { struct gen_softc *sc; struct mii_data *mii; struct ifreq *ifr; int flags, enable, error; sc = if_getsoftc(ifp); mii = device_get_softc(sc->miibus); ifr = (struct ifreq *)data; error = 0; switch (cmd) { case SIOCSIFFLAGS: GEN_LOCK(sc); if (if_getflags(ifp) & IFF_UP) { if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) { flags = if_getflags(ifp) ^ sc->if_flags; if ((flags & (IFF_PROMISC|IFF_ALLMULTI)) != 0) gen_setup_rxfilter(sc); } else gen_init_locked(sc); } else { if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) gen_stop(sc); } sc->if_flags = if_getflags(ifp); GEN_UNLOCK(sc); break; case SIOCADDMULTI: case SIOCDELMULTI: if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) { GEN_LOCK(sc); gen_setup_rxfilter(sc); GEN_UNLOCK(sc); } break; case SIOCSIFMEDIA: case SIOCGIFMEDIA: error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, cmd); break; case SIOCSIFCAP: enable = if_getcapenable(ifp); flags = ifr->ifr_reqcap ^ enable; if 
(flags & IFCAP_RXCSUM) enable ^= IFCAP_RXCSUM; if (flags & IFCAP_RXCSUM_IPV6) enable ^= IFCAP_RXCSUM_IPV6; if (flags & IFCAP_TXCSUM) enable ^= IFCAP_TXCSUM; if (flags & IFCAP_TXCSUM_IPV6) enable ^= IFCAP_TXCSUM_IPV6; if (enable & (IFCAP_TXCSUM | IFCAP_TXCSUM_IPV6)) if_sethwassist(ifp, GEN_CSUM_FEATURES); else if_sethwassist(ifp, 0); if_setcapenable(ifp, enable); if (if_getdrvflags(ifp) & IFF_DRV_RUNNING) gen_enable_offload(sc); break; default: error = ether_ioctl(ifp, cmd, data); break; } return (error); } static void gen_tick(void *softc) { struct gen_softc *sc; struct mii_data *mii; if_t ifp; int link; sc = softc; ifp = sc->ifp; mii = device_get_softc(sc->miibus); GEN_ASSERT_LOCKED(sc); if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) == 0) return; link = sc->link; mii_tick(mii); if (sc->link && !link) gen_start_locked(sc); callout_reset(&sc->stat_ch, hz, gen_tick, sc); } #define MII_BUSY_RETRY 1000 static int gen_miibus_readreg(device_t dev, int phy, int reg) { struct gen_softc *sc; int retry, val; sc = device_get_softc(dev); val = 0; WR4(sc, GENET_MDIO_CMD, GENET_MDIO_READ | (phy << GENET_MDIO_ADDR_SHIFT) | (reg << GENET_MDIO_REG_SHIFT)); val = RD4(sc, GENET_MDIO_CMD); WR4(sc, GENET_MDIO_CMD, val | GENET_MDIO_START_BUSY); for (retry = MII_BUSY_RETRY; retry > 0; retry--) { if (((val = RD4(sc, GENET_MDIO_CMD)) & GENET_MDIO_START_BUSY) == 0) { if (val & GENET_MDIO_READ_FAILED) return (0); /* -1? */ val &= GENET_MDIO_VAL_MASK; break; } DELAY(10); } if (retry == 0) device_printf(dev, "phy read timeout, phy=%d reg=%d\n", phy, reg); return (val); } static int gen_miibus_writereg(device_t dev, int phy, int reg, int val) { struct gen_softc *sc; int retry; sc = device_get_softc(dev); WR4(sc, GENET_MDIO_CMD, GENET_MDIO_WRITE | (phy << GENET_MDIO_ADDR_SHIFT) | (reg << GENET_MDIO_REG_SHIFT) | (val & GENET_MDIO_VAL_MASK)); val = RD4(sc, GENET_MDIO_CMD); WR4(sc, GENET_MDIO_CMD, val | GENET_MDIO_START_BUSY); for (retry = MII_BUSY_RETRY; retry > 0; retry--) { val = RD4(sc, GENET_MDIO_CMD); if ((val & GENET_MDIO_START_BUSY) == 0) break; DELAY(10); } if (retry == 0) device_printf(dev, "phy write timeout, phy=%d reg=%d\n", phy, reg); return (0); } static void gen_update_link_locked(struct gen_softc *sc) { struct mii_data *mii; uint32_t val; u_int speed; GEN_ASSERT_LOCKED(sc); if ((if_getdrvflags(sc->ifp) & IFF_DRV_RUNNING) == 0) return; mii = device_get_softc(sc->miibus); if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID)) == (IFM_ACTIVE | IFM_AVALID)) { switch (IFM_SUBTYPE(mii->mii_media_active)) { case IFM_1000_T: case IFM_1000_SX: speed = GENET_UMAC_CMD_SPEED_1000; sc->link = 1; break; case IFM_100_TX: speed = GENET_UMAC_CMD_SPEED_100; sc->link = 1; break; case IFM_10_T: speed = GENET_UMAC_CMD_SPEED_10; sc->link = 1; break; default: sc->link = 0; break; } } else sc->link = 0; if (sc->link == 0) return; val = RD4(sc, GENET_EXT_RGMII_OOB_CTRL); val &= ~GENET_EXT_RGMII_OOB_OOB_DISABLE; val |= GENET_EXT_RGMII_OOB_RGMII_LINK; val |= GENET_EXT_RGMII_OOB_RGMII_MODE_EN; if (sc->phy_mode == MII_CONTYPE_RGMII) val |= GENET_EXT_RGMII_OOB_ID_MODE_DISABLE; else val &= ~GENET_EXT_RGMII_OOB_ID_MODE_DISABLE; WR4(sc, GENET_EXT_RGMII_OOB_CTRL, val); val = RD4(sc, GENET_UMAC_CMD); val &= ~GENET_UMAC_CMD_SPEED; val |= speed; WR4(sc, GENET_UMAC_CMD, val); } static void gen_link_task(void *arg, int pending) { struct gen_softc *sc; sc = arg; GEN_LOCK(sc); gen_update_link_locked(sc); GEN_UNLOCK(sc); } static void gen_miibus_statchg(device_t dev) { struct gen_softc *sc; sc = device_get_softc(dev); 
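	/*
	 * Defer link-state processing to a taskqueue: the statchg callback
	 * can be invoked from mii_mediachg() while the driver lock is
	 * already held (e.g. from gen_init_locked()), and
	 * gen_update_link_locked() takes that lock itself, so calling it
	 * directly could recurse on the lock.
	 */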
taskqueue_enqueue(taskqueue_swi, &sc->link_task); } static void gen_media_status(if_t ifp, struct ifmediareq *ifmr) { struct gen_softc *sc; struct mii_data *mii; sc = if_getsoftc(ifp); mii = device_get_softc(sc->miibus); GEN_LOCK(sc); mii_pollstat(mii); ifmr->ifm_active = mii->mii_media_active; ifmr->ifm_status = mii->mii_media_status; GEN_UNLOCK(sc); } static int gen_media_change(if_t ifp) { struct gen_softc *sc; struct mii_data *mii; int error; sc = if_getsoftc(ifp); mii = device_get_softc(sc->miibus); GEN_LOCK(sc); error = mii_mediachg(mii); GEN_UNLOCK(sc); return (error); } static device_method_t gen_methods[] = { /* Device interface */ DEVMETHOD(device_probe, gen_probe), DEVMETHOD(device_attach, gen_attach), /* MII interface */ DEVMETHOD(miibus_readreg, gen_miibus_readreg), DEVMETHOD(miibus_writereg, gen_miibus_writereg), DEVMETHOD(miibus_statchg, gen_miibus_statchg), DEVMETHOD_END }; static driver_t gen_driver = { "genet", gen_methods, sizeof(struct gen_softc), }; DRIVER_MODULE(genet, simplebus, gen_driver, 0, 0); DRIVER_MODULE(miibus, genet, miibus_driver, 0, 0); MODULE_DEPEND(genet, ether, 1, 1, 1); MODULE_DEPEND(genet, miibus, 1, 1, 1); diff --git a/sys/dev/intpm/intpm.c b/sys/dev/intpm/intpm.c index 6fc228ac0cd0..3aa76ff48263 100644 --- a/sys/dev/intpm/intpm.c +++ b/sys/dev/intpm/intpm.c @@ -1,902 +1,901 @@ /*- * Copyright (c) 1998, 1999 Takanori Watanabe * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
*/ #include #include #include #include #include #include #include #include #include #include #include "smbus_if.h" #include #include #include #include #include "opt_intpm.h" struct intsmb_softc { device_t dev; struct resource *io_res; struct resource *irq_res; void *irq_hand; device_t smbus; int io_rid; int isbusy; int cfg_irq9; int sb8xx; int poll; struct mtx lock; }; #define INTSMB_LOCK(sc) mtx_lock(&(sc)->lock) #define INTSMB_UNLOCK(sc) mtx_unlock(&(sc)->lock) #define INTSMB_LOCK_ASSERT(sc) mtx_assert(&(sc)->lock, MA_OWNED) static int intsmb_probe(device_t); static int intsmb_attach(device_t); static int intsmb_detach(device_t); static int intsmb_intr(struct intsmb_softc *sc); static int intsmb_slvintr(struct intsmb_softc *sc); static void intsmb_alrintr(struct intsmb_softc *sc); static int intsmb_callback(device_t dev, int index, void *data); static int intsmb_quick(device_t dev, u_char slave, int how); static int intsmb_sendb(device_t dev, u_char slave, char byte); static int intsmb_recvb(device_t dev, u_char slave, char *byte); static int intsmb_writeb(device_t dev, u_char slave, char cmd, char byte); static int intsmb_writew(device_t dev, u_char slave, char cmd, short word); static int intsmb_readb(device_t dev, u_char slave, char cmd, char *byte); static int intsmb_readw(device_t dev, u_char slave, char cmd, short *word); static int intsmb_pcall(device_t dev, u_char slave, char cmd, short sdata, short *rdata); static int intsmb_bwrite(device_t dev, u_char slave, char cmd, u_char count, char *buf); static int intsmb_bread(device_t dev, u_char slave, char cmd, u_char *count, char *buf); static void intsmb_start(struct intsmb_softc *sc, u_char cmd, int nointr); static int intsmb_stop(struct intsmb_softc *sc); static int intsmb_stop_poll(struct intsmb_softc *sc); static int intsmb_free(struct intsmb_softc *sc); static void intsmb_rawintr(void *arg); const struct intsmb_device { uint32_t devid; const char *description; } intsmb_products[] = { { 0x71138086, "Intel PIIX4 SMBUS Interface" }, { 0x719b8086, "Intel PIIX4 SMBUS Interface" }, #if 0 /* Not a good idea yet, this stops isab0 functioning */ { 0x02001166, "ServerWorks OSB4" }, #endif { 0x43721002, "ATI IXP400 SMBus Controller" }, { AMDSB_SMBUS_DEVID, "AMD SB600/7xx/8xx/9xx SMBus Controller" }, { AMDFCH_SMBUS_DEVID, "AMD FCH SMBus Controller" }, { AMDCZ_SMBUS_DEVID, "AMD FCH SMBus Controller" }, { HYGONCZ_SMBUS_DEVID, "Hygon FCH SMBus Controller" }, }; static int intsmb_probe(device_t dev) { const struct intsmb_device *isd; uint32_t devid; size_t i; devid = pci_get_devid(dev); for (i = 0; i < nitems(intsmb_products); i++) { isd = &intsmb_products[i]; if (isd->devid == devid) { device_set_desc(dev, isd->description); return (BUS_PROBE_DEFAULT); } } return (ENXIO); } static uint8_t amd_pmio_read(struct resource *res, uint8_t reg) { bus_write_1(res, 0, reg); /* Index */ return (bus_read_1(res, 1)); /* Data */ } static int sb8xx_attach(device_t dev) { static const int AMDSB_SMBIO_WIDTH = 0x10; struct intsmb_softc *sc; struct resource *res; uint32_t devid; uint8_t revid; uint16_t addr; int rid; int rc; bool enabled; sc = device_get_softc(dev); rid = 0; rc = bus_set_resource(dev, SYS_RES_IOPORT, rid, AMDSB_PMIO_INDEX, AMDSB_PMIO_WIDTH); if (rc != 0) { device_printf(dev, "bus_set_resource for PM IO failed\n"); return (ENXIO); } res = bus_alloc_resource_any(dev, SYS_RES_IOPORT, &rid, RF_ACTIVE); if (res == NULL) { device_printf(dev, "bus_alloc_resource for PM IO failed\n"); return (ENXIO); } devid = pci_get_devid(dev); revid = 
pci_get_revid(dev); if (devid == AMDSB_SMBUS_DEVID || (devid == AMDFCH_SMBUS_DEVID && revid < AMDFCH41_SMBUS_REVID) || (devid == AMDCZ_SMBUS_DEVID && revid < AMDCZ49_SMBUS_REVID)) { addr = amd_pmio_read(res, AMDSB8_PM_SMBUS_EN + 1); addr <<= 8; addr |= amd_pmio_read(res, AMDSB8_PM_SMBUS_EN); enabled = (addr & AMDSB8_SMBUS_EN) != 0; addr &= AMDSB8_SMBUS_ADDR_MASK; } else { addr = amd_pmio_read(res, AMDFCH41_PM_DECODE_EN0); enabled = (addr & AMDFCH41_SMBUS_EN) != 0; addr = amd_pmio_read(res, AMDFCH41_PM_DECODE_EN1); addr <<= 8; } bus_release_resource(dev, SYS_RES_IOPORT, rid, res); bus_delete_resource(dev, SYS_RES_IOPORT, rid); if (!enabled) { device_printf(dev, "SB8xx/SB9xx/FCH SMBus not enabled\n"); return (ENXIO); } sc->io_rid = 0; rc = bus_set_resource(dev, SYS_RES_IOPORT, sc->io_rid, addr, AMDSB_SMBIO_WIDTH); if (rc != 0) { device_printf(dev, "bus_set_resource for SMBus IO failed\n"); return (ENXIO); } sc->io_res = bus_alloc_resource_any(dev, SYS_RES_IOPORT, &sc->io_rid, RF_ACTIVE); if (sc->io_res == NULL) { device_printf(dev, "Could not allocate I/O space\n"); return (ENXIO); } sc->poll = 1; return (0); } static void intsmb_release_resources(device_t dev) { struct intsmb_softc *sc = device_get_softc(dev); - if (sc->smbus) - device_delete_child(dev, sc->smbus); + device_delete_children(dev); if (sc->irq_hand) bus_teardown_intr(dev, sc->irq_res, sc->irq_hand); if (sc->irq_res) bus_release_resource(dev, SYS_RES_IRQ, 0, sc->irq_res); if (sc->io_res) bus_release_resource(dev, SYS_RES_IOPORT, sc->io_rid, sc->io_res); mtx_destroy(&sc->lock); } static int intsmb_attach(device_t dev) { struct intsmb_softc *sc = device_get_softc(dev); int error, rid, value; int intr; char *str; sc->dev = dev; mtx_init(&sc->lock, device_get_nameunit(dev), "intsmb", MTX_DEF); sc->cfg_irq9 = 0; switch (pci_get_devid(dev)) { #ifndef NO_CHANGE_PCICONF case 0x71138086: /* Intel 82371AB */ case 0x719b8086: /* Intel 82443MX */ /* Changing configuration is allowed. */ sc->cfg_irq9 = 1; break; #endif case AMDSB_SMBUS_DEVID: if (pci_get_revid(dev) >= AMDSB8_SMBUS_REVID) sc->sb8xx = 1; break; case AMDFCH_SMBUS_DEVID: case AMDCZ_SMBUS_DEVID: case HYGONCZ_SMBUS_DEVID: sc->sb8xx = 1; break; } if (sc->sb8xx) { error = sb8xx_attach(dev); if (error != 0) goto fail; else goto no_intr; } sc->io_rid = PCI_BASE_ADDR_SMB; sc->io_res = bus_alloc_resource_any(dev, SYS_RES_IOPORT, &sc->io_rid, RF_ACTIVE); if (sc->io_res == NULL) { device_printf(dev, "Could not allocate I/O space\n"); error = ENXIO; goto fail; } if (sc->cfg_irq9) { pci_write_config(dev, PCIR_INTLINE, 0x9, 1); pci_write_config(dev, PCI_HST_CFG_SMB, PCI_INTR_SMB_IRQ9 | PCI_INTR_SMB_ENABLE, 1); } value = pci_read_config(dev, PCI_HST_CFG_SMB, 1); sc->poll = (value & PCI_INTR_SMB_ENABLE) == 0; intr = value & PCI_INTR_SMB_MASK; switch (intr) { case PCI_INTR_SMB_SMI: str = "SMI"; break; case PCI_INTR_SMB_IRQ9: str = "IRQ 9"; break; case PCI_INTR_SMB_IRQ_PCI: str = "PCI IRQ"; break; default: str = "BOGUS"; } device_printf(dev, "intr %s %s ", str, sc->poll == 0 ? "enabled" : "disabled"); printf("revision %d\n", pci_read_config(dev, PCI_REVID_SMB, 1)); if (!sc->poll && intr == PCI_INTR_SMB_SMI) { device_printf(dev, "using polling mode when configured interrupt is SMI\n"); sc->poll = 1; } if (sc->poll) goto no_intr; if (intr != PCI_INTR_SMB_IRQ9 && intr != PCI_INTR_SMB_IRQ_PCI) { device_printf(dev, "Unsupported interrupt mode\n"); error = ENXIO; goto fail; } /* Force IRQ 9. 
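 * When cfg_irq9 is set, the SMBus interrupt select was programmed to
 * IRQ 9 through PCI_HST_CFG_SMB above, so pin the IRQ resource to 9
 * here instead of trusting the BIOS-assigned interrupt line.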
*/ rid = 0; if (sc->cfg_irq9) bus_set_resource(dev, SYS_RES_IRQ, rid, 9, 1); sc->irq_res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, RF_SHAREABLE | RF_ACTIVE); if (sc->irq_res == NULL) { device_printf(dev, "Could not allocate irq\n"); error = ENXIO; goto fail; } error = bus_setup_intr(dev, sc->irq_res, INTR_TYPE_MISC | INTR_MPSAFE, NULL, intsmb_rawintr, sc, &sc->irq_hand); if (error) { device_printf(dev, "Failed to map intr\n"); goto fail; } no_intr: sc->isbusy = 0; sc->smbus = device_add_child(dev, "smbus", DEVICE_UNIT_ANY); if (sc->smbus == NULL) { device_printf(dev, "failed to add smbus child\n"); error = ENXIO; goto fail; } error = device_probe_and_attach(sc->smbus); if (error) { device_printf(dev, "failed to probe+attach smbus child\n"); goto fail; } #ifdef ENABLE_ALART /* Enable Alert */ bus_write_1(sc->io_res, PIIX4_SMBSLVCNT, PIIX4_SMBSLVCNT_ALTEN); #endif return (0); fail: intsmb_release_resources(dev); return (error); } static int intsmb_detach(device_t dev) { int error; error = bus_generic_detach(dev); if (error) { device_printf(dev, "bus detach failed\n"); return (error); } intsmb_release_resources(dev); return (0); } static void intsmb_rawintr(void *arg) { struct intsmb_softc *sc = arg; INTSMB_LOCK(sc); intsmb_intr(sc); intsmb_slvintr(sc); INTSMB_UNLOCK(sc); } static int intsmb_callback(device_t dev, int index, void *data) { int error = 0; switch (index) { case SMB_REQUEST_BUS: break; case SMB_RELEASE_BUS: break; default: error = SMB_EINVAL; } return (error); } /* Counterpart of smbtx_smb_free(). */ static int intsmb_free(struct intsmb_softc *sc) { INTSMB_LOCK_ASSERT(sc); if ((bus_read_1(sc->io_res, PIIX4_SMBHSTSTS) & PIIX4_SMBHSTSTAT_BUSY) || #ifdef ENABLE_ALART (bus_read_1(sc->io_res, PIIX4_SMBSLVSTS) & PIIX4_SMBSLVSTS_BUSY) || #endif sc->isbusy) return (SMB_EBUSY); sc->isbusy = 1; /* Disable interrupts in the slave part. */ #ifndef ENABLE_ALART bus_write_1(sc->io_res, PIIX4_SMBSLVCNT, 0); #endif /* Clear the status flags to prepare for the next INTR. */ bus_write_1(sc->io_res, PIIX4_SMBHSTSTS, PIIX4_SMBHSTSTAT_INTR | PIIX4_SMBHSTSTAT_ERR | PIIX4_SMBHSTSTAT_BUSC | PIIX4_SMBHSTSTAT_FAIL); return (0); } static int intsmb_intr(struct intsmb_softc *sc) { int status, tmp; status = bus_read_1(sc->io_res, PIIX4_SMBHSTSTS); if (status & PIIX4_SMBHSTSTAT_BUSY) return (1); if (status & (PIIX4_SMBHSTSTAT_INTR | PIIX4_SMBHSTSTAT_ERR | PIIX4_SMBHSTSTAT_BUSC | PIIX4_SMBHSTSTAT_FAIL)) { tmp = bus_read_1(sc->io_res, PIIX4_SMBHSTCNT); bus_write_1(sc->io_res, PIIX4_SMBHSTCNT, tmp & ~PIIX4_SMBHSTCNT_INTREN); if (sc->isbusy) { sc->isbusy = 0; wakeup(sc); } return (0); } return (1); /* Not Completed */ } static int intsmb_slvintr(struct intsmb_softc *sc) { int status; status = bus_read_1(sc->io_res, PIIX4_SMBSLVSTS); if (status & PIIX4_SMBSLVSTS_BUSY) return (1); if (status & PIIX4_SMBSLVSTS_ALART) intsmb_alrintr(sc); else if (status & ~(PIIX4_SMBSLVSTS_ALART | PIIX4_SMBSLVSTS_SDW2 | PIIX4_SMBSLVSTS_SDW1)) { } /* Reset Status Register */ bus_write_1(sc->io_res, PIIX4_SMBSLVSTS, PIIX4_SMBSLVSTS_ALART | PIIX4_SMBSLVSTS_SDW2 | PIIX4_SMBSLVSTS_SDW1 | PIIX4_SMBSLVSTS_SLV); return (0); } static void intsmb_alrintr(struct intsmb_softc *sc) { int slvcnt __unused; #ifdef ENABLE_ALART int error; uint8_t addr; #endif /* Stop generating INTR from ALART. */ slvcnt = bus_read_1(sc->io_res, PIIX4_SMBSLVCNT); #ifdef ENABLE_ALART bus_write_1(sc->io_res, PIIX4_SMBSLVCNT, slvcnt & ~PIIX4_SMBSLVCNT_ALTEN); #endif DELAY(5); /* Ask the bus which slave asserted the alert, then ask that slave what the matter is.
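 * A receive-byte transaction addressed to the SMBus Alert Response
 * Address (SMBALTRESP) causes the alerting slave to answer with its own
 * address, which the ENABLE_ALART code below reads back from
 * PIIX4_SMBHSTDAT0.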
*/ #ifdef ENABLE_ALART error = intsmb_free(sc); if (error) return; bus_write_1(sc->io_res, PIIX4_SMBHSTADD, SMBALTRESP | LSB); intsmb_start(sc, PIIX4_SMBHSTCNT_PROT_BYTE, 1); error = intsmb_stop_poll(sc); if (error) device_printf(sc->dev, "ALART: ERROR\n"); else { addr = bus_read_1(sc->io_res, PIIX4_SMBHSTDAT0); device_printf(sc->dev, "ALART_RESPONSE: 0x%x\n", addr); } /* Re-enable INTR from ALART. */ bus_write_1(sc->io_res, PIIX4_SMBSLVCNT, slvcnt | PIIX4_SMBSLVCNT_ALTEN); DELAY(5); #endif } static void intsmb_start(struct intsmb_softc *sc, unsigned char cmd, int nointr) { unsigned char tmp; INTSMB_LOCK_ASSERT(sc); tmp = bus_read_1(sc->io_res, PIIX4_SMBHSTCNT); tmp &= 0xe0; tmp |= cmd; tmp |= PIIX4_SMBHSTCNT_START; /* Enable interrupts unless we are in autoconfiguration. */ if (!sc->poll && !cold && !nointr) tmp |= PIIX4_SMBHSTCNT_INTREN; bus_write_1(sc->io_res, PIIX4_SMBHSTCNT, tmp); } static int intsmb_error(device_t dev, int status) { int error = 0; /* * PIIX4_SMBHSTSTAT_ERR can mean either of * - SMB_ENOACK ("Unclaimed cycle"), * - SMB_ETIMEOUT ("Host device time-out"), * - SMB_EINVAL ("Illegal command field"). * SMB_ENOACK seems to be most typical. */ if (status & PIIX4_SMBHSTSTAT_ERR) error |= SMB_ENOACK; if (status & PIIX4_SMBHSTSTAT_BUSC) error |= SMB_ECOLLI; if (status & PIIX4_SMBHSTSTAT_FAIL) error |= SMB_EABORT; if (error != 0 && bootverbose) device_printf(dev, "error = %d, status = %#x\n", error, status); return (error); } /* * Polling Code. * * Polling is discouraged because it busy-waits while the device * completes the transfer (see Intel document 29063505.pdf), but * interrupts cannot be used during boot, so fall back to polling then. */ static int intsmb_stop_poll(struct intsmb_softc *sc) { int error, i, status, tmp; INTSMB_LOCK_ASSERT(sc); /* First, wait for busy to be set. */ for (i = 0; i < 0x7fff; i++) if (bus_read_1(sc->io_res, PIIX4_SMBHSTSTS) & PIIX4_SMBHSTSTAT_BUSY) break; /* Wait for busy to clear. */ for (i = 0; i < 0x7fff; i++) { status = bus_read_1(sc->io_res, PIIX4_SMBHSTSTS); if (!(status & PIIX4_SMBHSTSTAT_BUSY)) { sc->isbusy = 0; error = intsmb_error(sc->dev, status); return (error); } } /* Timed out waiting for busy to clear. */ sc->isbusy = 0; tmp = bus_read_1(sc->io_res, PIIX4_SMBHSTCNT); bus_write_1(sc->io_res, PIIX4_SMBHSTCNT, tmp & ~PIIX4_SMBHSTCNT_INTREN); return (SMB_ETIMEOUT); } /* * Wait for completion and return result. */ static int intsmb_stop(struct intsmb_softc *sc) { int error, status; INTSMB_LOCK_ASSERT(sc); if (sc->poll || cold) /* Poll so that devices on the SMBus can be used during probe. */ return (intsmb_stop_poll(sc)); error = msleep(sc, &sc->lock, PWAIT | PCATCH, "SMBWAI", hz / 8); if (error == 0) { status = bus_read_1(sc->io_res, PIIX4_SMBHSTSTS); if (!(status & PIIX4_SMBHSTSTAT_BUSY)) { error = intsmb_error(sc->dev, status); if (error == 0 && !(status & PIIX4_SMBHSTSTAT_INTR)) device_printf(sc->dev, "command completed with no INTR status; cause unknown\n"); #ifdef ENABLE_ALART bus_write_1(sc->io_res, PIIX4_SMBSLVCNT, PIIX4_SMBSLVCNT_ALTEN); #endif return (error); } } /* Timeout procedure. */ sc->isbusy = 0; /* Re-enable suppressed interrupt from slave part. */ bus_write_1(sc->io_res, PIIX4_SMBSLVCNT, PIIX4_SMBSLVCNT_ALTEN); if (error == EWOULDBLOCK) return (SMB_ETIMEOUT); else return (SMB_EABORT); } static int intsmb_quick(device_t dev, u_char slave, int how) { struct intsmb_softc *sc = device_get_softc(dev); int error; u_char data; data = slave; /* The Quick Command data bit is carried in the address byte:
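 * in SMBus Quick Command the R/W bit of the address byte is the entire
 * one-bit payload, so the switch below only has to clear (SMB_QWRITE)
 * or set (SMB_QREAD) the LSB; no data register is involved.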
*/ switch (how) { case SMB_QWRITE: data &= ~LSB; break; case SMB_QREAD: data |= LSB; break; default: return (SMB_EINVAL); } INTSMB_LOCK(sc); error = intsmb_free(sc); if (error) { INTSMB_UNLOCK(sc); return (error); } bus_write_1(sc->io_res, PIIX4_SMBHSTADD, data); intsmb_start(sc, PIIX4_SMBHSTCNT_PROT_QUICK, 0); error = intsmb_stop(sc); INTSMB_UNLOCK(sc); return (error); } static int intsmb_sendb(device_t dev, u_char slave, char byte) { struct intsmb_softc *sc = device_get_softc(dev); int error; INTSMB_LOCK(sc); error = intsmb_free(sc); if (error) { INTSMB_UNLOCK(sc); return (error); } bus_write_1(sc->io_res, PIIX4_SMBHSTADD, slave & ~LSB); bus_write_1(sc->io_res, PIIX4_SMBHSTCMD, byte); intsmb_start(sc, PIIX4_SMBHSTCNT_PROT_BYTE, 0); error = intsmb_stop(sc); INTSMB_UNLOCK(sc); return (error); } static int intsmb_recvb(device_t dev, u_char slave, char *byte) { struct intsmb_softc *sc = device_get_softc(dev); int error; INTSMB_LOCK(sc); error = intsmb_free(sc); if (error) { INTSMB_UNLOCK(sc); return (error); } bus_write_1(sc->io_res, PIIX4_SMBHSTADD, slave | LSB); intsmb_start(sc, PIIX4_SMBHSTCNT_PROT_BYTE, 0); error = intsmb_stop(sc); if (error == 0) { #ifdef RECV_IS_IN_CMD /* * The Linux SMBus code has the same trouble; Intel's datasheet * does not make clear which register returns the received byte. */ *byte = bus_read_1(sc->io_res, PIIX4_SMBHSTCMD); #else *byte = bus_read_1(sc->io_res, PIIX4_SMBHSTDAT0); #endif } INTSMB_UNLOCK(sc); return (error); } static int intsmb_writeb(device_t dev, u_char slave, char cmd, char byte) { struct intsmb_softc *sc = device_get_softc(dev); int error; INTSMB_LOCK(sc); error = intsmb_free(sc); if (error) { INTSMB_UNLOCK(sc); return (error); } bus_write_1(sc->io_res, PIIX4_SMBHSTADD, slave & ~LSB); bus_write_1(sc->io_res, PIIX4_SMBHSTCMD, cmd); bus_write_1(sc->io_res, PIIX4_SMBHSTDAT0, byte); intsmb_start(sc, PIIX4_SMBHSTCNT_PROT_BDATA, 0); error = intsmb_stop(sc); INTSMB_UNLOCK(sc); return (error); } static int intsmb_writew(device_t dev, u_char slave, char cmd, short word) { struct intsmb_softc *sc = device_get_softc(dev); int error; INTSMB_LOCK(sc); error = intsmb_free(sc); if (error) { INTSMB_UNLOCK(sc); return (error); } bus_write_1(sc->io_res, PIIX4_SMBHSTADD, slave & ~LSB); bus_write_1(sc->io_res, PIIX4_SMBHSTCMD, cmd); bus_write_1(sc->io_res, PIIX4_SMBHSTDAT0, word & 0xff); bus_write_1(sc->io_res, PIIX4_SMBHSTDAT1, (word >> 8) & 0xff); intsmb_start(sc, PIIX4_SMBHSTCNT_PROT_WDATA, 0); error = intsmb_stop(sc); INTSMB_UNLOCK(sc); return (error); } static int intsmb_readb(device_t dev, u_char slave, char cmd, char *byte) { struct intsmb_softc *sc = device_get_softc(dev); int error; INTSMB_LOCK(sc); error = intsmb_free(sc); if (error) { INTSMB_UNLOCK(sc); return (error); } bus_write_1(sc->io_res, PIIX4_SMBHSTADD, slave | LSB); bus_write_1(sc->io_res, PIIX4_SMBHSTCMD, cmd); intsmb_start(sc, PIIX4_SMBHSTCNT_PROT_BDATA, 0); error = intsmb_stop(sc); if (error == 0) *byte = bus_read_1(sc->io_res, PIIX4_SMBHSTDAT0); INTSMB_UNLOCK(sc); return (error); } static int intsmb_readw(device_t dev, u_char slave, char cmd, short *word) { struct intsmb_softc *sc = device_get_softc(dev); int error; INTSMB_LOCK(sc); error = intsmb_free(sc); if (error) { INTSMB_UNLOCK(sc); return (error); } bus_write_1(sc->io_res, PIIX4_SMBHSTADD, slave | LSB); bus_write_1(sc->io_res, PIIX4_SMBHSTCMD, cmd); intsmb_start(sc, PIIX4_SMBHSTCNT_PROT_WDATA, 0); error = intsmb_stop(sc); if (error == 0) { *word = bus_read_1(sc->io_res, PIIX4_SMBHSTDAT0); *word |= bus_read_1(sc->io_res, PIIX4_SMBHSTDAT1) << 8; } INTSMB_UNLOCK(sc); return
(error); } static int intsmb_pcall(device_t dev, u_char slave, char cmd, short sdata, short *rdata) { return (SMB_ENOTSUPP); } static int intsmb_bwrite(device_t dev, u_char slave, char cmd, u_char count, char *buf) { struct intsmb_softc *sc = device_get_softc(dev); int error, i; if (count > SMBBLOCKTRANS_MAX || count == 0) return (SMB_EINVAL); INTSMB_LOCK(sc); error = intsmb_free(sc); if (error) { INTSMB_UNLOCK(sc); return (error); } /* Reset internal array index. */ bus_read_1(sc->io_res, PIIX4_SMBHSTCNT); bus_write_1(sc->io_res, PIIX4_SMBHSTADD, slave & ~LSB); bus_write_1(sc->io_res, PIIX4_SMBHSTCMD, cmd); for (i = 0; i < count; i++) bus_write_1(sc->io_res, PIIX4_SMBBLKDAT, buf[i]); bus_write_1(sc->io_res, PIIX4_SMBHSTDAT0, count); intsmb_start(sc, PIIX4_SMBHSTCNT_PROT_BLOCK, 0); error = intsmb_stop(sc); INTSMB_UNLOCK(sc); return (error); } static int intsmb_bread(device_t dev, u_char slave, char cmd, u_char *count, char *buf) { struct intsmb_softc *sc = device_get_softc(dev); int error, i; u_char nread; INTSMB_LOCK(sc); error = intsmb_free(sc); if (error) { INTSMB_UNLOCK(sc); return (error); } /* Reset internal array index. */ bus_read_1(sc->io_res, PIIX4_SMBHSTCNT); bus_write_1(sc->io_res, PIIX4_SMBHSTADD, slave | LSB); bus_write_1(sc->io_res, PIIX4_SMBHSTCMD, cmd); intsmb_start(sc, PIIX4_SMBHSTCNT_PROT_BLOCK, 0); error = intsmb_stop(sc); if (error == 0) { nread = bus_read_1(sc->io_res, PIIX4_SMBHSTDAT0); if (nread != 0 && nread <= SMBBLOCKTRANS_MAX) { *count = nread; for (i = 0; i < nread; i++) buf[i] = bus_read_1(sc->io_res, PIIX4_SMBBLKDAT); } else error = SMB_EBUSERR; } INTSMB_UNLOCK(sc); return (error); } static device_method_t intsmb_methods[] = { /* Device interface */ DEVMETHOD(device_probe, intsmb_probe), DEVMETHOD(device_attach, intsmb_attach), DEVMETHOD(device_detach, intsmb_detach), /* SMBus interface */ DEVMETHOD(smbus_callback, intsmb_callback), DEVMETHOD(smbus_quick, intsmb_quick), DEVMETHOD(smbus_sendb, intsmb_sendb), DEVMETHOD(smbus_recvb, intsmb_recvb), DEVMETHOD(smbus_writeb, intsmb_writeb), DEVMETHOD(smbus_writew, intsmb_writew), DEVMETHOD(smbus_readb, intsmb_readb), DEVMETHOD(smbus_readw, intsmb_readw), DEVMETHOD(smbus_pcall, intsmb_pcall), DEVMETHOD(smbus_bwrite, intsmb_bwrite), DEVMETHOD(smbus_bread, intsmb_bread), DEVMETHOD_END }; static driver_t intsmb_driver = { "intsmb", intsmb_methods, sizeof(struct intsmb_softc), }; DRIVER_MODULE_ORDERED(intsmb, pci, intsmb_driver, 0, 0, SI_ORDER_ANY); DRIVER_MODULE(smbus, intsmb, smbus_driver, 0, 0); MODULE_DEPEND(intsmb, smbus, SMBUS_MINVER, SMBUS_PREFVER, SMBUS_MAXVER); MODULE_VERSION(intsmb, 1); MODULE_PNP_INFO("W32:vendor/device;D:#", pci, intpm, intsmb_products, nitems(intsmb_products));