Index: head/share/man/man4/man4.powerpc/dtsec.4
===================================================================
--- head/share/man/man4/man4.powerpc/dtsec.4	(revision 325254)
+++ head/share/man/man4/man4.powerpc/dtsec.4	(revision 325255)
@@ -1,118 +1,119 @@
 .\"
 .\" Copyright (c) 2016 Justin Hibbits
 .\"
 .\" All rights reserved.
 .\"
 .\" Redistribution and use in source and binary forms, with or without
 .\" modification, are permitted provided that the following conditions
 .\" are met:
 .\" 1. Redistributions of source code must retain the above copyright
 .\"    notice, this list of conditions and the following disclaimer.
 .\" 2. Redistributions in binary form must reproduce the above copyright
 .\"    notice, this list of conditions and the following disclaimer in the
 .\"    documentation and/or other materials provided with the distribution.
 .\"
 .\" THIS SOFTWARE IS PROVIDED BY THE DEVELOPERS ``AS IS'' AND ANY EXPRESS OR
 .\" IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 .\" OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 .\" IN NO EVENT SHALL THE DEVELOPERS BE LIABLE FOR ANY DIRECT, INDIRECT,
 .\" INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 .\" NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 .\" DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 .\" THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 .\" (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 .\" THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 .\"
 .\" $FreeBSD$
 .\"
-.Dd March 16, 2017
+.Dd October 31, 2017
 .Dt DTSEC 4
 .Os
 .Sh NAME
 .Nm dtsec
 .Nd "Freescale Datapath Acceleration Architecture-based Three-Speed Ethernet Controller device driver"
 .Sh SYNOPSIS
 To compile this driver into the kernel, place the following lines in your
 kernel configuration file:
 .Bd -ragged -offset indent
 .Cd "include ""dpaa/config.dpaa"""
 .Cd "device dpaa"
 .Cd "device miibus"
 .Ed
 .Sh DESCRIPTION
 The
 .Nm
 driver provides support for the DPAA-based gigabit Ethernet controller
 integrated in some of the Freescale system-on-chip devices.
 .Pp
 The
 .Nm
 driver supports the following media types:
 .Bl -tag -width xxxxxxxxxxxxxxxxxxxx
 .It autoselect
 Enable autoselection of the media type and options
 .It 10baseT/UTP
 Set 10Mbps operation
 .It 100baseTX
 Set 100Mbps operation
 .It 1000baseT
 Set 1000baseT operation
 .El
 .Pp
 The
 .Nm
 driver supports the following media options:
 .Bl -tag -width xxxxxxxxxxxxxxxxxxxx
 .It full-duplex
 Set full duplex operation
 .El
 .Pp
 The
 .Nm
 driver supports two operating modes:
 .Bl -tag -width xxxxxxxxxxxxxxxxxxxx
 .It Regular
 Normal mode, utilizing the full datapath acceleration, Buffer Manager, and
 Queue Manager.
 .It Independent
 Runs disconnected from the Buffer Manager and Queue Manager.
 .El
 .Sh HARDWARE
 Gigabit Ethernet controllers built into the following Freescale
 system-on-chip devices are known to work with the
 .Nm
 driver:
 .Pp
 .Bl -bullet -compact
 .It
 P2041, P3041
 .It
 P5010, P5020
 .El
+.Pp
+Additionally, the following devices are expected to work, but are untested:
+.Bl -bullet -compact
+.It
+P4080, P4040
+.It
+P5040
+.El
 .Sh SEE ALSO
 .Xr altq 4 ,
 .Xr arp 4 ,
 .Xr miibus 4 ,
 .Xr netintro 4 ,
 .Xr ng_ether 4 ,
 .Xr ifconfig 8
-.Sh BUGS
-The
-.Nm
-driver assumes that there is only one Frame Manager, and that dtsec0 controls
-the MDIO interface. Though this is the case for the supported devices, other
-SoCs with the DPAA controller may not work correctly.
Particularly, the P5040 -and P4080 SoCs have two frame managers, which breaks this assumption. .Sh HISTORY The .Nm device driver first appeared in .Fx 11.0 . .Sh AUTHORS .An -nosplit The base version of .Nm device driver was written by .An Semihalf . This manual page was written by .An Justin Hibbits . Index: head/sys/dev/dpaa/fman.c =================================================================== --- head/sys/dev/dpaa/fman.c (revision 325254) +++ head/sys/dev/dpaa/fman.c (revision 325255) @@ -1,586 +1,566 @@ /*- * Copyright (c) 2011-2012 Semihalf. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include "opt_platform.h" #include #include #include #include #include "fman.h" static MALLOC_DEFINE(M_FMAN, "fman", "fman devices information"); /** * @group FMan private defines. * @{ */ enum fman_irq_enum { FMAN_IRQ_NUM = 0, FMAN_ERR_IRQ_NUM = 1 }; enum fman_mu_ram_map { FMAN_MURAM_OFF = 0x0, FMAN_MURAM_SIZE = 0x28000 }; struct fman_config { device_t fman_device; uintptr_t mem_base_addr; uintptr_t irq_num; uintptr_t err_irq_num; uint8_t fm_id; t_FmExceptionsCallback *exception_callback; t_FmBusErrorCallback *bus_error_callback; }; /** * @group FMan private methods/members. * @{ */ /** * Frame Manager firmware. * We use the same firmware for both P3041 and P2041 devices. 
*/ const uint32_t fman_firmware[] = FMAN_UC_IMG; const uint32_t fman_firmware_size = sizeof(fman_firmware); -static struct fman_softc *fm_sc = NULL; int fman_activate_resource(device_t bus, device_t child, int type, int rid, struct resource *res) { struct fman_softc *sc; bus_space_tag_t bt; bus_space_handle_t bh; int i, rv; sc = device_get_softc(bus); if (type != SYS_RES_IRQ) { for (i = 0; i < sc->sc_base.nranges; i++) { if (rman_is_region_manager(res, &sc->rman) != 0) { bt = rman_get_bustag(sc->mem_res); rv = bus_space_subregion(bt, rman_get_bushandle(sc->mem_res), rman_get_start(res) - rman_get_start(sc->mem_res), rman_get_size(res), &bh); if (rv != 0) return (rv); rman_set_bustag(res, bt); rman_set_bushandle(res, bh); return (rman_activate_resource(res)); } } return (EINVAL); } return (bus_generic_activate_resource(bus, child, type, rid, res)); } int fman_release_resource(device_t bus, device_t child, int type, int rid, struct resource *res) { struct fman_softc *sc; struct resource_list *rl; struct resource_list_entry *rle; int passthrough, rv; passthrough = (device_get_parent(child) != bus); rl = BUS_GET_RESOURCE_LIST(bus, child); sc = device_get_softc(bus); if (type != SYS_RES_IRQ) { if ((rman_get_flags(res) & RF_ACTIVE) != 0 ){ rv = bus_deactivate_resource(child, type, rid, res); if (rv != 0) return (rv); } rv = rman_release_resource(res); if (rv != 0) return (rv); if (!passthrough) { rle = resource_list_find(rl, type, rid); KASSERT(rle != NULL, ("%s: resource entry not found!", __func__)); KASSERT(rle->res != NULL, ("%s: resource entry is not busy", __func__)); rle->res = NULL; } return (0); } return (resource_list_release(rl, bus, child, type, rid, res)); } struct resource * fman_alloc_resource(device_t bus, device_t child, int type, int *rid, rman_res_t start, rman_res_t end, rman_res_t count, u_int flags) { struct fman_softc *sc; struct resource_list *rl; struct resource_list_entry *rle = NULL; struct resource *res; int i, isdefault, passthrough; isdefault = RMAN_IS_DEFAULT_RANGE(start, end); passthrough = (device_get_parent(child) != bus); sc = device_get_softc(bus); rl = BUS_GET_RESOURCE_LIST(bus, child); switch (type) { case SYS_RES_MEMORY: KASSERT(!(isdefault && passthrough), ("%s: passthrough of default allocation", __func__)); if (!passthrough) { rle = resource_list_find(rl, type, *rid); if (rle == NULL) return (NULL); KASSERT(rle->res == NULL, ("%s: resource entry is busy", __func__)); if (isdefault) { start = rle->start; count = ulmax(count, rle->count); end = ulmax(rle->end, start + count - 1); } } res = NULL; /* Map fman ranges to nexus ranges. 
*/ for (i = 0; i < sc->sc_base.nranges; i++) { if (start >= sc->sc_base.ranges[i].bus && end < sc->sc_base.ranges[i].bus + sc->sc_base.ranges[i].size) { start += rman_get_start(sc->mem_res); end += rman_get_start(sc->mem_res); res = rman_reserve_resource(&sc->rman, start, end, count, flags & ~RF_ACTIVE, child); if (res == NULL) return (NULL); rman_set_rid(res, *rid); if ((flags & RF_ACTIVE) != 0 && bus_activate_resource( child, type, *rid, res) != 0) { rman_release_resource(res); return (NULL); } break; } } if (!passthrough) rle->res = res; return (res); case SYS_RES_IRQ: return (resource_list_alloc(rl, bus, child, type, rid, start, end, count, flags)); } return (NULL); } static int fman_fill_ranges(phandle_t node, struct simplebus_softc *sc) { int host_address_cells; cell_t *base_ranges; ssize_t nbase_ranges; int err; int i, j, k; err = OF_searchencprop(OF_parent(node), "#address-cells", &host_address_cells, sizeof(host_address_cells)); if (err <= 0) return (-1); nbase_ranges = OF_getproplen(node, "ranges"); if (nbase_ranges < 0) return (-1); sc->nranges = nbase_ranges / sizeof(cell_t) / (sc->acells + host_address_cells + sc->scells); if (sc->nranges == 0) return (0); sc->ranges = malloc(sc->nranges * sizeof(sc->ranges[0]), M_DEVBUF, M_WAITOK); base_ranges = malloc(nbase_ranges, M_DEVBUF, M_WAITOK); OF_getencprop(node, "ranges", base_ranges, nbase_ranges); for (i = 0, j = 0; i < sc->nranges; i++) { sc->ranges[i].bus = 0; for (k = 0; k < sc->acells; k++) { sc->ranges[i].bus <<= 32; sc->ranges[i].bus |= base_ranges[j++]; } sc->ranges[i].host = 0; for (k = 0; k < host_address_cells; k++) { sc->ranges[i].host <<= 32; sc->ranges[i].host |= base_ranges[j++]; } sc->ranges[i].size = 0; for (k = 0; k < sc->scells; k++) { sc->ranges[i].size <<= 32; sc->ranges[i].size |= base_ranges[j++]; } } free(base_ranges, M_DEVBUF); return (sc->nranges); } static t_Handle fman_init(struct fman_softc *sc, struct fman_config *cfg) { phandle_t node; t_FmParams fm_params; t_Handle muram_handle, fm_handle; t_Error error; t_FmRevisionInfo revision_info; uint16_t clock; uint32_t tmp, mod; /* MURAM configuration */ muram_handle = FM_MURAM_ConfigAndInit(cfg->mem_base_addr + FMAN_MURAM_OFF, FMAN_MURAM_SIZE); if (muram_handle == NULL) { device_printf(cfg->fman_device, "couldn't init FM MURAM module" "\n"); return (NULL); } sc->muram_handle = muram_handle; /* Fill in FM configuration */ fm_params.fmId = cfg->fm_id; /* XXX we support only one partition thus each fman has master id */ fm_params.guestId = NCSW_MASTER_ID; fm_params.baseAddr = cfg->mem_base_addr; fm_params.h_FmMuram = muram_handle; /* Get FMan clock in Hz */ if ((tmp = fman_get_clock(sc)) == 0) return (NULL); /* Convert FMan clock to MHz */ clock = (uint16_t)(tmp / 1000000); mod = tmp % 1000000; if (mod >= 500000) ++clock; fm_params.fmClkFreq = clock; fm_params.f_Exception = cfg->exception_callback; fm_params.f_BusError = cfg->bus_error_callback; fm_params.h_App = cfg->fman_device; fm_params.irq = cfg->irq_num; fm_params.errIrq = cfg->err_irq_num; fm_params.firmware.size = fman_firmware_size; fm_params.firmware.p_Code = (uint32_t*)fman_firmware; fm_handle = FM_Config(&fm_params); if (fm_handle == NULL) { device_printf(cfg->fman_device, "couldn't configure FM " "module\n"); goto err; } FM_ConfigResetOnInit(fm_handle, TRUE); error = FM_Init(fm_handle); if (error != E_OK) { device_printf(cfg->fman_device, "couldn't init FM module\n"); goto err2; } error = FM_GetRevision(fm_handle, &revision_info); if (error != E_OK) { device_printf(cfg->fman_device, "couldn't get 
FM revision\n"); goto err2; } device_printf(cfg->fman_device, "Hardware version: %d.%d.\n", revision_info.majorRev, revision_info.minorRev); /* Initialize the simplebus part of things */ simplebus_init(sc->sc_base.dev, 0); node = ofw_bus_get_node(sc->sc_base.dev); fman_fill_ranges(node, &sc->sc_base); sc->rman.rm_type = RMAN_ARRAY; sc->rman.rm_descr = "FMan range"; rman_init_from_resource(&sc->rman, sc->mem_res); for (node = OF_child(node); node > 0; node = OF_peer(node)) { simplebus_add_device(sc->sc_base.dev, node, 0, NULL, -1, NULL); } return (fm_handle); err2: FM_Free(fm_handle); err: FM_MURAM_Free(muram_handle); return (NULL); } static void fman_exception_callback(t_Handle app_handle, e_FmExceptions exception) { struct fman_softc *sc; sc = app_handle; device_printf(sc->sc_base.dev, "FMan exception occurred.\n"); } static void fman_error_callback(t_Handle app_handle, e_FmPortType port_type, uint8_t port_id, uint64_t addr, uint8_t tnum, uint16_t liodn) { struct fman_softc *sc; sc = app_handle; device_printf(sc->sc_base.dev, "FMan error occurred.\n"); } /** @} */ /** * @group FMan driver interface. * @{ */ int -fman_get_handle(t_Handle *fmh) +fman_get_handle(device_t dev, t_Handle *fmh) { + struct fman_softc *sc = device_get_softc(dev); - if (fm_sc == NULL) - return (ENOMEM); + *fmh = sc->fm_handle; - *fmh = fm_sc->fm_handle; - return (0); } int -fman_get_muram_handle(t_Handle *muramh) +fman_get_muram_handle(device_t dev, t_Handle *muramh) { + struct fman_softc *sc = device_get_softc(dev); - if (fm_sc == NULL) - return (ENOMEM); + *muramh = sc->muram_handle; - *muramh = fm_sc->muram_handle; - return (0); } int -fman_get_bushandle(vm_offset_t *fm_base) +fman_get_bushandle(device_t dev, vm_offset_t *fm_base) { + struct fman_softc *sc = device_get_softc(dev); - if (fm_sc == NULL) - return (ENOMEM); + *fm_base = rman_get_bushandle(sc->mem_res); - *fm_base = rman_get_bushandle(fm_sc->mem_res); - return (0); } int -fman_get_dev(device_t *fm_dev) -{ - - if (fm_sc == NULL) - return (ENOMEM); - - *fm_dev = fm_sc->sc_base.dev; - - return (0); -} - -int fman_attach(device_t dev) { struct fman_softc *sc; struct fman_config cfg; pcell_t qchan_range[2]; phandle_t node; sc = device_get_softc(dev); sc->sc_base.dev = dev; - fm_sc = sc; /* Check if MallocSmart allocator is ready */ if (XX_MallocSmartInit() != E_OK) { device_printf(dev, "could not initialize smart allocator.\n"); return (ENXIO); } node = ofw_bus_get_node(dev); if (OF_getencprop(node, "fsl,qman-channel-range", qchan_range, sizeof(qchan_range)) <= 0) { device_printf(dev, "Missing QMan channel range property!\n"); return (ENXIO); } sc->qman_chan_base = qchan_range[0]; sc->qman_chan_count = qchan_range[1]; sc->mem_rid = 0; sc->mem_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &sc->mem_rid, RF_ACTIVE | RF_SHAREABLE); if (!sc->mem_res) { device_printf(dev, "could not allocate memory.\n"); return (ENXIO); } sc->irq_rid = 0; sc->irq_res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &sc->irq_rid, RF_ACTIVE); if (!sc->irq_res) { device_printf(dev, "could not allocate interrupt.\n"); goto err; } /* * XXX: Fix FMan interrupt. This is workaround for the issue with * interrupts directed to multiple CPUs by the interrupts subsystem. * Workaround is to bind the interrupt to only one CPU0. 
*/ XX_FmanFixIntr(rman_get_start(sc->irq_res)); sc->err_irq_rid = 1; sc->err_irq_res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &sc->err_irq_rid, RF_ACTIVE | RF_SHAREABLE); if (!sc->err_irq_res) { device_printf(dev, "could not allocate error interrupt.\n"); goto err; } /* Set FMan configuration */ cfg.fman_device = dev; cfg.fm_id = device_get_unit(dev); cfg.mem_base_addr = rman_get_bushandle(sc->mem_res); cfg.irq_num = (uintptr_t)sc->irq_res; cfg.err_irq_num = (uintptr_t)sc->err_irq_res; cfg.exception_callback = fman_exception_callback; cfg.bus_error_callback = fman_error_callback; sc->fm_handle = fman_init(sc, &cfg); if (sc->fm_handle == NULL) { device_printf(dev, "could not be configured\n"); return (ENXIO); } return (bus_generic_attach(dev)); err: fman_detach(dev); return (ENXIO); } int fman_detach(device_t dev) { struct fman_softc *sc; sc = device_get_softc(dev); if (sc->muram_handle) { FM_MURAM_Free(sc->muram_handle); } if (sc->fm_handle) { FM_Free(sc->fm_handle); } if (sc->mem_res) { bus_release_resource(dev, SYS_RES_MEMORY, sc->mem_rid, sc->mem_res); } if (sc->irq_res) { bus_release_resource(dev, SYS_RES_IRQ, sc->irq_rid, sc->irq_res); } if (sc->irq_res) { bus_release_resource(dev, SYS_RES_IRQ, sc->err_irq_rid, sc->err_irq_res); } return (0); } int fman_suspend(device_t dev) { return (0); } int fman_resume_dev(device_t dev) { return (0); } int fman_shutdown(device_t dev) { return (0); } int fman_qman_channel_id(device_t dev, int port) { struct fman_softc *sc; int qman_port_id[] = {0x31, 0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07}; int i; sc = device_get_softc(dev); for (i = 0; i < sc->qman_chan_count; i++) { if (qman_port_id[i] == port) return (sc->qman_chan_base + i); } return (0); } /** @} */ Index: head/sys/dev/dpaa/fman.h =================================================================== --- head/sys/dev/dpaa/fman.h (revision 325254) +++ head/sys/dev/dpaa/fman.h (revision 325255) @@ -1,80 +1,79 @@ /*- * Copyright (c) 2011-2012 Semihalf. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * $FreeBSD$ */ #ifndef FMAN_H_ #define FMAN_H_ #include /** * FMan driver instance data. 
*/ struct fman_softc { struct simplebus_softc sc_base; struct resource *mem_res; struct resource *irq_res; struct resource *err_irq_res; struct rman rman; int mem_rid; int irq_rid; int err_irq_rid; int qman_chan_base; int qman_chan_count; t_Handle fm_handle; t_Handle muram_handle; }; /** * @group QMan bus interface. * @{ */ struct resource * fman_alloc_resource(device_t bus, device_t child, int type, int *rid, rman_res_t start, rman_res_t end, rman_res_t count, u_int flags); int fman_activate_resource(device_t bus, device_t child, int type, int rid, struct resource *res); int fman_release_resource(device_t bus, device_t child, int type, int rid, struct resource *res); int fman_attach(device_t dev); int fman_detach(device_t dev); int fman_suspend(device_t dev); int fman_resume_dev(device_t dev); int fman_shutdown(device_t dev); int fman_read_ivar(device_t dev, device_t child, int index, uintptr_t *result); int fman_qman_channel_id(device_t, int); /** @} */ uint32_t fman_get_clock(struct fman_softc *sc); -int fman_get_handle(t_Handle *fmh); -int fman_get_muram_handle(t_Handle *muramh); -int fman_get_bushandle(vm_offset_t *fm_base); -int fman_get_dev(device_t *fmd); +int fman_get_handle(device_t dev, t_Handle *fmh); +int fman_get_muram_handle(device_t dev, t_Handle *muramh); +int fman_get_bushandle(device_t dev, vm_offset_t *fm_base); #endif /* FMAN_H_ */ Index: head/sys/dev/dpaa/fman_mdio.c =================================================================== --- head/sys/dev/dpaa/fman_mdio.c (revision 325254) +++ head/sys/dev/dpaa/fman_mdio.c (revision 325255) @@ -1,211 +1,211 @@ /*- * Copyright (c) 2016 Justin Hibbits * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
*/ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "fman.h" #include "miibus_if.h" #define MDIO_LOCK() mtx_lock(&sc->sc_lock) #define MDIO_UNLOCK() mtx_unlock(&sc->sc_lock) #define MDIO_WRITE4(sc,r,v) \ bus_space_write_4(&bs_be_tag, sc->sc_handle, sc->sc_offset + r, v) #define MDIO_READ4(sc, r) \ bus_space_read_4(&bs_be_tag, sc->sc_handle, sc->sc_offset + r) #define MDIO_MIIMCFG 0x0 #define MDIO_MIIMCOM 0x4 #define MIIMCOM_SCAN_CYCLE 0x00000002 #define MIIMCOM_READ_CYCLE 0x00000001 #define MDIO_MIIMADD 0x8 #define MDIO_MIIMCON 0xc #define MDIO_MIIMSTAT 0x10 #define MDIO_MIIMIND 0x14 #define MIIMIND_BUSY 0x1 static int pqmdio_fdt_probe(device_t dev); static int pqmdio_fdt_attach(device_t dev); static int pqmdio_detach(device_t dev); static int pqmdio_miibus_readreg(device_t dev, int phy, int reg); static int pqmdio_miibus_writereg(device_t dev, int phy, int reg, int value); struct pqmdio_softc { struct mtx sc_lock; bus_space_handle_t sc_handle; int sc_offset; }; static device_method_t pqmdio_methods[] = { /* Device interface */ DEVMETHOD(device_probe, pqmdio_fdt_probe), DEVMETHOD(device_attach, pqmdio_fdt_attach), DEVMETHOD(device_detach, pqmdio_detach), /* MII interface */ DEVMETHOD(miibus_readreg, pqmdio_miibus_readreg), DEVMETHOD(miibus_writereg, pqmdio_miibus_writereg), { 0, 0 } }; static struct ofw_compat_data mdio_compat_data[] = { {"fsl,fman-mdio", 0}, {NULL, 0} }; static driver_t pqmdio_driver = { "pq_mdio", pqmdio_methods, sizeof(struct pqmdio_softc), }; static int pqmdio_fdt_probe(device_t dev) { if (!ofw_bus_status_okay(dev)) return (ENXIO); if (!ofw_bus_search_compatible(dev, mdio_compat_data)->ocd_str) return (ENXIO); device_set_desc(dev, "Freescale QorIQ MDIO"); return (BUS_PROBE_DEFAULT); } static int pqmdio_fdt_attach(device_t dev) { struct pqmdio_softc *sc; rman_res_t start, count; sc = device_get_softc(dev); - fman_get_bushandle(&sc->sc_handle); + fman_get_bushandle(device_get_parent(dev), &sc->sc_handle); bus_get_resource(dev, SYS_RES_MEMORY, 0, &start, &count); sc->sc_offset = start; mtx_init(&sc->sc_lock, device_get_nameunit(dev), "QorIQ MDIO lock", MTX_DEF); return (0); } static int pqmdio_detach(device_t dev) { struct pqmdio_softc *sc; sc = device_get_softc(dev); mtx_destroy(&sc->sc_lock); return (0); } int pqmdio_miibus_readreg(device_t dev, int phy, int reg) { struct pqmdio_softc *sc; int rv; sc = device_get_softc(dev); MDIO_LOCK(); MDIO_WRITE4(sc, MDIO_MIIMADD, (phy << 8) | reg); MDIO_WRITE4(sc, MDIO_MIIMCOM, MIIMCOM_READ_CYCLE); MDIO_READ4(sc, MDIO_MIIMCOM); while ((MDIO_READ4(sc, MDIO_MIIMIND)) & MIIMIND_BUSY) ; rv = MDIO_READ4(sc, MDIO_MIIMSTAT); MDIO_WRITE4(sc, MDIO_MIIMCOM, 0); MDIO_READ4(sc, MDIO_MIIMCOM); MDIO_UNLOCK(); return (rv); } int pqmdio_miibus_writereg(device_t dev, int phy, int reg, int value) { struct pqmdio_softc *sc; sc = device_get_softc(dev); MDIO_LOCK(); /* Stop the MII management read cycle */ MDIO_WRITE4(sc, MDIO_MIIMCOM, 0); MDIO_READ4(sc, MDIO_MIIMCOM); MDIO_WRITE4(sc, MDIO_MIIMADD, (phy << 8) | reg); MDIO_WRITE4(sc, MDIO_MIIMCON, value); MDIO_READ4(sc, MDIO_MIIMCON); /* Wait till MII management write is complete */ while ((MDIO_READ4(sc, MDIO_MIIMIND)) & MIIMIND_BUSY) ; MDIO_UNLOCK(); return (0); } static devclass_t pqmdio_devclass; DRIVER_MODULE(pqmdio, fman, pqmdio_driver, pqmdio_devclass, 0, 0); DRIVER_MODULE(miibus, pqmdio, miibus_driver, miibus_devclass, 0, 0); 
MODULE_DEPEND(pqmdio, miibus, 1, 1, 1); Index: head/sys/dev/dpaa/if_dtsec.c =================================================================== --- head/sys/dev/dpaa/if_dtsec.c (revision 325254) +++ head/sys/dev/dpaa/if_dtsec.c (revision 325255) @@ -1,828 +1,830 @@ /*- * Copyright (c) 2011-2012 Semihalf. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "miibus_if.h" #include #include #include #include #include "fman.h" #include "if_dtsec.h" #include "if_dtsec_im.h" #include "if_dtsec_rm.h" /** * @group dTSEC private defines. * @{ */ /** * dTSEC FMan MAC exceptions info struct. */ struct dtsec_fm_mac_ex_str { const int num; const char *str; }; /** @} */ /** * @group FMan MAC routines. * @{ */ #define DTSEC_MAC_EXCEPTIONS_END (-1) /** * FMan MAC exceptions. 
*/ static const struct dtsec_fm_mac_ex_str dtsec_fm_mac_exceptions[] = { { e_FM_MAC_EX_10G_MDIO_SCAN_EVENTMDIO, "MDIO scan event" }, { e_FM_MAC_EX_10G_MDIO_CMD_CMPL, "MDIO command completion" }, { e_FM_MAC_EX_10G_REM_FAULT, "Remote fault" }, { e_FM_MAC_EX_10G_LOC_FAULT, "Local fault" }, { e_FM_MAC_EX_10G_1TX_ECC_ER, "Transmit frame ECC error" }, { e_FM_MAC_EX_10G_TX_FIFO_UNFL, "Transmit FIFO underflow" }, { e_FM_MAC_EX_10G_TX_FIFO_OVFL, "Receive FIFO overflow" }, { e_FM_MAC_EX_10G_TX_ER, "Transmit frame error" }, { e_FM_MAC_EX_10G_RX_FIFO_OVFL, "Receive FIFO overflow" }, { e_FM_MAC_EX_10G_RX_ECC_ER, "Receive frame ECC error" }, { e_FM_MAC_EX_10G_RX_JAB_FRM, "Receive jabber frame" }, { e_FM_MAC_EX_10G_RX_OVRSZ_FRM, "Receive oversized frame" }, { e_FM_MAC_EX_10G_RX_RUNT_FRM, "Receive runt frame" }, { e_FM_MAC_EX_10G_RX_FRAG_FRM, "Receive fragment frame" }, { e_FM_MAC_EX_10G_RX_LEN_ER, "Receive payload length error" }, { e_FM_MAC_EX_10G_RX_CRC_ER, "Receive CRC error" }, { e_FM_MAC_EX_10G_RX_ALIGN_ER, "Receive alignment error" }, { e_FM_MAC_EX_1G_BAB_RX, "Babbling receive error" }, { e_FM_MAC_EX_1G_RX_CTL, "Receive control (pause frame) interrupt" }, { e_FM_MAC_EX_1G_GRATEFUL_TX_STP_COMPLET, "Graceful transmit stop " "complete" }, { e_FM_MAC_EX_1G_BAB_TX, "Babbling transmit error" }, { e_FM_MAC_EX_1G_TX_CTL, "Transmit control (pause frame) interrupt" }, { e_FM_MAC_EX_1G_TX_ERR, "Transmit error" }, { e_FM_MAC_EX_1G_LATE_COL, "Late collision" }, { e_FM_MAC_EX_1G_COL_RET_LMT, "Collision retry limit" }, { e_FM_MAC_EX_1G_TX_FIFO_UNDRN, "Transmit FIFO underrun" }, { e_FM_MAC_EX_1G_MAG_PCKT, "Magic Packet detected when dTSEC is in " "Magic Packet detection mode" }, { e_FM_MAC_EX_1G_MII_MNG_RD_COMPLET, "MII management read completion" }, { e_FM_MAC_EX_1G_MII_MNG_WR_COMPLET, "MII management write completion" }, { e_FM_MAC_EX_1G_GRATEFUL_RX_STP_COMPLET, "Graceful receive stop " "complete" }, { e_FM_MAC_EX_1G_TX_DATA_ERR, "Internal data error on transmit" }, { e_FM_MAC_EX_1G_RX_DATA_ERR, "Internal data error on receive" }, { e_FM_MAC_EX_1G_1588_TS_RX_ERR, "Time-Stamp Receive Error" }, { e_FM_MAC_EX_1G_RX_MIB_CNT_OVFL, "MIB counter overflow" }, { DTSEC_MAC_EXCEPTIONS_END, "" } }; static const char * dtsec_fm_mac_ex_to_str(e_FmMacExceptions exception) { int i; for (i = 0; dtsec_fm_mac_exceptions[i].num != exception && dtsec_fm_mac_exceptions[i].num != DTSEC_MAC_EXCEPTIONS_END; ++i) ; if (dtsec_fm_mac_exceptions[i].num == DTSEC_MAC_EXCEPTIONS_END) return (""); return (dtsec_fm_mac_exceptions[i].str); } static void dtsec_fm_mac_mdio_event_callback(t_Handle h_App, e_FmMacExceptions exception) { struct dtsec_softc *sc; sc = h_App; device_printf(sc->sc_dev, "MDIO event %i: %s.\n", exception, dtsec_fm_mac_ex_to_str(exception)); } static void dtsec_fm_mac_exception_callback(t_Handle app, e_FmMacExceptions exception) { struct dtsec_softc *sc; sc = app; device_printf(sc->sc_dev, "MAC exception %i: %s.\n", exception, dtsec_fm_mac_ex_to_str(exception)); } static void dtsec_fm_mac_free(struct dtsec_softc *sc) { if (sc->sc_mach == NULL) return; FM_MAC_Disable(sc->sc_mach, e_COMM_MODE_RX_AND_TX); FM_MAC_Free(sc->sc_mach); sc->sc_mach = NULL; } static int dtsec_fm_mac_init(struct dtsec_softc *sc, uint8_t *mac) { t_FmMacParams params; t_Error error; memset(¶ms, 0, sizeof(params)); memcpy(¶ms.addr, mac, sizeof(params.addr)); params.baseAddr = rman_get_bushandle(sc->sc_mem); params.enetMode = sc->sc_mac_enet_mode; params.macId = sc->sc_eth_id; params.mdioIrq = sc->sc_mac_mdio_irq; params.f_Event = 
dtsec_fm_mac_mdio_event_callback; params.f_Exception = dtsec_fm_mac_exception_callback; params.h_App = sc; params.h_Fm = sc->sc_fmh; sc->sc_mach = FM_MAC_Config(¶ms); if (sc->sc_mach == NULL) { device_printf(sc->sc_dev, "couldn't configure FM_MAC module.\n" ); return (ENXIO); } error = FM_MAC_ConfigResetOnInit(sc->sc_mach, TRUE); if (error != E_OK) { device_printf(sc->sc_dev, "couldn't enable reset on init " "feature.\n"); dtsec_fm_mac_free(sc); return (ENXIO); } /* Do not inform about pause frames */ error = FM_MAC_ConfigException(sc->sc_mach, e_FM_MAC_EX_1G_RX_CTL, FALSE); if (error != E_OK) { device_printf(sc->sc_dev, "couldn't disable pause frames " "exception.\n"); dtsec_fm_mac_free(sc); return (ENXIO); } error = FM_MAC_Init(sc->sc_mach); if (error != E_OK) { device_printf(sc->sc_dev, "couldn't initialize FM_MAC module." "\n"); dtsec_fm_mac_free(sc); return (ENXIO); } return (0); } /** @} */ /** * @group FMan PORT routines. * @{ */ static const char * dtsec_fm_port_ex_to_str(e_FmPortExceptions exception) { switch (exception) { case e_FM_PORT_EXCEPTION_IM_BUSY: return ("IM: RX busy"); default: return (""); } } void dtsec_fm_port_rx_exception_callback(t_Handle app, e_FmPortExceptions exception) { struct dtsec_softc *sc; sc = app; device_printf(sc->sc_dev, "RX exception: %i: %s.\n", exception, dtsec_fm_port_ex_to_str(exception)); } void dtsec_fm_port_tx_exception_callback(t_Handle app, e_FmPortExceptions exception) { struct dtsec_softc *sc; sc = app; device_printf(sc->sc_dev, "TX exception: %i: %s.\n", exception, dtsec_fm_port_ex_to_str(exception)); } e_FmPortType dtsec_fm_port_rx_type(enum eth_dev_type type) { switch (type) { case ETH_DTSEC: return (e_FM_PORT_TYPE_RX); case ETH_10GSEC: return (e_FM_PORT_TYPE_RX_10G); default: return (e_FM_PORT_TYPE_DUMMY); } } e_FmPortType dtsec_fm_port_tx_type(enum eth_dev_type type) { switch (type) { case ETH_DTSEC: return (e_FM_PORT_TYPE_TX); case ETH_10GSEC: return (e_FM_PORT_TYPE_TX_10G); default: return (e_FM_PORT_TYPE_DUMMY); } } static void dtsec_fm_port_free_both(struct dtsec_softc *sc) { if (sc->sc_rxph) { FM_PORT_Free(sc->sc_rxph); sc->sc_rxph = NULL; } if (sc->sc_txph) { FM_PORT_Free(sc->sc_txph); sc->sc_txph = NULL; } } /** @} */ /** * @group IFnet routines. 
* @{ */ static int dtsec_if_enable_locked(struct dtsec_softc *sc) { int error; DTSEC_LOCK_ASSERT(sc); error = FM_MAC_Enable(sc->sc_mach, e_COMM_MODE_RX_AND_TX); if (error != E_OK) return (EIO); error = FM_PORT_Enable(sc->sc_rxph); if (error != E_OK) return (EIO); error = FM_PORT_Enable(sc->sc_txph); if (error != E_OK) return (EIO); sc->sc_ifnet->if_drv_flags |= IFF_DRV_RUNNING; /* Refresh link state */ dtsec_miibus_statchg(sc->sc_dev); return (0); } static int dtsec_if_disable_locked(struct dtsec_softc *sc) { int error; DTSEC_LOCK_ASSERT(sc); error = FM_MAC_Disable(sc->sc_mach, e_COMM_MODE_RX_AND_TX); if (error != E_OK) return (EIO); error = FM_PORT_Disable(sc->sc_rxph); if (error != E_OK) return (EIO); error = FM_PORT_Disable(sc->sc_txph); if (error != E_OK) return (EIO); sc->sc_ifnet->if_drv_flags &= ~IFF_DRV_RUNNING; return (0); } static int dtsec_if_ioctl(struct ifnet *ifp, u_long command, caddr_t data) { struct dtsec_softc *sc; struct ifreq *ifr; int error; sc = ifp->if_softc; ifr = (struct ifreq *)data; error = 0; /* Basic functionality to achieve media status reports */ switch (command) { case SIOCSIFFLAGS: DTSEC_LOCK(sc); if (sc->sc_ifnet->if_flags & IFF_UP) error = dtsec_if_enable_locked(sc); else error = dtsec_if_disable_locked(sc); DTSEC_UNLOCK(sc); break; case SIOCGIFMEDIA: case SIOCSIFMEDIA: error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii->mii_media, command); break; default: error = ether_ioctl(ifp, command, data); } return (error); } static void dtsec_if_tick(void *arg) { struct dtsec_softc *sc; sc = arg; /* TODO */ DTSEC_LOCK(sc); mii_tick(sc->sc_mii); callout_reset(&sc->sc_tick_callout, hz, dtsec_if_tick, sc); DTSEC_UNLOCK(sc); } static void dtsec_if_deinit_locked(struct dtsec_softc *sc) { DTSEC_LOCK_ASSERT(sc); DTSEC_UNLOCK(sc); callout_drain(&sc->sc_tick_callout); DTSEC_LOCK(sc); } static void dtsec_if_init_locked(struct dtsec_softc *sc) { int error; DTSEC_LOCK_ASSERT(sc); /* Set MAC address */ error = FM_MAC_ModifyMacAddr(sc->sc_mach, (t_EnetAddr *)IF_LLADDR(sc->sc_ifnet)); if (error != E_OK) { device_printf(sc->sc_dev, "couldn't set MAC address.\n"); goto err; } /* Start MII polling */ if (sc->sc_mii) callout_reset(&sc->sc_tick_callout, hz, dtsec_if_tick, sc); if (sc->sc_ifnet->if_flags & IFF_UP) { error = dtsec_if_enable_locked(sc); if (error != 0) goto err; } else { error = dtsec_if_disable_locked(sc); if (error != 0) goto err; } return; err: dtsec_if_deinit_locked(sc); device_printf(sc->sc_dev, "initialization error.\n"); return; } static void dtsec_if_init(void *data) { struct dtsec_softc *sc; sc = data; DTSEC_LOCK(sc); dtsec_if_init_locked(sc); DTSEC_UNLOCK(sc); } static void dtsec_if_start(struct ifnet *ifp) { struct dtsec_softc *sc; sc = ifp->if_softc; DTSEC_LOCK(sc); sc->sc_start_locked(sc); DTSEC_UNLOCK(sc); } static void dtsec_if_watchdog(struct ifnet *ifp) { /* TODO */ } /** @} */ /** * @group IFmedia routines. * @{ */ static int dtsec_ifmedia_upd(struct ifnet *ifp) { struct dtsec_softc *sc = ifp->if_softc; DTSEC_LOCK(sc); mii_mediachg(sc->sc_mii); DTSEC_UNLOCK(sc); return (0); } static void dtsec_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr) { struct dtsec_softc *sc = ifp->if_softc; DTSEC_LOCK(sc); mii_pollstat(sc->sc_mii); ifmr->ifm_active = sc->sc_mii->mii_media_active; ifmr->ifm_status = sc->sc_mii->mii_media_status; DTSEC_UNLOCK(sc); } /** @} */ /** * @group dTSEC bus interface. 
* @{ */ static void dtsec_configure_mode(struct dtsec_softc *sc) { char tunable[64]; snprintf(tunable, sizeof(tunable), "%s.independent_mode", device_get_nameunit(sc->sc_dev)); sc->sc_mode = DTSEC_MODE_REGULAR; TUNABLE_INT_FETCH(tunable, &sc->sc_mode); if (sc->sc_mode == DTSEC_MODE_REGULAR) { sc->sc_port_rx_init = dtsec_rm_fm_port_rx_init; sc->sc_port_tx_init = dtsec_rm_fm_port_tx_init; sc->sc_start_locked = dtsec_rm_if_start_locked; } else { sc->sc_port_rx_init = dtsec_im_fm_port_rx_init; sc->sc_port_tx_init = dtsec_im_fm_port_tx_init; sc->sc_start_locked = dtsec_im_if_start_locked; } device_printf(sc->sc_dev, "Configured for %s mode.\n", (sc->sc_mode == DTSEC_MODE_REGULAR) ? "regular" : "independent"); } int dtsec_attach(device_t dev) { struct dtsec_softc *sc; + device_t parent; int error; struct ifnet *ifp; sc = device_get_softc(dev); + parent = device_get_parent(dev); sc->sc_dev = dev; sc->sc_mac_mdio_irq = NO_IRQ; sc->sc_eth_id = device_get_unit(dev); /* Check if MallocSmart allocator is ready */ if (XX_MallocSmartInit() != E_OK) return (ENXIO); /* Init locks */ mtx_init(&sc->sc_lock, device_get_nameunit(dev), "DTSEC Global Lock", MTX_DEF); mtx_init(&sc->sc_mii_lock, device_get_nameunit(dev), "DTSEC MII Lock", MTX_DEF); /* Init callouts */ callout_init(&sc->sc_tick_callout, CALLOUT_MPSAFE); /* Read configuraton */ - if ((error = fman_get_handle(&sc->sc_fmh)) != 0) + if ((error = fman_get_handle(parent, &sc->sc_fmh)) != 0) return (error); - if ((error = fman_get_muram_handle(&sc->sc_muramh)) != 0) + if ((error = fman_get_muram_handle(parent, &sc->sc_muramh)) != 0) return (error); - if ((error = fman_get_bushandle(&sc->sc_fm_base)) != 0) + if ((error = fman_get_bushandle(parent, &sc->sc_fm_base)) != 0) return (error); /* Configure working mode */ dtsec_configure_mode(sc); /* If we are working in regular mode configure BMAN and QMAN */ if (sc->sc_mode == DTSEC_MODE_REGULAR) { /* Create RX buffer pool */ error = dtsec_rm_pool_rx_init(sc); if (error != 0) return (EIO); /* Create RX frame queue range */ error = dtsec_rm_fqr_rx_init(sc); if (error != 0) return (EIO); /* Create frame info pool */ error = dtsec_rm_fi_pool_init(sc); if (error != 0) return (EIO); /* Create TX frame queue range */ error = dtsec_rm_fqr_tx_init(sc); if (error != 0) return (EIO); } /* Init FMan MAC module. 
*/ error = dtsec_fm_mac_init(sc, sc->sc_mac_addr); if (error != 0) { dtsec_detach(dev); return (ENXIO); } /* Init FMan TX port */ error = sc->sc_port_tx_init(sc, device_get_unit(sc->sc_dev)); if (error != 0) { dtsec_detach(dev); return (ENXIO); } /* Init FMan RX port */ error = sc->sc_port_rx_init(sc, device_get_unit(sc->sc_dev)); if (error != 0) { dtsec_detach(dev); return (ENXIO); } /* Create network interface for upper layers */ ifp = sc->sc_ifnet = if_alloc(IFT_ETHER); if (ifp == NULL) { device_printf(sc->sc_dev, "if_alloc() failed.\n"); dtsec_detach(dev); return (ENOMEM); } ifp->if_softc = sc; ifp->if_mtu = ETHERMTU; /* TODO: Configure */ ifp->if_flags = IFF_SIMPLEX | IFF_BROADCAST; ifp->if_init = dtsec_if_init; ifp->if_start = dtsec_if_start; ifp->if_ioctl = dtsec_if_ioctl; ifp->if_snd.ifq_maxlen = IFQ_MAXLEN; if (sc->sc_phy_addr >= 0) if_initname(ifp, device_get_name(sc->sc_dev), device_get_unit(sc->sc_dev)); else if_initname(ifp, "dtsec_phy", device_get_unit(sc->sc_dev)); /* TODO */ #if 0 IFQ_SET_MAXLEN(&ifp->if_snd, TSEC_TX_NUM_DESC - 1); ifp->if_snd.ifq_drv_maxlen = TSEC_TX_NUM_DESC - 1; IFQ_SET_READY(&ifp->if_snd); #endif ifp->if_capabilities = 0; /* TODO: Check */ ifp->if_capenable = ifp->if_capabilities; /* Attach PHY(s) */ error = mii_attach(sc->sc_dev, &sc->sc_mii_dev, ifp, dtsec_ifmedia_upd, dtsec_ifmedia_sts, BMSR_DEFCAPMASK, sc->sc_phy_addr, MII_OFFSET_ANY, 0); if (error) { device_printf(sc->sc_dev, "attaching PHYs failed: %d\n", error); dtsec_detach(sc->sc_dev); return (error); } sc->sc_mii = device_get_softc(sc->sc_mii_dev); /* Attach to stack */ ether_ifattach(ifp, sc->sc_mac_addr); return (0); } int dtsec_detach(device_t dev) { struct dtsec_softc *sc; if_t ifp; sc = device_get_softc(dev); ifp = sc->sc_ifnet; if (device_is_attached(dev)) { ether_ifdetach(ifp); /* Shutdown interface */ DTSEC_LOCK(sc); dtsec_if_deinit_locked(sc); DTSEC_UNLOCK(sc); } if (sc->sc_ifnet) { if_free(sc->sc_ifnet); sc->sc_ifnet = NULL; } if (sc->sc_mode == DTSEC_MODE_REGULAR) { /* Free RX/TX FQRs */ dtsec_rm_fqr_rx_free(sc); dtsec_rm_fqr_tx_free(sc); /* Free frame info pool */ dtsec_rm_fi_pool_free(sc); /* Free RX buffer pool */ dtsec_rm_pool_rx_free(sc); } dtsec_fm_mac_free(sc); dtsec_fm_port_free_both(sc); /* Destroy lock */ mtx_destroy(&sc->sc_lock); return (0); } int dtsec_suspend(device_t dev) { return (0); } int dtsec_resume(device_t dev) { return (0); } int dtsec_shutdown(device_t dev) { return (0); } /** @} */ /** * @group MII bus interface. * @{ */ int dtsec_miibus_readreg(device_t dev, int phy, int reg) { struct dtsec_softc *sc; sc = device_get_softc(dev); return (MIIBUS_READREG(sc->sc_mdio, phy, reg)); } int dtsec_miibus_writereg(device_t dev, int phy, int reg, int value) { struct dtsec_softc *sc; sc = device_get_softc(dev); return (MIIBUS_WRITEREG(sc->sc_mdio, phy, reg, value)); } void dtsec_miibus_statchg(device_t dev) { struct dtsec_softc *sc; e_EnetSpeed speed; bool duplex; int error; sc = device_get_softc(dev); DTSEC_LOCK_ASSERT(sc); duplex = ((sc->sc_mii->mii_media_active & IFM_GMASK) == IFM_FDX); switch (IFM_SUBTYPE(sc->sc_mii->mii_media_active)) { case IFM_1000_T: case IFM_1000_SX: speed = e_ENET_SPEED_1000; break; case IFM_100_TX: speed = e_ENET_SPEED_100; break; case IFM_10_T: speed = e_ENET_SPEED_10; break; default: speed = e_ENET_SPEED_10; } error = FM_MAC_AdjustLink(sc->sc_mach, speed, duplex); if (error != E_OK) device_printf(sc->sc_dev, "error while adjusting MAC speed.\n"); } /** @} */
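
The core of the change above is visible in fman.c and fman.h: fman_get_handle(), fman_get_muram_handle() and fman_get_bushandle() now take the fman device_t instead of going through the former file-scope fm_sc softc, and fman_get_dev() is removed, so each Frame Manager instance can be addressed separately. A minimal sketch (editorial, not part of the commit) of how a consumer sitting on the fman bus is expected to use the reworked accessors; names follow the fman.h declarations above, error handling is trimmed:

	static int
	example_fman_lookup(device_t dev, t_Handle *fmh, t_Handle *muramh,
	    vm_offset_t *base)
	{
		device_t fman;
		int error;

		/* dtsec and pq_mdio instances attach under their fman. */
		fman = device_get_parent(dev);
		if ((error = fman_get_handle(fman, fmh)) != 0)
			return (error);
		if ((error = fman_get_muram_handle(fman, muramh)) != 0)
			return (error);
		return (fman_get_bushandle(fman, base));
	}

This mirrors what dtsec_attach() and pqmdio_fdt_attach() now do in the diff, replacing the single-FMan assumption that the removed BUGS section of dtsec.4 documented.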
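
The Regular/Independent split described in the manual page is chosen in dtsec_configure_mode() above, which fetches a per-interface kernel environment tunable named "<nameunit>.independent_mode" and defaults to regular mode. Assuming the first interface attaches as dtsec0, a loader.conf entry along these lines should select independent mode (untested sketch, not taken from the commit):

	dtsec0.independent_mode=1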