Index: stable/11/sys/arm/ti/am335x/am335x_prcm.c =================================================================== --- stable/11/sys/arm/ti/am335x/am335x_prcm.c (revision 310854) +++ stable/11/sys/arm/ti/am335x/am335x_prcm.c (revision 310855) @@ -1,855 +1,856 @@ /*- * Copyright (c) 2012 Damjan Marion * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ #include __FBSDID("$FreeBSD$"); #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include +#include "am335x_scm.h" + #define CM_PER 0 #define CM_PER_L4LS_CLKSTCTRL (CM_PER + 0x000) #define CM_PER_L3S_CLKSTCTRL (CM_PER + 0x004) #define CM_PER_L3_CLKSTCTRL (CM_PER + 0x00C) #define CM_PER_CPGMAC0_CLKCTRL (CM_PER + 0x014) #define CM_PER_LCDC_CLKCTRL (CM_PER + 0x018) #define CM_PER_USB0_CLKCTRL (CM_PER + 0x01C) #define CM_PER_TPTC0_CLKCTRL (CM_PER + 0x024) #define CM_PER_UART5_CLKCTRL (CM_PER + 0x038) #define CM_PER_MMC0_CLKCTRL (CM_PER + 0x03C) #define CM_PER_I2C2_CLKCTRL (CM_PER + 0x044) #define CM_PER_I2C1_CLKCTRL (CM_PER + 0x048) #define CM_PER_SPI0_CLKCTRL (CM_PER + 0x04C) #define CM_PER_SPI1_CLKCTRL (CM_PER + 0x050) #define CM_PER_UART1_CLKCTRL (CM_PER + 0x06C) #define CM_PER_UART2_CLKCTRL (CM_PER + 0x070) #define CM_PER_UART3_CLKCTRL (CM_PER + 0x074) #define CM_PER_UART4_CLKCTRL (CM_PER + 0x078) #define CM_PER_TIMER7_CLKCTRL (CM_PER + 0x07C) #define CM_PER_TIMER2_CLKCTRL (CM_PER + 0x080) #define CM_PER_TIMER3_CLKCTRL (CM_PER + 0x084) #define CM_PER_TIMER4_CLKCTRL (CM_PER + 0x088) #define CM_PER_GPIO1_CLKCTRL (CM_PER + 0x0AC) #define CM_PER_GPIO2_CLKCTRL (CM_PER + 0x0B0) #define CM_PER_GPIO3_CLKCTRL (CM_PER + 0x0B4) #define CM_PER_TPCC_CLKCTRL (CM_PER + 0x0BC) #define CM_PER_EPWMSS1_CLKCTRL (CM_PER + 0x0CC) #define CM_PER_EPWMSS0_CLKCTRL (CM_PER + 0x0D4) #define CM_PER_EPWMSS2_CLKCTRL (CM_PER + 0x0D8) #define CM_PER_L3_INSTR_CLKCTRL (CM_PER + 0x0DC) #define CM_PER_L3_CLKCTRL (CM_PER + 0x0E0) #define CM_PER_PRUSS_CLKCTRL (CM_PER + 0x0E8) #define CM_PER_TIMER5_CLKCTRL (CM_PER + 0x0EC) #define CM_PER_TIMER6_CLKCTRL (CM_PER + 0x0F0) #define CM_PER_MMC1_CLKCTRL (CM_PER + 0x0F4) #define CM_PER_MMC2_CLKCTRL (CM_PER + 0x0F8) #define CM_PER_TPTC1_CLKCTRL (CM_PER + 0x0FC) #define CM_PER_TPTC2_CLKCTRL (CM_PER + 
0x100) #define CM_PER_SPINLOCK0_CLKCTRL (CM_PER + 0x10C) #define CM_PER_MAILBOX0_CLKCTRL (CM_PER + 0x110) #define CM_PER_OCPWP_L3_CLKSTCTRL (CM_PER + 0x12C) #define CM_PER_OCPWP_CLKCTRL (CM_PER + 0x130) #define CM_PER_CPSW_CLKSTCTRL (CM_PER + 0x144) #define CM_PER_PRUSS_CLKSTCTRL (CM_PER + 0x140) #define CM_WKUP 0x400 #define CM_WKUP_CLKSTCTRL (CM_WKUP + 0x000) #define CM_WKUP_CONTROL_CLKCTRL (CM_WKUP + 0x004) #define CM_WKUP_GPIO0_CLKCTRL (CM_WKUP + 0x008) #define CM_WKUP_CM_L3_AON_CLKSTCTRL (CM_WKUP + 0x01C) #define CM_WKUP_CM_CLKSEL_DPLL_MPU (CM_WKUP + 0x02C) #define CM_WKUP_CM_IDLEST_DPLL_DISP (CM_WKUP + 0x048) #define CM_WKUP_CM_CLKSEL_DPLL_DISP (CM_WKUP + 0x054) #define CM_WKUP_CM_CLKDCOLDO_DPLL_PER (CM_WKUP + 0x07C) #define CM_WKUP_CM_CLKMODE_DPLL_DISP (CM_WKUP + 0x098) #define CM_WKUP_I2C0_CLKCTRL (CM_WKUP + 0x0B8) #define CM_WKUP_ADC_TSC_CLKCTRL (CM_WKUP + 0x0BC) #define CM_DPLL 0x500 #define CLKSEL_TIMER7_CLK (CM_DPLL + 0x004) #define CLKSEL_TIMER2_CLK (CM_DPLL + 0x008) #define CLKSEL_TIMER3_CLK (CM_DPLL + 0x00C) #define CLKSEL_TIMER4_CLK (CM_DPLL + 0x010) #define CLKSEL_TIMER5_CLK (CM_DPLL + 0x018) #define CLKSEL_TIMER6_CLK (CM_DPLL + 0x01C) #define CLKSEL_PRUSS_OCP_CLK (CM_DPLL + 0x030) #define CM_RTC 0x800 #define CM_RTC_RTC_CLKCTRL (CM_RTC + 0x000) #define CM_RTC_CLKSTCTRL (CM_RTC + 0x004) #define PRM_PER 0xC00 #define PRM_PER_RSTCTRL (PRM_PER + 0x00) #define PRM_DEVICE_OFFSET 0xF00 #define PRM_RSTCTRL (PRM_DEVICE_OFFSET + 0x00) struct am335x_prcm_softc { struct resource * res[2]; bus_space_tag_t bst; bus_space_handle_t bsh; }; static struct resource_spec am335x_prcm_spec[] = { { SYS_RES_MEMORY, 0, RF_ACTIVE }, { -1, 0 } }; static struct am335x_prcm_softc *am335x_prcm_sc = NULL; static int am335x_clk_noop_activate(struct ti_clock_dev *clkdev); static int am335x_clk_generic_activate(struct ti_clock_dev *clkdev); static int am335x_clk_gpio_activate(struct ti_clock_dev *clkdev); static int am335x_clk_noop_deactivate(struct ti_clock_dev *clkdev); static int am335x_clk_generic_deactivate(struct ti_clock_dev *clkdev); static int am335x_clk_noop_set_source(struct ti_clock_dev *clkdev, clk_src_t clksrc); static int am335x_clk_generic_set_source(struct ti_clock_dev *clkdev, clk_src_t clksrc); static int am335x_clk_hsmmc_get_source_freq(struct ti_clock_dev *clkdev, unsigned int *freq); static int am335x_clk_get_sysclk_freq(struct ti_clock_dev *clkdev, unsigned int *freq); static int am335x_clk_get_arm_fclk_freq(struct ti_clock_dev *clkdev, unsigned int *freq); static int am335x_clk_get_arm_disp_freq(struct ti_clock_dev *clkdev, unsigned int *freq); static int am335x_clk_set_arm_disp_freq(struct ti_clock_dev *clkdev, unsigned int freq); static void am335x_prcm_reset(void); static int am335x_clk_cpsw_activate(struct ti_clock_dev *clkdev); static int am335x_clk_musb0_activate(struct ti_clock_dev *clkdev); static int am335x_clk_lcdc_activate(struct ti_clock_dev *clkdev); static int am335x_clk_pruss_activate(struct ti_clock_dev *clkdev); #define AM335X_NOOP_CLOCK_DEV(i) \ { .id = (i), \ .clk_activate = am335x_clk_noop_activate, \ .clk_deactivate = am335x_clk_noop_deactivate, \ .clk_set_source = am335x_clk_noop_set_source, \ .clk_accessible = NULL, \ .clk_get_source_freq = NULL, \ .clk_set_source_freq = NULL \ } #define AM335X_GENERIC_CLOCK_DEV(i) \ { .id = (i), \ .clk_activate = am335x_clk_generic_activate, \ .clk_deactivate = am335x_clk_generic_deactivate, \ .clk_set_source = am335x_clk_generic_set_source, \ .clk_accessible = NULL, \ .clk_get_source_freq = NULL, \ .clk_set_source_freq = 
NULL \ } #define AM335X_GPIO_CLOCK_DEV(i) \ { .id = (i), \ .clk_activate = am335x_clk_gpio_activate, \ .clk_deactivate = am335x_clk_generic_deactivate, \ .clk_set_source = am335x_clk_generic_set_source, \ .clk_accessible = NULL, \ .clk_get_source_freq = NULL, \ .clk_set_source_freq = NULL \ } #define AM335X_MMCHS_CLOCK_DEV(i) \ { .id = (i), \ .clk_activate = am335x_clk_generic_activate, \ .clk_deactivate = am335x_clk_generic_deactivate, \ .clk_set_source = am335x_clk_generic_set_source, \ .clk_accessible = NULL, \ .clk_get_source_freq = am335x_clk_hsmmc_get_source_freq, \ .clk_set_source_freq = NULL \ } struct ti_clock_dev ti_am335x_clk_devmap[] = { /* System clocks */ { .id = SYS_CLK, .clk_activate = NULL, .clk_deactivate = NULL, .clk_set_source = NULL, .clk_accessible = NULL, .clk_get_source_freq = am335x_clk_get_sysclk_freq, .clk_set_source_freq = NULL, }, /* MPU (ARM) core clocks */ { .id = MPU_CLK, .clk_activate = NULL, .clk_deactivate = NULL, .clk_set_source = NULL, .clk_accessible = NULL, .clk_get_source_freq = am335x_clk_get_arm_fclk_freq, .clk_set_source_freq = NULL, }, /* CPSW Ethernet Switch core clocks */ { .id = CPSW_CLK, .clk_activate = am335x_clk_cpsw_activate, .clk_deactivate = NULL, .clk_set_source = NULL, .clk_accessible = NULL, .clk_get_source_freq = NULL, .clk_set_source_freq = NULL, }, /* Mentor USB HS controller core clocks */ { .id = MUSB0_CLK, .clk_activate = am335x_clk_musb0_activate, .clk_deactivate = NULL, .clk_set_source = NULL, .clk_accessible = NULL, .clk_get_source_freq = NULL, .clk_set_source_freq = NULL, }, /* LCD controller clocks */ { .id = LCDC_CLK, .clk_activate = am335x_clk_lcdc_activate, .clk_deactivate = NULL, .clk_set_source = NULL, .clk_accessible = NULL, .clk_get_source_freq = am335x_clk_get_arm_disp_freq, .clk_set_source_freq = am335x_clk_set_arm_disp_freq, }, /* UART */ AM335X_NOOP_CLOCK_DEV(UART1_CLK), AM335X_GENERIC_CLOCK_DEV(UART2_CLK), AM335X_GENERIC_CLOCK_DEV(UART3_CLK), AM335X_GENERIC_CLOCK_DEV(UART4_CLK), AM335X_GENERIC_CLOCK_DEV(UART5_CLK), AM335X_GENERIC_CLOCK_DEV(UART6_CLK), /* DMTimer */ AM335X_GENERIC_CLOCK_DEV(TIMER2_CLK), AM335X_GENERIC_CLOCK_DEV(TIMER3_CLK), AM335X_GENERIC_CLOCK_DEV(TIMER4_CLK), AM335X_GENERIC_CLOCK_DEV(TIMER5_CLK), AM335X_GENERIC_CLOCK_DEV(TIMER6_CLK), AM335X_GENERIC_CLOCK_DEV(TIMER7_CLK), /* GPIO, we use hwmods as reference, not units in spec */ AM335X_GPIO_CLOCK_DEV(GPIO1_CLK), AM335X_GPIO_CLOCK_DEV(GPIO2_CLK), AM335X_GPIO_CLOCK_DEV(GPIO3_CLK), AM335X_GPIO_CLOCK_DEV(GPIO4_CLK), /* I2C we use hwmods as reference, not units in spec */ AM335X_GENERIC_CLOCK_DEV(I2C1_CLK), AM335X_GENERIC_CLOCK_DEV(I2C2_CLK), AM335X_GENERIC_CLOCK_DEV(I2C3_CLK), /* McSPI we use hwmods as reference, not units in spec */ AM335X_GENERIC_CLOCK_DEV(SPI0_CLK), AM335X_GENERIC_CLOCK_DEV(SPI1_CLK), /* TSC_ADC */ AM335X_GENERIC_CLOCK_DEV(TSC_ADC_CLK), /* EDMA */ AM335X_GENERIC_CLOCK_DEV(EDMA_TPCC_CLK), AM335X_GENERIC_CLOCK_DEV(EDMA_TPTC0_CLK), AM335X_GENERIC_CLOCK_DEV(EDMA_TPTC1_CLK), AM335X_GENERIC_CLOCK_DEV(EDMA_TPTC2_CLK), /* MMCHS */ AM335X_MMCHS_CLOCK_DEV(MMC1_CLK), AM335X_MMCHS_CLOCK_DEV(MMC2_CLK), AM335X_MMCHS_CLOCK_DEV(MMC3_CLK), /* PWMSS */ AM335X_GENERIC_CLOCK_DEV(PWMSS0_CLK), AM335X_GENERIC_CLOCK_DEV(PWMSS1_CLK), AM335X_GENERIC_CLOCK_DEV(PWMSS2_CLK), /* System Mailbox clock */ AM335X_GENERIC_CLOCK_DEV(MAILBOX0_CLK), /* SPINLOCK */ AM335X_GENERIC_CLOCK_DEV(SPINLOCK0_CLK), /* PRU-ICSS */ { .id = PRUSS_CLK, .clk_activate = am335x_clk_pruss_activate, .clk_deactivate = NULL, .clk_set_source = NULL, .clk_accessible = NULL, 
.clk_get_source_freq = NULL, .clk_set_source_freq = NULL, }, /* RTC */ AM335X_GENERIC_CLOCK_DEV(RTC_CLK), { INVALID_CLK_IDENT, NULL, NULL, NULL, NULL } }; struct am335x_clk_details { clk_ident_t id; uint32_t clkctrl_reg; uint32_t clksel_reg; }; #define _CLK_DETAIL(i, c, s) \ { .id = (i), \ .clkctrl_reg = (c), \ .clksel_reg = (s), \ } static struct am335x_clk_details g_am335x_clk_details[] = { /* UART. UART0 clock not controllable. */ _CLK_DETAIL(UART1_CLK, 0, 0), _CLK_DETAIL(UART2_CLK, CM_PER_UART1_CLKCTRL, 0), _CLK_DETAIL(UART3_CLK, CM_PER_UART2_CLKCTRL, 0), _CLK_DETAIL(UART4_CLK, CM_PER_UART3_CLKCTRL, 0), _CLK_DETAIL(UART5_CLK, CM_PER_UART4_CLKCTRL, 0), _CLK_DETAIL(UART6_CLK, CM_PER_UART5_CLKCTRL, 0), /* DMTimer modules */ _CLK_DETAIL(TIMER2_CLK, CM_PER_TIMER2_CLKCTRL, CLKSEL_TIMER2_CLK), _CLK_DETAIL(TIMER3_CLK, CM_PER_TIMER3_CLKCTRL, CLKSEL_TIMER3_CLK), _CLK_DETAIL(TIMER4_CLK, CM_PER_TIMER4_CLKCTRL, CLKSEL_TIMER4_CLK), _CLK_DETAIL(TIMER5_CLK, CM_PER_TIMER5_CLKCTRL, CLKSEL_TIMER5_CLK), _CLK_DETAIL(TIMER6_CLK, CM_PER_TIMER6_CLKCTRL, CLKSEL_TIMER6_CLK), _CLK_DETAIL(TIMER7_CLK, CM_PER_TIMER7_CLKCTRL, CLKSEL_TIMER7_CLK), /* GPIO modules, hwmods start with gpio1 */ _CLK_DETAIL(GPIO1_CLK, CM_WKUP_GPIO0_CLKCTRL, 0), _CLK_DETAIL(GPIO2_CLK, CM_PER_GPIO1_CLKCTRL, 0), _CLK_DETAIL(GPIO3_CLK, CM_PER_GPIO2_CLKCTRL, 0), _CLK_DETAIL(GPIO4_CLK, CM_PER_GPIO3_CLKCTRL, 0), /* I2C modules, hwmods start with i2c1 */ _CLK_DETAIL(I2C1_CLK, CM_WKUP_I2C0_CLKCTRL, 0), _CLK_DETAIL(I2C2_CLK, CM_PER_I2C1_CLKCTRL, 0), _CLK_DETAIL(I2C3_CLK, CM_PER_I2C2_CLKCTRL, 0), /* McSPI modules, hwmods start with spi0 */ _CLK_DETAIL(SPI0_CLK, CM_PER_SPI0_CLKCTRL, 0), _CLK_DETAIL(SPI1_CLK, CM_PER_SPI1_CLKCTRL, 0), /* TSC_ADC module */ _CLK_DETAIL(TSC_ADC_CLK, CM_WKUP_ADC_TSC_CLKCTRL, 0), /* EDMA modules */ _CLK_DETAIL(EDMA_TPCC_CLK, CM_PER_TPCC_CLKCTRL, 0), _CLK_DETAIL(EDMA_TPTC0_CLK, CM_PER_TPTC0_CLKCTRL, 0), _CLK_DETAIL(EDMA_TPTC1_CLK, CM_PER_TPTC1_CLKCTRL, 0), _CLK_DETAIL(EDMA_TPTC2_CLK, CM_PER_TPTC2_CLKCTRL, 0), /* MMCHS modules, hwmods start with mmc1*/ _CLK_DETAIL(MMC1_CLK, CM_PER_MMC0_CLKCTRL, 0), _CLK_DETAIL(MMC2_CLK, CM_PER_MMC1_CLKCTRL, 0), _CLK_DETAIL(MMC3_CLK, CM_PER_MMC1_CLKCTRL, 0), /* PWMSS modules */ _CLK_DETAIL(PWMSS0_CLK, CM_PER_EPWMSS0_CLKCTRL, 0), _CLK_DETAIL(PWMSS1_CLK, CM_PER_EPWMSS1_CLKCTRL, 0), _CLK_DETAIL(PWMSS2_CLK, CM_PER_EPWMSS2_CLKCTRL, 0), _CLK_DETAIL(MAILBOX0_CLK, CM_PER_MAILBOX0_CLKCTRL, 0), _CLK_DETAIL(SPINLOCK0_CLK, CM_PER_SPINLOCK0_CLKCTRL, 0), /* RTC module */ _CLK_DETAIL(RTC_CLK, CM_RTC_RTC_CLKCTRL, 0), { INVALID_CLK_IDENT, 0}, }; /* Read/Write macros */ #define prcm_read_4(reg) \ bus_space_read_4(am335x_prcm_sc->bst, am335x_prcm_sc->bsh, reg) #define prcm_write_4(reg, val) \ bus_space_write_4(am335x_prcm_sc->bst, am335x_prcm_sc->bsh, reg, val) void am335x_prcm_setup_dmtimer(int); static int am335x_prcm_probe(device_t dev) { if (!ofw_bus_status_okay(dev)) return (ENXIO); if (ofw_bus_is_compatible(dev, "ti,am3-prcm")) { device_set_desc(dev, "AM335x Power and Clock Management"); return(BUS_PROBE_DEFAULT); } return (ENXIO); } static int am335x_prcm_attach(device_t dev) { struct am335x_prcm_softc *sc = device_get_softc(dev); unsigned int sysclk, fclk; if (am335x_prcm_sc) return (ENXIO); if (bus_alloc_resources(dev, am335x_prcm_spec, sc->res)) { device_printf(dev, "could not allocate resources\n"); return (ENXIO); } sc->bst = rman_get_bustag(sc->res[0]); sc->bsh = rman_get_bushandle(sc->res[0]); am335x_prcm_sc = sc; ti_cpu_reset = am335x_prcm_reset; if (am335x_clk_get_sysclk_freq(NULL, &sysclk) != 0) 
sysclk = 0; if (am335x_clk_get_arm_fclk_freq(NULL, &fclk) != 0) fclk = 0; if (sysclk && fclk) device_printf(dev, "Clocks: System %u.%01u MHz, CPU %u MHz\n", sysclk/1000000, (sysclk % 1000000)/100000, fclk/1000000); else device_printf(dev, "can't read frequencies yet (SCM device not ready?)\n"); return (0); } static device_method_t am335x_prcm_methods[] = { DEVMETHOD(device_probe, am335x_prcm_probe), DEVMETHOD(device_attach, am335x_prcm_attach), { 0, 0 } }; static driver_t am335x_prcm_driver = { "am335x_prcm", am335x_prcm_methods, sizeof(struct am335x_prcm_softc), }; static devclass_t am335x_prcm_devclass; DRIVER_MODULE(am335x_prcm, simplebus, am335x_prcm_driver, am335x_prcm_devclass, 0, 0); MODULE_VERSION(am335x_prcm, 1); MODULE_DEPEND(am335x_prcm, ti_scm, 1, 1, 1); static struct am335x_clk_details* am335x_clk_details(clk_ident_t id) { struct am335x_clk_details *walker; for (walker = g_am335x_clk_details; walker->id != INVALID_CLK_IDENT; walker++) { if (id == walker->id) return (walker); } return NULL; } static int am335x_clk_noop_activate(struct ti_clock_dev *clkdev) { return (0); } static int am335x_clk_generic_activate(struct ti_clock_dev *clkdev) { struct am335x_prcm_softc *sc = am335x_prcm_sc; struct am335x_clk_details* clk_details; if (sc == NULL) return ENXIO; clk_details = am335x_clk_details(clkdev->id); if (clk_details == NULL) return (ENXIO); /* set *_CLKCTRL register MODULEMODE[1:0] to enable(2) */ prcm_write_4(clk_details->clkctrl_reg, 2); while ((prcm_read_4(clk_details->clkctrl_reg) & 0x3) != 2) DELAY(10); return (0); } static int am335x_clk_gpio_activate(struct ti_clock_dev *clkdev) { struct am335x_prcm_softc *sc = am335x_prcm_sc; struct am335x_clk_details* clk_details; if (sc == NULL) return ENXIO; clk_details = am335x_clk_details(clkdev->id); if (clk_details == NULL) return (ENXIO); /* set *_CLKCTRL register MODULEMODE[1:0] to enable(2) */ /* set *_CLKCTRL register OPTFCLKEN_GPIO_1_G DBCLK[18] to FCLK_EN(1) */ prcm_write_4(clk_details->clkctrl_reg, 2 | (1 << 18)); while ((prcm_read_4(clk_details->clkctrl_reg) & (3 | (1 << 18) )) != (2 | (1 << 18))) DELAY(10); return (0); } static int am335x_clk_noop_deactivate(struct ti_clock_dev *clkdev) { return(0); } static int am335x_clk_generic_deactivate(struct ti_clock_dev *clkdev) { struct am335x_prcm_softc *sc = am335x_prcm_sc; struct am335x_clk_details* clk_details; if (sc == NULL) return ENXIO; clk_details = am335x_clk_details(clkdev->id); if (clk_details == NULL) return (ENXIO); /* set *_CLKCTRL register MODULEMODE[1:0] to disable(0) */ prcm_write_4(clk_details->clkctrl_reg, 0); while ((prcm_read_4(clk_details->clkctrl_reg) & 0x3) != 0) DELAY(10); return (0); } static int am335x_clk_noop_set_source(struct ti_clock_dev *clkdev, clk_src_t clksrc) { return (0); } static int am335x_clk_generic_set_source(struct ti_clock_dev *clkdev, clk_src_t clksrc) { struct am335x_prcm_softc *sc = am335x_prcm_sc; struct am335x_clk_details* clk_details; uint32_t reg; if (sc == NULL) return ENXIO; clk_details = am335x_clk_details(clkdev->id); if (clk_details == NULL) return (ENXIO); switch (clksrc) { case EXT_CLK: reg = 0; /* SEL2: TCLKIN clock */ break; case SYSCLK_CLK: reg = 1; /* SEL1: CLK_M_OSC clock */ break; case F32KHZ_CLK: reg = 2; /* SEL3: CLK_32KHZ clock */ break; default: return (ENXIO); } prcm_write_4(clk_details->clksel_reg, reg); while ((prcm_read_4(clk_details->clksel_reg) & 0x3) != reg) DELAY(10); return (0); } static int am335x_clk_hsmmc_get_source_freq(struct ti_clock_dev *clkdev, unsigned int *freq) { *freq = 96000000; return 
 (0);
 }
 
 static int
 am335x_clk_get_sysclk_freq(struct ti_clock_dev *clkdev, unsigned int *freq)
 {
 	uint32_t ctrl_status;
 
-	/* Read the input clock freq from the control module */
-	/* control_status reg (0x40) */
-	if (ti_scm_reg_read_4(0x40, &ctrl_status))
-		return ENXIO;
+	/* Read the input clock freq from the control module. */
+	if (ti_scm_reg_read_4(SCM_CTRL_STATUS, &ctrl_status))
+		return (ENXIO);
 
 	switch ((ctrl_status>>22) & 0x3) {
 	case 0x0:
 		/* 19.2Mhz */
 		*freq = 19200000;
 		break;
 	case 0x1:
 		/* 24Mhz */
 		*freq = 24000000;
 		break;
 	case 0x2:
 		/* 25Mhz */
 		*freq = 25000000;
 		break;
 	case 0x3:
 		/* 26Mhz */
 		*freq = 26000000;
 		break;
 	}
 
 	return (0);
 }
 
 #define DPLL_BYP_CLKSEL(reg) ((reg>>23) & 1)
 #define DPLL_DIV(reg) ((reg & 0x7f)+1)
 #define DPLL_MULT(reg) ((reg>>8) & 0x7FF)
 #define DPLL_MAX_MUL 0x800
 #define DPLL_MAX_DIV 0x80
 
 static int
 am335x_clk_get_arm_fclk_freq(struct ti_clock_dev *clkdev, unsigned int *freq)
 {
 	uint32_t reg;
 	uint32_t sysclk;
 
 	reg = prcm_read_4(CM_WKUP_CM_CLKSEL_DPLL_MPU);
 
 	/*Check if we are running in bypass */
 	if (DPLL_BYP_CLKSEL(reg))
 		return ENXIO;
 
 	am335x_clk_get_sysclk_freq(NULL, &sysclk);
 	*freq = DPLL_MULT(reg) * (sysclk / DPLL_DIV(reg));
 	return(0);
 }
 
 static int
 am335x_clk_get_arm_disp_freq(struct ti_clock_dev *clkdev, unsigned int *freq)
 {
 	uint32_t reg;
 	uint32_t sysclk;
 
 	reg = prcm_read_4(CM_WKUP_CM_CLKSEL_DPLL_DISP);
 
 	/*Check if we are running in bypass */
 	if (DPLL_BYP_CLKSEL(reg))
 		return ENXIO;
 
 	am335x_clk_get_sysclk_freq(NULL, &sysclk);
 	*freq = DPLL_MULT(reg) * (sysclk / DPLL_DIV(reg));
 	return(0);
 }
 
 static int
 am335x_clk_set_arm_disp_freq(struct ti_clock_dev *clkdev, unsigned int freq)
 {
 	uint32_t sysclk;
 	uint32_t mul, div;
 	uint32_t i, j;
 	unsigned int delta, min_delta;
 
 	am335x_clk_get_sysclk_freq(NULL, &sysclk);
 
 	/* Bypass mode */
 	prcm_write_4(CM_WKUP_CM_CLKMODE_DPLL_DISP, 0x4);
 
 	/* Make sure it's in bypass mode */
 	while (!(prcm_read_4(CM_WKUP_CM_IDLEST_DPLL_DISP) & (1 << 8)))
 		DELAY(10);
 
 	/* Dumb and non-optimal implementation */
 	min_delta = freq;
 	for (i = 1; i < DPLL_MAX_MUL; i++) {
 		for (j = 1; j < DPLL_MAX_DIV; j++) {
 			delta = abs(freq - i*(sysclk/j));
 			if (delta < min_delta) {
 				mul = i;
 				div = j;
 				min_delta = delta;
 			}
 			if (min_delta == 0)
 				break;
 		}
 	}
 
 	prcm_write_4(CM_WKUP_CM_CLKSEL_DPLL_DISP, (mul << 8) | (div - 1));
 
 	/* Locked mode */
 	prcm_write_4(CM_WKUP_CM_CLKMODE_DPLL_DISP, 0x7);
 
 	int timeout = 10000;
 	while ((!(prcm_read_4(CM_WKUP_CM_IDLEST_DPLL_DISP) & (1 << 0))) && timeout--)
 		DELAY(10);
 
 	return(0);
 }
 
 static void
 am335x_prcm_reset(void)
 {
 	prcm_write_4(PRM_RSTCTRL, (1<<1));
 }
 
 static int
 am335x_clk_cpsw_activate(struct ti_clock_dev *clkdev)
 {
 	struct am335x_prcm_softc *sc = am335x_prcm_sc;
 
 	if (sc == NULL)
 		return ENXIO;
 
 	/* set MODULENAME to ENABLE */
 	prcm_write_4(CM_PER_CPGMAC0_CLKCTRL, 2);
 
 	/* wait for IDLEST to become Func(0) */
 	while(prcm_read_4(CM_PER_CPGMAC0_CLKCTRL) & (3<<16));
 
 	/*set CLKTRCTRL to SW_WKUP(2) */
 	prcm_write_4(CM_PER_CPSW_CLKSTCTRL, 2);
 
 	/* wait for 125 MHz OCP clock to become active */
 	while((prcm_read_4(CM_PER_CPSW_CLKSTCTRL) & (1<<4)) == 0);
 
 	return(0);
 }
 
 static int
 am335x_clk_musb0_activate(struct ti_clock_dev *clkdev)
 {
 	struct am335x_prcm_softc *sc = am335x_prcm_sc;
 
 	if (sc == NULL)
 		return ENXIO;
 
 	/* set ST_DPLL_CLKDCOLDO(9) to CLK_GATED(1) */
 	/* set DPLL_CLKDCOLDO_GATE_CTRL(8) to CLK_ENABLE(1)*/
 	prcm_write_4(CM_WKUP_CM_CLKDCOLDO_DPLL_PER, 0x300);
 
 	/*set MODULEMODE to ENABLE(2) */
 	prcm_write_4(CM_PER_USB0_CLKCTRL, 2);
 
 	/* wait for MODULEMODE to become ENABLE(2) */
 	while ((prcm_read_4(CM_PER_USB0_CLKCTRL) & 0x3) != 2)
 		DELAY(10);
 
 	/* wait for IDLEST to become Func(0) */
while(prcm_read_4(CM_PER_USB0_CLKCTRL) & (3<<16)) DELAY(10); return(0); } static int am335x_clk_lcdc_activate(struct ti_clock_dev *clkdev) { struct am335x_prcm_softc *sc = am335x_prcm_sc; if (sc == NULL) return (ENXIO); /* * For now set frequency to 2*VGA_PIXEL_CLOCK */ am335x_clk_set_arm_disp_freq(clkdev, 25175000*2); /*set MODULEMODE to ENABLE(2) */ prcm_write_4(CM_PER_LCDC_CLKCTRL, 2); /* wait for MODULEMODE to become ENABLE(2) */ while ((prcm_read_4(CM_PER_LCDC_CLKCTRL) & 0x3) != 2) DELAY(10); /* wait for IDLEST to become Func(0) */ while(prcm_read_4(CM_PER_LCDC_CLKCTRL) & (3<<16)) DELAY(10); return (0); } static int am335x_clk_pruss_activate(struct ti_clock_dev *clkdev) { struct am335x_prcm_softc *sc = am335x_prcm_sc; if (sc == NULL) return (ENXIO); /* Set MODULEMODE to ENABLE(2) */ prcm_write_4(CM_PER_PRUSS_CLKCTRL, 2); /* Wait for MODULEMODE to become ENABLE(2) */ while ((prcm_read_4(CM_PER_PRUSS_CLKCTRL) & 0x3) != 2) DELAY(10); /* Set CLKTRCTRL to SW_WKUP(2) */ prcm_write_4(CM_PER_PRUSS_CLKSTCTRL, 2); /* Wait for the 200 MHz OCP clock to become active */ while ((prcm_read_4(CM_PER_PRUSS_CLKSTCTRL) & (1<<4)) == 0) DELAY(10); /* Wait for the 200 MHz IEP clock to become active */ while ((prcm_read_4(CM_PER_PRUSS_CLKSTCTRL) & (1<<5)) == 0) DELAY(10); /* Wait for the 192 MHz UART clock to become active */ while ((prcm_read_4(CM_PER_PRUSS_CLKSTCTRL) & (1<<6)) == 0) DELAY(10); /* Select L3F as OCP clock */ prcm_write_4(CLKSEL_PRUSS_OCP_CLK, 0); while ((prcm_read_4(CLKSEL_PRUSS_OCP_CLK) & 0x3) != 0) DELAY(10); /* Clear the RESET bit */ prcm_write_4(PRM_PER_RSTCTRL, prcm_read_4(PRM_PER_RSTCTRL) & ~2); return (0); } Index: stable/11/sys/arm/ti/am335x/am335x_scm.h =================================================================== --- stable/11/sys/arm/ti/am335x/am335x_scm.h (revision 310854) +++ stable/11/sys/arm/ti/am335x/am335x_scm.h (revision 310855) @@ -1,38 +1,41 @@ /*- * Copyright (c) 2012 Oleksandr Tymoshenko * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
  *
  * $FreeBSD$
  */
 
 #ifndef __AM335X_SCM_H__
 #define __AM335X_SCM_H__
 
 /* AM335x-specific registers for control module (scm) */
+#define SCM_CTRL_STATUS 0x40
 #define SCM_USB_CTRL0 0x620
 #define SCM_USB_STS0 0x624
 #define SCM_USB_CTRL1 0x628
 #define SCM_USB_STS1 0x62C
+#define SCM_MAC_ID0_LO 0x630
+#define SCM_MAC_ID0_HI 0x634
 #define SCM_PWMSS_CTRL 0x664
 
 #endif /* __AM335X_SCM_H__ */
Index: stable/11/sys/arm/ti/cpsw/if_cpsw.c
===================================================================
--- stable/11/sys/arm/ti/cpsw/if_cpsw.c (revision 310854)
+++ stable/11/sys/arm/ti/cpsw/if_cpsw.c (revision 310855)
@@ -1,2620 +1,2621 @@
 /*-
  * Copyright (c) 2012 Damjan Marion
  * Copyright (c) 2016 Rubicon Communications, LLC (Netgate)
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
  * are met:
  * 1. Redistributions of source code must retain the above copyright
  *    notice, this list of conditions and the following disclaimer.
  * 2. Redistributions in binary form must reproduce the above copyright
  *    notice, this list of conditions and the following disclaimer in the
  *    documentation and/or other materials provided with the distribution.
  *
  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
  * SUCH DAMAGE.
  */
 
 /*
  * TI Common Platform Ethernet Switch (CPSW) Driver
  * Found in TI8148 "DaVinci" and AM335x "Sitara" SoCs.
  *
  * This controller is documented in the AM335x Technical Reference
  * Manual, in the TMS320DM814x DaVinci Digital Video Processors TRM
  * and in the TMS320C6452 3 Port Switch Ethernet Subsystem TRM.
  *
  * It is basically a single Ethernet port (port 0) wired internally to
  * a 3-port store-and-forward switch connected to two independent
  * "sliver" controllers (port 1 and port 2). You can operate the
  * controller in a variety of different ways by suitably configuring
  * the slivers and the Address Lookup Engine (ALE) that routes packets
  * between the ports.
  *
  * This code was developed and tested on a BeagleBone with
  * an AM335x SoC.
  */
 
 #include 
 __FBSDID("$FreeBSD$");
 
 #include 
 #include 
 #include 
 #include 
 #include 
 #include 
 #include 
 #include 
 #include 
 #include 
 #include 
 #include 
 #include 
 #include 
 #include 
 #include 
 #include 
 #include 
 #include 
 #include 
 #include 
 #include 
 #include 
 #include 
 #include 
 #include 
 #include 
+#include 
+#include 
+
 #include 
 #include 
 #include 
 #include 
 #include 
 #include "if_cpswreg.h"
 #include "if_cpswvar.h"
 
-
-#include 
 #include "miibus_if.h"
 
 /* Device probe/attach/detach. */
 static int cpsw_probe(device_t);
 static int cpsw_attach(device_t);
 static int cpsw_detach(device_t);
 static int cpswp_probe(device_t);
 static int cpswp_attach(device_t);
 static int cpswp_detach(device_t);
 static phandle_t cpsw_get_node(device_t, device_t);
 
 /* Device Init/shutdown.
*/ static int cpsw_shutdown(device_t); static void cpswp_init(void *); static void cpswp_init_locked(void *); static void cpswp_stop_locked(struct cpswp_softc *); /* Device Suspend/Resume. */ static int cpsw_suspend(device_t); static int cpsw_resume(device_t); /* Ioctl. */ static int cpswp_ioctl(struct ifnet *, u_long command, caddr_t data); static int cpswp_miibus_readreg(device_t, int phy, int reg); static int cpswp_miibus_writereg(device_t, int phy, int reg, int value); static void cpswp_miibus_statchg(device_t); /* Send/Receive packets. */ static void cpsw_intr_rx(void *arg); static struct mbuf *cpsw_rx_dequeue(struct cpsw_softc *); static void cpsw_rx_enqueue(struct cpsw_softc *); static void cpswp_start(struct ifnet *); static void cpswp_tx_enqueue(struct cpswp_softc *); static int cpsw_tx_dequeue(struct cpsw_softc *); /* Misc interrupts and watchdog. */ static void cpsw_intr_rx_thresh(void *); static void cpsw_intr_misc(void *); static void cpswp_tick(void *); static void cpswp_ifmedia_sts(struct ifnet *, struct ifmediareq *); static int cpswp_ifmedia_upd(struct ifnet *); static void cpsw_tx_watchdog(void *); /* ALE support */ static void cpsw_ale_read_entry(struct cpsw_softc *, uint16_t, uint32_t *); static void cpsw_ale_write_entry(struct cpsw_softc *, uint16_t, uint32_t *); static int cpsw_ale_mc_entry_set(struct cpsw_softc *, uint8_t, int, uint8_t *); static void cpsw_ale_dump_table(struct cpsw_softc *); static int cpsw_ale_update_vlan_table(struct cpsw_softc *, int, int, int, int, int); static int cpswp_ale_update_addresses(struct cpswp_softc *, int); /* Statistics and sysctls. */ static void cpsw_add_sysctls(struct cpsw_softc *); static void cpsw_stats_collect(struct cpsw_softc *); static int cpsw_stats_sysctl(SYSCTL_HANDLER_ARGS); /* * Arbitrary limit on number of segments in an mbuf to be transmitted. * Packets with more segments than this will be defragmented before * they are queued. */ #define CPSW_TXFRAGS 16 /* Shared resources. */ static device_method_t cpsw_methods[] = { /* Device interface */ DEVMETHOD(device_probe, cpsw_probe), DEVMETHOD(device_attach, cpsw_attach), DEVMETHOD(device_detach, cpsw_detach), DEVMETHOD(device_shutdown, cpsw_shutdown), DEVMETHOD(device_suspend, cpsw_suspend), DEVMETHOD(device_resume, cpsw_resume), /* OFW methods */ DEVMETHOD(ofw_bus_get_node, cpsw_get_node), DEVMETHOD_END }; static driver_t cpsw_driver = { "cpswss", cpsw_methods, sizeof(struct cpsw_softc), }; static devclass_t cpsw_devclass; DRIVER_MODULE(cpswss, simplebus, cpsw_driver, cpsw_devclass, 0, 0); /* Port/Slave resources. 
*/ static device_method_t cpswp_methods[] = { /* Device interface */ DEVMETHOD(device_probe, cpswp_probe), DEVMETHOD(device_attach, cpswp_attach), DEVMETHOD(device_detach, cpswp_detach), /* MII interface */ DEVMETHOD(miibus_readreg, cpswp_miibus_readreg), DEVMETHOD(miibus_writereg, cpswp_miibus_writereg), DEVMETHOD(miibus_statchg, cpswp_miibus_statchg), DEVMETHOD_END }; static driver_t cpswp_driver = { "cpsw", cpswp_methods, sizeof(struct cpswp_softc), }; static devclass_t cpswp_devclass; DRIVER_MODULE(cpsw, cpswss, cpswp_driver, cpswp_devclass, 0, 0); DRIVER_MODULE(miibus, cpsw, miibus_driver, miibus_devclass, 0, 0); MODULE_DEPEND(cpsw, ether, 1, 1, 1); MODULE_DEPEND(cpsw, miibus, 1, 1, 1); static uint32_t slave_mdio_addr[] = { 0x4a100200, 0x4a100300 }; static struct resource_spec irq_res_spec[] = { { SYS_RES_IRQ, 0, RF_ACTIVE | RF_SHAREABLE }, { SYS_RES_IRQ, 1, RF_ACTIVE | RF_SHAREABLE }, { SYS_RES_IRQ, 2, RF_ACTIVE | RF_SHAREABLE }, { SYS_RES_IRQ, 3, RF_ACTIVE | RF_SHAREABLE }, { -1, 0 } }; /* Number of entries here must match size of stats * array in struct cpswp_softc. */ static struct cpsw_stat { int reg; char *oid; } cpsw_stat_sysctls[CPSW_SYSCTL_COUNT] = { {0x00, "GoodRxFrames"}, {0x04, "BroadcastRxFrames"}, {0x08, "MulticastRxFrames"}, {0x0C, "PauseRxFrames"}, {0x10, "RxCrcErrors"}, {0x14, "RxAlignErrors"}, {0x18, "OversizeRxFrames"}, {0x1c, "RxJabbers"}, {0x20, "ShortRxFrames"}, {0x24, "RxFragments"}, {0x30, "RxOctets"}, {0x34, "GoodTxFrames"}, {0x38, "BroadcastTxFrames"}, {0x3c, "MulticastTxFrames"}, {0x40, "PauseTxFrames"}, {0x44, "DeferredTxFrames"}, {0x48, "CollisionsTxFrames"}, {0x4c, "SingleCollisionTxFrames"}, {0x50, "MultipleCollisionTxFrames"}, {0x54, "ExcessiveCollisions"}, {0x58, "LateCollisions"}, {0x5c, "TxUnderrun"}, {0x60, "CarrierSenseErrors"}, {0x64, "TxOctets"}, {0x68, "RxTx64OctetFrames"}, {0x6c, "RxTx65to127OctetFrames"}, {0x70, "RxTx128to255OctetFrames"}, {0x74, "RxTx256to511OctetFrames"}, {0x78, "RxTx512to1024OctetFrames"}, {0x7c, "RxTx1024upOctetFrames"}, {0x80, "NetOctets"}, {0x84, "RxStartOfFrameOverruns"}, {0x88, "RxMiddleOfFrameOverruns"}, {0x8c, "RxDmaOverruns"} }; /* * Basic debug support. */ #define IF_DEBUG(_sc) if ((_sc)->if_flags & IFF_DEBUG) static void cpsw_debugf_head(const char *funcname) { int t = (int)(time_second % (24 * 60 * 60)); printf("%02d:%02d:%02d %s ", t / (60 * 60), (t / 60) % 60, t % 60, funcname); } #include static void cpsw_debugf(const char *fmt, ...) { va_list ap; va_start(ap, fmt); vprintf(fmt, ap); va_end(ap); printf("\n"); } #define CPSW_DEBUGF(_sc, a) do { \ if (sc->debug) { \ cpsw_debugf_head(__func__); \ cpsw_debugf a; \ } \ } while (0) #define CPSWP_DEBUGF(_sc, a) do { \ IF_DEBUG((_sc)) { \ cpsw_debugf_head(__func__); \ cpsw_debugf a; \ } \ } while (0) /* * Locking macros */ #define CPSW_TX_LOCK(sc) do { \ mtx_assert(&(sc)->rx.lock, MA_NOTOWNED); \ mtx_lock(&(sc)->tx.lock); \ } while (0) #define CPSW_TX_UNLOCK(sc) mtx_unlock(&(sc)->tx.lock) #define CPSW_TX_LOCK_ASSERT(sc) mtx_assert(&(sc)->tx.lock, MA_OWNED) #define CPSW_RX_LOCK(sc) do { \ mtx_assert(&(sc)->tx.lock, MA_NOTOWNED); \ mtx_lock(&(sc)->rx.lock); \ } while (0) #define CPSW_RX_UNLOCK(sc) mtx_unlock(&(sc)->rx.lock) #define CPSW_RX_LOCK_ASSERT(sc) mtx_assert(&(sc)->rx.lock, MA_OWNED) #define CPSW_GLOBAL_LOCK(sc) do { \ if ((mtx_owned(&(sc)->tx.lock) ? 1 : 0) != \ (mtx_owned(&(sc)->rx.lock) ? 
1 : 0)) { \ panic("cpsw deadlock possibility detection!"); \ } \ mtx_lock(&(sc)->tx.lock); \ mtx_lock(&(sc)->rx.lock); \ } while (0) #define CPSW_GLOBAL_UNLOCK(sc) do { \ CPSW_RX_UNLOCK(sc); \ CPSW_TX_UNLOCK(sc); \ } while (0) #define CPSW_GLOBAL_LOCK_ASSERT(sc) do { \ CPSW_TX_LOCK_ASSERT(sc); \ CPSW_RX_LOCK_ASSERT(sc); \ } while (0) #define CPSW_PORT_LOCK(_sc) do { \ mtx_assert(&(_sc)->lock, MA_NOTOWNED); \ mtx_lock(&(_sc)->lock); \ } while (0) #define CPSW_PORT_UNLOCK(_sc) mtx_unlock(&(_sc)->lock) #define CPSW_PORT_LOCK_ASSERT(_sc) mtx_assert(&(_sc)->lock, MA_OWNED) /* * Read/Write macros */ #define cpsw_read_4(_sc, _reg) bus_read_4((_sc)->mem_res, (_reg)) #define cpsw_write_4(_sc, _reg, _val) \ bus_write_4((_sc)->mem_res, (_reg), (_val)) #define cpsw_cpdma_bd_offset(i) (CPSW_CPPI_RAM_OFFSET + ((i)*16)) #define cpsw_cpdma_bd_paddr(sc, slot) \ BUS_SPACE_PHYSADDR(sc->mem_res, slot->bd_offset) #define cpsw_cpdma_read_bd(sc, slot, val) \ bus_read_region_4(sc->mem_res, slot->bd_offset, (uint32_t *) val, 4) #define cpsw_cpdma_write_bd(sc, slot, val) \ bus_write_region_4(sc->mem_res, slot->bd_offset, (uint32_t *) val, 4) #define cpsw_cpdma_write_bd_next(sc, slot, next_slot) \ cpsw_write_4(sc, slot->bd_offset, cpsw_cpdma_bd_paddr(sc, next_slot)) #define cpsw_cpdma_read_bd_flags(sc, slot) \ bus_read_2(sc->mem_res, slot->bd_offset + 14) #define cpsw_write_hdp_slot(sc, queue, slot) \ cpsw_write_4(sc, (queue)->hdp_offset, cpsw_cpdma_bd_paddr(sc, slot)) #define CP_OFFSET (CPSW_CPDMA_TX_CP(0) - CPSW_CPDMA_TX_HDP(0)) #define cpsw_read_cp(sc, queue) \ cpsw_read_4(sc, (queue)->hdp_offset + CP_OFFSET) #define cpsw_write_cp(sc, queue, val) \ cpsw_write_4(sc, (queue)->hdp_offset + CP_OFFSET, (val)) #define cpsw_write_cp_slot(sc, queue, slot) \ cpsw_write_cp(sc, queue, cpsw_cpdma_bd_paddr(sc, slot)) #if 0 /* XXX temporary function versions for debugging. */ static void cpsw_write_hdp_slotX(struct cpsw_softc *sc, struct cpsw_queue *queue, struct cpsw_slot *slot) { uint32_t reg = queue->hdp_offset; uint32_t v = cpsw_cpdma_bd_paddr(sc, slot); CPSW_DEBUGF(("HDP <=== 0x%08x (was 0x%08x)", v, cpsw_read_4(sc, reg))); cpsw_write_4(sc, reg, v); } static void cpsw_write_cp_slotX(struct cpsw_softc *sc, struct cpsw_queue *queue, struct cpsw_slot *slot) { uint32_t v = cpsw_cpdma_bd_paddr(sc, slot); CPSW_DEBUGF(("CP <=== 0x%08x (expecting 0x%08x)", v, cpsw_read_cp(sc, queue))); cpsw_write_cp(sc, queue, v); } #endif /* * Expanded dump routines for verbose debugging. 
*/ static void cpsw_dump_slot(struct cpsw_softc *sc, struct cpsw_slot *slot) { static const char *flags[] = {"SOP", "EOP", "Owner", "EOQ", "TDownCmplt", "PassCRC", "Long", "Short", "MacCtl", "Overrun", "PktErr1", "PortEn/PktErr0", "RxVlanEncap", "Port2", "Port1", "Port0"}; struct cpsw_cpdma_bd bd; const char *sep; int i; cpsw_cpdma_read_bd(sc, slot, &bd); printf("BD Addr: 0x%08x Next: 0x%08x\n", cpsw_cpdma_bd_paddr(sc, slot), bd.next); printf(" BufPtr: 0x%08x BufLen: 0x%08x\n", bd.bufptr, bd.buflen); printf(" BufOff: 0x%08x PktLen: 0x%08x\n", bd.bufoff, bd.pktlen); printf(" Flags: "); sep = ""; for (i = 0; i < 16; ++i) { if (bd.flags & (1 << (15 - i))) { printf("%s%s", sep, flags[i]); sep = ","; } } printf("\n"); if (slot->mbuf) { printf(" Ether: %14D\n", (char *)(slot->mbuf->m_data), " "); printf(" Packet: %16D\n", (char *)(slot->mbuf->m_data) + 14, " "); } } #define CPSW_DUMP_SLOT(cs, slot) do { \ IF_DEBUG(sc) { \ cpsw_dump_slot(sc, slot); \ } \ } while (0) static void cpsw_dump_queue(struct cpsw_softc *sc, struct cpsw_slots *q) { struct cpsw_slot *slot; int i = 0; int others = 0; STAILQ_FOREACH(slot, q, next) { if (i > 4) ++others; else cpsw_dump_slot(sc, slot); ++i; } if (others) printf(" ... and %d more.\n", others); printf("\n"); } #define CPSW_DUMP_QUEUE(sc, q) do { \ IF_DEBUG(sc) { \ cpsw_dump_queue(sc, q); \ } \ } while (0) static void cpsw_init_slots(struct cpsw_softc *sc) { struct cpsw_slot *slot; int i; STAILQ_INIT(&sc->avail); /* Put the slot descriptors onto the global avail list. */ for (i = 0; i < nitems(sc->_slots); i++) { slot = &sc->_slots[i]; slot->bd_offset = cpsw_cpdma_bd_offset(i); STAILQ_INSERT_TAIL(&sc->avail, slot, next); } } static int cpsw_add_slots(struct cpsw_softc *sc, struct cpsw_queue *queue, int requested) { const int max_slots = nitems(sc->_slots); struct cpsw_slot *slot; int i; if (requested < 0) requested = max_slots; for (i = 0; i < requested; ++i) { slot = STAILQ_FIRST(&sc->avail); if (slot == NULL) return (0); if (bus_dmamap_create(sc->mbuf_dtag, 0, &slot->dmamap)) { device_printf(sc->dev, "failed to create dmamap\n"); return (ENOMEM); } STAILQ_REMOVE_HEAD(&sc->avail, next); STAILQ_INSERT_TAIL(&queue->avail, slot, next); ++queue->avail_queue_len; ++queue->queue_slots; } return (0); } static void cpsw_free_slot(struct cpsw_softc *sc, struct cpsw_slot *slot) { int error; if (slot->dmamap) { if (slot->mbuf) bus_dmamap_unload(sc->mbuf_dtag, slot->dmamap); error = bus_dmamap_destroy(sc->mbuf_dtag, slot->dmamap); KASSERT(error == 0, ("Mapping still active")); slot->dmamap = NULL; } if (slot->mbuf) { m_freem(slot->mbuf); slot->mbuf = NULL; } } static void cpsw_reset(struct cpsw_softc *sc) { int i; callout_stop(&sc->watchdog.callout); /* Reset RMII/RGMII wrapper. */ cpsw_write_4(sc, CPSW_WR_SOFT_RESET, 1); while (cpsw_read_4(sc, CPSW_WR_SOFT_RESET) & 1) ; /* Disable TX and RX interrupts for all cores. */ for (i = 0; i < 3; ++i) { cpsw_write_4(sc, CPSW_WR_C_RX_THRESH_EN(i), 0x00); cpsw_write_4(sc, CPSW_WR_C_TX_EN(i), 0x00); cpsw_write_4(sc, CPSW_WR_C_RX_EN(i), 0x00); cpsw_write_4(sc, CPSW_WR_C_MISC_EN(i), 0x00); } /* Reset CPSW subsystem. */ cpsw_write_4(sc, CPSW_SS_SOFT_RESET, 1); while (cpsw_read_4(sc, CPSW_SS_SOFT_RESET) & 1) ; /* Reset Sliver port 1 and 2 */ for (i = 0; i < 2; i++) { /* Reset */ cpsw_write_4(sc, CPSW_SL_SOFT_RESET(i), 1); while (cpsw_read_4(sc, CPSW_SL_SOFT_RESET(i)) & 1) ; } /* Reset DMA controller. 
*/ cpsw_write_4(sc, CPSW_CPDMA_SOFT_RESET, 1); while (cpsw_read_4(sc, CPSW_CPDMA_SOFT_RESET) & 1) ; /* Disable TX & RX DMA */ cpsw_write_4(sc, CPSW_CPDMA_TX_CONTROL, 0); cpsw_write_4(sc, CPSW_CPDMA_RX_CONTROL, 0); /* Clear all queues. */ for (i = 0; i < 8; i++) { cpsw_write_4(sc, CPSW_CPDMA_TX_HDP(i), 0); cpsw_write_4(sc, CPSW_CPDMA_RX_HDP(i), 0); cpsw_write_4(sc, CPSW_CPDMA_TX_CP(i), 0); cpsw_write_4(sc, CPSW_CPDMA_RX_CP(i), 0); } /* Clear all interrupt Masks */ cpsw_write_4(sc, CPSW_CPDMA_RX_INTMASK_CLEAR, 0xFFFFFFFF); cpsw_write_4(sc, CPSW_CPDMA_TX_INTMASK_CLEAR, 0xFFFFFFFF); } static void cpsw_init(struct cpsw_softc *sc) { struct cpsw_slot *slot; uint32_t reg; /* Clear ALE */ cpsw_write_4(sc, CPSW_ALE_CONTROL, CPSW_ALE_CTL_CLEAR_TBL); /* Enable ALE */ reg = CPSW_ALE_CTL_ENABLE; if (sc->dualemac) reg |= CPSW_ALE_CTL_VLAN_AWARE; cpsw_write_4(sc, CPSW_ALE_CONTROL, reg); /* Set Host Port Mapping. */ cpsw_write_4(sc, CPSW_PORT_P0_CPDMA_TX_PRI_MAP, 0x76543210); cpsw_write_4(sc, CPSW_PORT_P0_CPDMA_RX_CH_MAP, 0); /* Initialize ALE: set host port to forwarding(3). */ cpsw_write_4(sc, CPSW_ALE_PORTCTL(0), 3); cpsw_write_4(sc, CPSW_SS_PTYPE, 0); /* Enable statistics for ports 0, 1 and 2 */ cpsw_write_4(sc, CPSW_SS_STAT_PORT_EN, 7); /* Experiment: Turn off flow control */ /* This seems to fix the watchdog resets that have plagued earlier versions of this driver; I'm not yet sure if there are negative effects yet. */ cpsw_write_4(sc, CPSW_SS_FLOW_CONTROL, 0); /* Make IP hdr aligned with 4 */ cpsw_write_4(sc, CPSW_CPDMA_RX_BUFFER_OFFSET, 2); /* Initialize RX Buffer Descriptors */ cpsw_write_4(sc, CPSW_CPDMA_RX_FREEBUFFER(0), 0); /* Enable TX & RX DMA */ cpsw_write_4(sc, CPSW_CPDMA_TX_CONTROL, 1); cpsw_write_4(sc, CPSW_CPDMA_RX_CONTROL, 1); /* Enable Interrupts for core 0 */ cpsw_write_4(sc, CPSW_WR_C_RX_THRESH_EN(0), 0xFF); cpsw_write_4(sc, CPSW_WR_C_RX_EN(0), 0xFF); cpsw_write_4(sc, CPSW_WR_C_MISC_EN(0), 0x1F); /* Enable host Error Interrupt */ cpsw_write_4(sc, CPSW_CPDMA_DMA_INTMASK_SET, 3); /* Enable interrupts for RX Channel 0 */ cpsw_write_4(sc, CPSW_CPDMA_RX_INTMASK_SET, 1); /* Initialze MDIO - ENABLE, PREAMBLE=0, FAULTENB, CLKDIV=0xFF */ /* TODO Calculate MDCLK=CLK/(CLKDIV+1) */ cpsw_write_4(sc, MDIOCONTROL, MDIOCTL_ENABLE | MDIOCTL_FAULTENB | 0xff); /* Select MII in GMII_SEL, Internal Delay mode */ //ti_scm_reg_write_4(0x650, 0); /* Initialize active queues. */ slot = STAILQ_FIRST(&sc->tx.active); if (slot != NULL) cpsw_write_hdp_slot(sc, &sc->tx, slot); slot = STAILQ_FIRST(&sc->rx.active); if (slot != NULL) cpsw_write_hdp_slot(sc, &sc->rx, slot); cpsw_rx_enqueue(sc); /* Activate network interface. */ sc->rx.running = 1; sc->tx.running = 1; sc->watchdog.timer = 0; callout_init(&sc->watchdog.callout, 0); callout_reset(&sc->watchdog.callout, hz, cpsw_tx_watchdog, sc); } /* * * Device Probe, Attach, Detach. 
* */ static int cpsw_probe(device_t dev) { if (!ofw_bus_status_okay(dev)) return (ENXIO); if (!ofw_bus_is_compatible(dev, "ti,cpsw")) return (ENXIO); device_set_desc(dev, "3-port Switch Ethernet Subsystem"); return (BUS_PROBE_DEFAULT); } static int cpsw_intr_attach(struct cpsw_softc *sc) { /* Note: We don't use sc->irq_res[2] (TX interrupt) */ if (bus_setup_intr(sc->dev, sc->irq_res[0], INTR_TYPE_NET | INTR_MPSAFE, NULL, cpsw_intr_rx_thresh, sc, &sc->ih_cookie[0]) != 0) { return (-1); } if (bus_setup_intr(sc->dev, sc->irq_res[1], INTR_TYPE_NET | INTR_MPSAFE, NULL, cpsw_intr_rx, sc, &sc->ih_cookie[1]) != 0) { return (-1); } if (bus_setup_intr(sc->dev, sc->irq_res[3], INTR_TYPE_NET | INTR_MPSAFE, NULL, cpsw_intr_misc, sc, &sc->ih_cookie[3]) != 0) { return (-1); } return (0); } static void cpsw_intr_detach(struct cpsw_softc *sc) { int i; for (i = 0; i < CPSW_INTR_COUNT; i++) { if (sc->ih_cookie[i]) { bus_teardown_intr(sc->dev, sc->irq_res[i], sc->ih_cookie[i]); } } } static int cpsw_get_fdt_data(struct cpsw_softc *sc, int port) { char *name; int len, phy, vlan; pcell_t phy_id[3], vlan_id; phandle_t child; unsigned long mdio_child_addr; /* Find any slave with phy_id */ phy = -1; vlan = -1; for (child = OF_child(sc->node); child != 0; child = OF_peer(child)) { if (OF_getprop_alloc(child, "name", 1, (void **)&name) < 0) continue; if (sscanf(name, "slave@%x", &mdio_child_addr) != 1) { OF_prop_free(name); continue; } OF_prop_free(name); if (mdio_child_addr != slave_mdio_addr[port]) continue; len = OF_getproplen(child, "phy_id"); if (len / sizeof(pcell_t) == 2) { /* Get phy address from fdt */ if (OF_getencprop(child, "phy_id", phy_id, len) > 0) phy = phy_id[1]; } len = OF_getproplen(child, "dual_emac_res_vlan"); if (len / sizeof(pcell_t) == 1) { /* Get phy address from fdt */ if (OF_getencprop(child, "dual_emac_res_vlan", &vlan_id, len) > 0) { vlan = vlan_id; } } break; } if (phy == -1) return (ENXIO); sc->port[port].phy = phy; sc->port[port].vlan = vlan; return (0); } static int cpsw_attach(device_t dev) { bus_dma_segment_t segs[1]; int error, i, nsegs; struct cpsw_softc *sc; uint32_t reg; sc = device_get_softc(dev); sc->dev = dev; sc->node = ofw_bus_get_node(dev); getbinuptime(&sc->attach_uptime); if (OF_getencprop(sc->node, "active_slave", &sc->active_slave, sizeof(sc->active_slave)) <= 0) { sc->active_slave = 0; } if (sc->active_slave > 1) sc->active_slave = 1; if (OF_hasprop(sc->node, "dual_emac")) sc->dualemac = 1; for (i = 0; i < CPSW_PORTS; i++) { if (!sc->dualemac && i != sc->active_slave) continue; if (cpsw_get_fdt_data(sc, i) != 0) { device_printf(dev, "failed to get PHY address from FDT\n"); return (ENXIO); } } /* Initialize mutexes */ mtx_init(&sc->tx.lock, device_get_nameunit(dev), "cpsw TX lock", MTX_DEF); mtx_init(&sc->rx.lock, device_get_nameunit(dev), "cpsw RX lock", MTX_DEF); /* Allocate IRQ resources */ error = bus_alloc_resources(dev, irq_res_spec, sc->irq_res); if (error) { device_printf(dev, "could not allocate IRQ resources\n"); cpsw_detach(dev); return (ENXIO); } sc->mem_rid = 0; sc->mem_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &sc->mem_rid, RF_ACTIVE); if (sc->mem_res == NULL) { device_printf(sc->dev, "failed to allocate memory resource\n"); cpsw_detach(dev); return (ENXIO); } reg = cpsw_read_4(sc, CPSW_SS_IDVER); device_printf(dev, "CPSW SS Version %d.%d (%d)\n", (reg >> 8 & 0x7), reg & 0xFF, (reg >> 11) & 0x1F); cpsw_add_sysctls(sc); /* Allocate a busdma tag and DMA safe memory for mbufs. 
*/ error = bus_dma_tag_create( bus_get_dma_tag(sc->dev), /* parent */ 1, 0, /* alignment, boundary */ BUS_SPACE_MAXADDR_32BIT, /* lowaddr */ BUS_SPACE_MAXADDR, /* highaddr */ NULL, NULL, /* filtfunc, filtfuncarg */ MCLBYTES, CPSW_TXFRAGS, /* maxsize, nsegments */ MCLBYTES, 0, /* maxsegsz, flags */ NULL, NULL, /* lockfunc, lockfuncarg */ &sc->mbuf_dtag); /* dmatag */ if (error) { device_printf(dev, "bus_dma_tag_create failed\n"); cpsw_detach(dev); return (error); } /* Allocate the null mbuf and pre-sync it. */ sc->null_mbuf = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR); memset(sc->null_mbuf->m_data, 0, sc->null_mbuf->m_ext.ext_size); bus_dmamap_create(sc->mbuf_dtag, 0, &sc->null_mbuf_dmamap); bus_dmamap_load_mbuf_sg(sc->mbuf_dtag, sc->null_mbuf_dmamap, sc->null_mbuf, segs, &nsegs, BUS_DMA_NOWAIT); bus_dmamap_sync(sc->mbuf_dtag, sc->null_mbuf_dmamap, BUS_DMASYNC_PREWRITE); sc->null_mbuf_paddr = segs[0].ds_addr; cpsw_init_slots(sc); /* Allocate slots to TX and RX queues. */ STAILQ_INIT(&sc->rx.avail); STAILQ_INIT(&sc->rx.active); STAILQ_INIT(&sc->tx.avail); STAILQ_INIT(&sc->tx.active); // For now: 128 slots to TX, rest to RX. // XXX TODO: start with 32/64 and grow dynamically based on demand. if (cpsw_add_slots(sc, &sc->tx, 128) || cpsw_add_slots(sc, &sc->rx, -1)) { device_printf(dev, "failed to allocate dmamaps\n"); cpsw_detach(dev); return (ENOMEM); } device_printf(dev, "Initial queue size TX=%d RX=%d\n", sc->tx.queue_slots, sc->rx.queue_slots); sc->tx.hdp_offset = CPSW_CPDMA_TX_HDP(0); sc->rx.hdp_offset = CPSW_CPDMA_RX_HDP(0); if (cpsw_intr_attach(sc) == -1) { device_printf(dev, "failed to setup interrupts\n"); cpsw_detach(dev); return (ENXIO); } /* Reset the controller. */ cpsw_reset(sc); cpsw_init(sc); for (i = 0; i < CPSW_PORTS; i++) { if (!sc->dualemac && i != sc->active_slave) continue; sc->port[i].dev = device_add_child(dev, "cpsw", i); if (sc->port[i].dev == NULL) { cpsw_detach(dev); return (ENXIO); } } bus_generic_attach(dev); return (0); } static int cpsw_detach(device_t dev) { struct cpsw_softc *sc; int error, i; bus_generic_detach(dev); sc = device_get_softc(dev); for (i = 0; i < CPSW_PORTS; i++) { if (sc->port[i].dev) device_delete_child(dev, sc->port[i].dev); } if (device_is_attached(dev)) { callout_stop(&sc->watchdog.callout); callout_drain(&sc->watchdog.callout); } /* Stop and release all interrupts */ cpsw_intr_detach(sc); /* Free dmamaps and mbufs */ for (i = 0; i < nitems(sc->_slots); ++i) cpsw_free_slot(sc, &sc->_slots[i]); /* Free null mbuf. */ if (sc->null_mbuf_dmamap) { bus_dmamap_unload(sc->mbuf_dtag, sc->null_mbuf_dmamap); error = bus_dmamap_destroy(sc->mbuf_dtag, sc->null_mbuf_dmamap); KASSERT(error == 0, ("Mapping still active")); m_freem(sc->null_mbuf); } /* Free DMA tag */ if (sc->mbuf_dtag) { error = bus_dma_tag_destroy(sc->mbuf_dtag); KASSERT(error == 0, ("Unable to destroy DMA tag")); } /* Free IO memory handler */ if (sc->mem_res != NULL) bus_release_resource(dev, SYS_RES_MEMORY, sc->mem_rid, sc->mem_res); bus_release_resources(dev, irq_res_spec, sc->irq_res); /* Destroy mutexes */ mtx_destroy(&sc->rx.lock); mtx_destroy(&sc->tx.lock); return (0); } static phandle_t cpsw_get_node(device_t bus, device_t dev) { /* Share controller node with port device. 
 */
 	return (ofw_bus_get_node(bus));
 }
 
 static int
 cpswp_probe(device_t dev)
 {
 	if (device_get_unit(dev) > 1) {
 		device_printf(dev, "Only two ports are supported.\n");
 		return (ENXIO);
 	}
 	device_set_desc(dev, "Ethernet Switch Port");
 	return (BUS_PROBE_DEFAULT);
 }
 
 static int
 cpswp_attach(device_t dev)
 {
 	int error;
 	struct ifnet *ifp;
 	struct cpswp_softc *sc;
 	uint32_t reg;
 	uint8_t mac_addr[ETHER_ADDR_LEN];
 
 	sc = device_get_softc(dev);
 	sc->dev = dev;
 	sc->pdev = device_get_parent(dev);
 	sc->swsc = device_get_softc(sc->pdev);
 	sc->unit = device_get_unit(dev);
 	sc->phy = sc->swsc->port[sc->unit].phy;
 	sc->vlan = sc->swsc->port[sc->unit].vlan;
 	if (sc->swsc->dualemac && sc->vlan == -1)
 		sc->vlan = sc->unit + 1;
 
 	if (sc->unit == 0) {
 		sc->physel = MDIOUSERPHYSEL0;
 		sc->phyaccess = MDIOUSERACCESS0;
 	} else {
 		sc->physel = MDIOUSERPHYSEL1;
 		sc->phyaccess = MDIOUSERACCESS1;
 	}
 
 	mtx_init(&sc->lock, device_get_nameunit(dev), "cpsw port lock", MTX_DEF);
 
 	/* Allocate network interface */
 	ifp = sc->ifp = if_alloc(IFT_ETHER);
 	if (ifp == NULL) {
 		cpswp_detach(dev);
 		return (ENXIO);
 	}
 
 	if_initname(ifp, device_get_name(sc->dev), sc->unit);
 	ifp->if_softc = sc;
 	ifp->if_flags = IFF_SIMPLEX | IFF_MULTICAST | IFF_BROADCAST;
 	ifp->if_capabilities = IFCAP_VLAN_MTU | IFCAP_HWCSUM; //FIXME VLAN?
 	ifp->if_capenable = ifp->if_capabilities;
 
 	ifp->if_init = cpswp_init;
 	ifp->if_start = cpswp_start;
 	ifp->if_ioctl = cpswp_ioctl;
 
 	ifp->if_snd.ifq_drv_maxlen = sc->swsc->tx.queue_slots;
 	IFQ_SET_MAXLEN(&ifp->if_snd, ifp->if_snd.ifq_drv_maxlen);
 	IFQ_SET_READY(&ifp->if_snd);
 
 	/* Get high part of MAC address from control module (mac_id[0|1]_hi) */
-	ti_scm_reg_read_4(0x634 + sc->unit * 8, &reg);
+	ti_scm_reg_read_4(SCM_MAC_ID0_HI + sc->unit * 8, &reg);
 	mac_addr[0] = reg & 0xFF;
 	mac_addr[1] = (reg >> 8) & 0xFF;
 	mac_addr[2] = (reg >> 16) & 0xFF;
 	mac_addr[3] = (reg >> 24) & 0xFF;
 
 	/* Get low part of MAC address from control module (mac_id[0|1]_lo) */
-	ti_scm_reg_read_4(0x630 + sc->unit * 8, &reg);
+	ti_scm_reg_read_4(SCM_MAC_ID0_LO + sc->unit * 8, &reg);
 	mac_addr[4] = reg & 0xFF;
 	mac_addr[5] = (reg >> 8) & 0xFF;
 
 	error = mii_attach(dev, &sc->miibus, ifp, cpswp_ifmedia_upd, cpswp_ifmedia_sts, BMSR_DEFCAPMASK, sc->phy, MII_OFFSET_ANY, 0);
 	if (error) {
 		device_printf(dev, "attaching PHYs failed\n");
 		cpswp_detach(dev);
 		return (error);
 	}
 	sc->mii = device_get_softc(sc->miibus);
 
 	/* Select PHY and enable interrupts */
 	cpsw_write_4(sc->swsc, sc->physel, MDIO_PHYSEL_LINKINTENB | (sc->phy & 0x1F));
 
 	ether_ifattach(sc->ifp, mac_addr);
 	callout_init(&sc->mii_callout, 0);
 
 	return (0);
 }
 
 static int
 cpswp_detach(device_t dev)
 {
 	struct cpswp_softc *sc;
 
 	sc = device_get_softc(dev);
 	CPSWP_DEBUGF(sc, (""));
 	if (device_is_attached(dev)) {
 		ether_ifdetach(sc->ifp);
 		CPSW_PORT_LOCK(sc);
 		cpswp_stop_locked(sc);
 		CPSW_PORT_UNLOCK(sc);
 		callout_drain(&sc->mii_callout);
 	}
 
 	bus_generic_detach(dev);
 	if_free(sc->ifp);
 
 	mtx_destroy(&sc->lock);
 
 	return (0);
 }
 
 /*
  *
  * Init/Shutdown.
* */ static int cpsw_ports_down(struct cpsw_softc *sc) { struct cpswp_softc *psc; struct ifnet *ifp1, *ifp2; if (!sc->dualemac) return (1); psc = device_get_softc(sc->port[0].dev); ifp1 = psc->ifp; psc = device_get_softc(sc->port[1].dev); ifp2 = psc->ifp; if ((ifp1->if_flags & IFF_UP) == 0 && (ifp2->if_flags & IFF_UP) == 0) return (1); return (0); } static void cpswp_init(void *arg) { struct cpswp_softc *sc = arg; CPSWP_DEBUGF(sc, ("")); CPSW_PORT_LOCK(sc); cpswp_init_locked(arg); CPSW_PORT_UNLOCK(sc); } static void cpswp_init_locked(void *arg) { struct cpswp_softc *sc = arg; struct ifnet *ifp; uint32_t reg; CPSWP_DEBUGF(sc, ("")); CPSW_PORT_LOCK_ASSERT(sc); ifp = sc->ifp; if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) return; getbinuptime(&sc->init_uptime); if (!sc->swsc->rx.running && !sc->swsc->tx.running) { /* Reset the controller. */ cpsw_reset(sc->swsc); cpsw_init(sc->swsc); } /* Set Slave Mapping. */ cpsw_write_4(sc->swsc, CPSW_SL_RX_PRI_MAP(sc->unit), 0x76543210); cpsw_write_4(sc->swsc, CPSW_PORT_P_TX_PRI_MAP(sc->unit + 1), 0x33221100); cpsw_write_4(sc->swsc, CPSW_SL_RX_MAXLEN(sc->unit), 0x5f2); /* Enable MAC RX/TX modules. */ /* TODO: Docs claim that IFCTL_B and IFCTL_A do the same thing? */ /* Huh? Docs call bit 0 "Loopback" some places, "FullDuplex" others. */ reg = cpsw_read_4(sc->swsc, CPSW_SL_MACCONTROL(sc->unit)); reg |= CPSW_SL_MACTL_GMII_ENABLE; cpsw_write_4(sc->swsc, CPSW_SL_MACCONTROL(sc->unit), reg); /* Initialize ALE: set port to forwarding(3), initialize addrs */ cpsw_write_4(sc->swsc, CPSW_ALE_PORTCTL(sc->unit + 1), 3); cpswp_ale_update_addresses(sc, 1); if (sc->swsc->dualemac) { /* Set Port VID. */ cpsw_write_4(sc->swsc, CPSW_PORT_P_VLAN(sc->unit + 1), sc->vlan & 0xfff); cpsw_ale_update_vlan_table(sc->swsc, sc->vlan, (1 << (sc->unit + 1)) | (1 << 0), /* Member list */ (1 << (sc->unit + 1)) | (1 << 0), /* Untagged egress */ (1 << (sc->unit + 1)) | (1 << 0), 0); /* mcast reg flood */ } mii_mediachg(sc->mii); callout_reset(&sc->mii_callout, hz, cpswp_tick, sc); ifp->if_drv_flags |= IFF_DRV_RUNNING; ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; } static int cpsw_shutdown(device_t dev) { struct cpsw_softc *sc; struct cpswp_softc *psc; int i; sc = device_get_softc(dev); CPSW_DEBUGF(sc, ("")); for (i = 0; i < CPSW_PORTS; i++) { if (!sc->dualemac && i != sc->active_slave) continue; psc = device_get_softc(sc->port[i].dev); CPSW_PORT_LOCK(psc); cpswp_stop_locked(psc); CPSW_PORT_UNLOCK(psc); } return (0); } static void cpsw_rx_teardown_locked(struct cpsw_softc *sc) { struct ifnet *ifp; struct mbuf *received, *next; int i = 0; CPSW_DEBUGF(sc, ("starting RX teardown")); cpsw_write_4(sc, CPSW_CPDMA_RX_TEARDOWN, 0); for (;;) { received = cpsw_rx_dequeue(sc); CPSW_GLOBAL_UNLOCK(sc); while (received != NULL) { next = received->m_nextpkt; received->m_nextpkt = NULL; ifp = received->m_pkthdr.rcvif; (*ifp->if_input)(ifp, received); if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1); received = next; } CPSW_GLOBAL_LOCK(sc); if (!sc->rx.running) { CPSW_DEBUGF(sc, ("finished RX teardown (%d retries)", i)); return; } if (++i > 10) { device_printf(sc->dev, "Unable to cleanly shutdown receiver\n"); return; } DELAY(10); } } static void cpsw_tx_teardown_locked(struct cpsw_softc *sc) { int i = 0; CPSW_DEBUGF(sc, ("starting TX teardown")); cpsw_write_4(sc, CPSW_CPDMA_TX_TEARDOWN, 0); cpsw_tx_dequeue(sc); while (sc->tx.running && ++i < 10) { DELAY(10); cpsw_tx_dequeue(sc); } if (sc->tx.running) { device_printf(sc->dev, "Unable to cleanly shutdown transmitter\n"); } CPSW_DEBUGF(sc, ("finished TX teardown (%d 
retries, %d idle buffers)", i, sc->tx.active_queue_len)); } static void cpswp_stop_locked(struct cpswp_softc *sc) { struct ifnet *ifp; uint32_t reg; ifp = sc->ifp; CPSWP_DEBUGF(sc, ("")); CPSW_PORT_LOCK_ASSERT(sc); if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) return; /* Disable interface */ ifp->if_drv_flags &= ~IFF_DRV_RUNNING; ifp->if_drv_flags |= IFF_DRV_OACTIVE; /* Stop ticker */ callout_stop(&sc->mii_callout); /* Tear down the RX/TX queues. */ if (cpsw_ports_down(sc->swsc)) { CPSW_GLOBAL_LOCK(sc->swsc); cpsw_rx_teardown_locked(sc->swsc); cpsw_tx_teardown_locked(sc->swsc); CPSW_GLOBAL_UNLOCK(sc->swsc); } /* Stop MAC RX/TX modules. */ reg = cpsw_read_4(sc->swsc, CPSW_SL_MACCONTROL(sc->unit)); reg &= ~CPSW_SL_MACTL_GMII_ENABLE; cpsw_write_4(sc->swsc, CPSW_SL_MACCONTROL(sc->unit), reg); if (cpsw_ports_down(sc->swsc)) { /* Capture stats before we reset controller. */ cpsw_stats_collect(sc->swsc); cpsw_reset(sc->swsc); cpsw_init(sc->swsc); } } /* * Suspend/Resume. */ static int cpsw_suspend(device_t dev) { struct cpsw_softc *sc; struct cpswp_softc *psc; int i; sc = device_get_softc(dev); CPSW_DEBUGF(sc, ("")); for (i = 0; i < CPSW_PORTS; i++) { if (!sc->dualemac && i != sc->active_slave) continue; psc = device_get_softc(sc->port[i].dev); CPSW_PORT_LOCK(psc); cpswp_stop_locked(psc); CPSW_PORT_UNLOCK(psc); } return (0); } static int cpsw_resume(device_t dev) { struct cpsw_softc *sc; sc = device_get_softc(dev); CPSW_DEBUGF(sc, ("UNIMPLEMENTED")); return (0); } /* * * IOCTL * */ static void cpsw_set_promisc(struct cpswp_softc *sc, int set) { uint32_t reg; /* * Enabling promiscuous mode requires ALE_BYPASS to be enabled. * That disables the ALE forwarding logic and causes every * packet to be sent only to the host port. In bypass mode, * the ALE processes host port transmit packets the same as in * normal mode. */ reg = cpsw_read_4(sc->swsc, CPSW_ALE_CONTROL); reg &= ~CPSW_ALE_CTL_BYPASS; if (set) reg |= CPSW_ALE_CTL_BYPASS; cpsw_write_4(sc->swsc, CPSW_ALE_CONTROL, reg); } static void cpsw_set_allmulti(struct cpswp_softc *sc, int set) { if (set) { printf("All-multicast mode unimplemented\n"); } } static int cpswp_ioctl(struct ifnet *ifp, u_long command, caddr_t data) { struct cpswp_softc *sc; struct ifreq *ifr; int error; uint32_t changed; error = 0; sc = ifp->if_softc; ifr = (struct ifreq *)data; switch (command) { case SIOCSIFFLAGS: CPSW_PORT_LOCK(sc); if (ifp->if_flags & IFF_UP) { if (ifp->if_drv_flags & IFF_DRV_RUNNING) { changed = ifp->if_flags ^ sc->if_flags; CPSWP_DEBUGF(sc, ("SIOCSIFFLAGS: UP & RUNNING (changed=0x%x)", changed)); if (changed & IFF_PROMISC) cpsw_set_promisc(sc, ifp->if_flags & IFF_PROMISC); if (changed & IFF_ALLMULTI) cpsw_set_allmulti(sc, ifp->if_flags & IFF_ALLMULTI); } else { CPSWP_DEBUGF(sc, ("SIOCSIFFLAGS: UP but not RUNNING; starting up")); cpswp_init_locked(sc); } } else if (ifp->if_drv_flags & IFF_DRV_RUNNING) { CPSWP_DEBUGF(sc, ("SIOCSIFFLAGS: not UP but RUNNING; shutting down")); cpswp_stop_locked(sc); } sc->if_flags = ifp->if_flags; CPSW_PORT_UNLOCK(sc); break; case SIOCADDMULTI: cpswp_ale_update_addresses(sc, 0); break; case SIOCDELMULTI: /* Ugh. DELMULTI doesn't provide the specific address being removed, so the best we can do is remove everything and rebuild it all. 
*/ cpswp_ale_update_addresses(sc, 1); break; case SIOCGIFMEDIA: case SIOCSIFMEDIA: error = ifmedia_ioctl(ifp, ifr, &sc->mii->mii_media, command); break; default: error = ether_ioctl(ifp, command, data); } return (error); } /* * * MIIBUS * */ static int cpswp_miibus_ready(struct cpsw_softc *sc, uint32_t reg) { uint32_t r, retries = CPSW_MIIBUS_RETRIES; while (--retries) { r = cpsw_read_4(sc, reg); if ((r & MDIO_PHYACCESS_GO) == 0) return (1); DELAY(CPSW_MIIBUS_DELAY); } return (0); } static int cpswp_miibus_readreg(device_t dev, int phy, int reg) { struct cpswp_softc *sc; uint32_t cmd, r; sc = device_get_softc(dev); if (!cpswp_miibus_ready(sc->swsc, sc->phyaccess)) { device_printf(dev, "MDIO not ready to read\n"); return (0); } /* Set GO, reg, phy */ cmd = MDIO_PHYACCESS_GO | (reg & 0x1F) << 21 | (phy & 0x1F) << 16; cpsw_write_4(sc->swsc, sc->phyaccess, cmd); if (!cpswp_miibus_ready(sc->swsc, sc->phyaccess)) { device_printf(dev, "MDIO timed out during read\n"); return (0); } r = cpsw_read_4(sc->swsc, sc->phyaccess); if ((r & MDIO_PHYACCESS_ACK) == 0) { device_printf(dev, "Failed to read from PHY.\n"); r = 0; } return (r & 0xFFFF); } static int cpswp_miibus_writereg(device_t dev, int phy, int reg, int value) { struct cpswp_softc *sc; uint32_t cmd; sc = device_get_softc(dev); if (!cpswp_miibus_ready(sc->swsc, sc->phyaccess)) { device_printf(dev, "MDIO not ready to write\n"); return (0); } /* Set GO, WRITE, reg, phy, and value */ cmd = MDIO_PHYACCESS_GO | MDIO_PHYACCESS_WRITE | (reg & 0x1F) << 21 | (phy & 0x1F) << 16 | (value & 0xFFFF); cpsw_write_4(sc->swsc, sc->phyaccess, cmd); if (!cpswp_miibus_ready(sc->swsc, sc->phyaccess)) { device_printf(dev, "MDIO timed out during write\n"); return (0); } if ((cpsw_read_4(sc->swsc, sc->phyaccess) & MDIO_PHYACCESS_ACK) == 0) device_printf(dev, "Failed to write to PHY.\n"); return (0); } static void cpswp_miibus_statchg(device_t dev) { struct cpswp_softc *sc; uint32_t mac_control, reg; sc = device_get_softc(dev); CPSWP_DEBUGF(sc, ("")); reg = CPSW_SL_MACCONTROL(sc->unit); mac_control = cpsw_read_4(sc->swsc, reg); mac_control &= ~(CPSW_SL_MACTL_GIG | CPSW_SL_MACTL_IFCTL_A | CPSW_SL_MACTL_IFCTL_B | CPSW_SL_MACTL_FULLDUPLEX); switch(IFM_SUBTYPE(sc->mii->mii_media_active)) { case IFM_1000_SX: case IFM_1000_LX: case IFM_1000_CX: case IFM_1000_T: mac_control |= CPSW_SL_MACTL_GIG; break; case IFM_100_TX: mac_control |= CPSW_SL_MACTL_IFCTL_A; break; } if (sc->mii->mii_media_active & IFM_FDX) mac_control |= CPSW_SL_MACTL_FULLDUPLEX; cpsw_write_4(sc->swsc, reg, mac_control); } /* * * Transmit/Receive Packets. * */ static void cpsw_intr_rx(void *arg) { struct cpsw_softc *sc = arg; struct ifnet *ifp; struct mbuf *received, *next; CPSW_RX_LOCK(sc); received = cpsw_rx_dequeue(sc); cpsw_rx_enqueue(sc); cpsw_write_4(sc, CPSW_CPDMA_CPDMA_EOI_VECTOR, 1); CPSW_RX_UNLOCK(sc); while (received != NULL) { next = received->m_nextpkt; received->m_nextpkt = NULL; ifp = received->m_pkthdr.rcvif; (*ifp->if_input)(ifp, received); if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1); received = next; } } static struct mbuf * cpsw_rx_dequeue(struct cpsw_softc *sc) { struct cpsw_cpdma_bd bd; struct cpsw_slot *slot; struct cpswp_softc *psc; struct mbuf *mb_head, *mb_tail; int port, removed = 0; mb_head = mb_tail = NULL; /* Pull completed packets off hardware RX queue. 
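	 * Descriptors whose CPDMA_BD_OWNER flag is still set belong to the
	 * hardware and end the scan; CPDMA_BD_TDOWNCMPLT marks the RX
	 * teardown acknowledgement.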
	 */
	while ((slot = STAILQ_FIRST(&sc->rx.active)) != NULL) {
		cpsw_cpdma_read_bd(sc, slot, &bd);
		if (bd.flags & CPDMA_BD_OWNER)
			break; /* Still in use by hardware */

		CPSW_DEBUGF(sc, ("Removing received packet from RX queue"));
		++removed;
		STAILQ_REMOVE_HEAD(&sc->rx.active, next);
		STAILQ_INSERT_TAIL(&sc->rx.avail, slot, next);

		bus_dmamap_sync(sc->mbuf_dtag, slot->dmamap,
		    BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->mbuf_dtag, slot->dmamap);

		if (bd.flags & CPDMA_BD_TDOWNCMPLT) {
			CPSW_DEBUGF(sc, ("RX teardown in progress"));
			m_freem(slot->mbuf);
			slot->mbuf = NULL;
			cpsw_write_cp(sc, &sc->rx, 0xfffffffc);
			sc->rx.running = 0;
			break;
		}

		cpsw_write_cp_slot(sc, &sc->rx, slot);

		port = (bd.flags & CPDMA_BD_PORT_MASK) - 1;
		KASSERT(port >= 0 && port <= 1,
		    ("packet received with invalid port: %d", port));
		psc = device_get_softc(sc->port[port].dev);

		/* Set up mbuf */
		/* TODO: track SOP/EOP bits to assemble a full mbuf
		   out of received fragments. */
		slot->mbuf->m_data += bd.bufoff;
		slot->mbuf->m_len = bd.pktlen - 4;
		slot->mbuf->m_pkthdr.len = bd.pktlen - 4;
		slot->mbuf->m_flags |= M_PKTHDR;
		slot->mbuf->m_pkthdr.rcvif = psc->ifp;
		slot->mbuf->m_nextpkt = NULL;

		if ((psc->ifp->if_capenable & IFCAP_RXCSUM) != 0) {
			/* check for valid CRC by looking into pkt_err[5:4] */
			if ((bd.flags & CPDMA_BD_PKT_ERR_MASK) == 0) {
				slot->mbuf->m_pkthdr.csum_flags |=
				    CSUM_IP_CHECKED;
				slot->mbuf->m_pkthdr.csum_flags |=
				    CSUM_IP_VALID;
				slot->mbuf->m_pkthdr.csum_data = 0xffff;
			}
		}

		/* Add mbuf to packet list to be returned. */
		if (mb_tail) {
			mb_tail->m_nextpkt = slot->mbuf;
		} else {
			mb_head = slot->mbuf;
		}
		mb_tail = slot->mbuf;
		slot->mbuf = NULL;
	}

	if (removed != 0) {
		sc->rx.queue_removes += removed;
		sc->rx.active_queue_len -= removed;
		sc->rx.avail_queue_len += removed;
		if (sc->rx.avail_queue_len > sc->rx.max_avail_queue_len)
			sc->rx.max_avail_queue_len = sc->rx.avail_queue_len;
	}

	return (mb_head);
}

static void
cpsw_rx_enqueue(struct cpsw_softc *sc)
{
	bus_dma_segment_t seg[1];
	struct cpsw_cpdma_bd bd;
	struct cpsw_slots tmpqueue = STAILQ_HEAD_INITIALIZER(tmpqueue);
	struct cpsw_slot *slot, *prev_slot = NULL;
	struct cpsw_slot *last_old_slot, *first_new_slot;
	int error, nsegs, added = 0;

	/* Register new mbufs with hardware.
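	 * Each free slot gets a fresh mbuf cluster, is mapped for DMA and is
	 * chained to the previous descriptor before the whole batch is linked
	 * onto the active RX queue below.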
*/ while ((slot = STAILQ_FIRST(&sc->rx.avail)) != NULL) { if (slot->mbuf == NULL) { slot->mbuf = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR); if (slot->mbuf == NULL) { device_printf(sc->dev, "Unable to fill RX queue\n"); break; } slot->mbuf->m_len = slot->mbuf->m_pkthdr.len = slot->mbuf->m_ext.ext_size; } error = bus_dmamap_load_mbuf_sg(sc->mbuf_dtag, slot->dmamap, slot->mbuf, seg, &nsegs, BUS_DMA_NOWAIT); KASSERT(nsegs == 1, ("More than one segment (nsegs=%d)", nsegs)); KASSERT(error == 0, ("DMA error (error=%d)", error)); if (error != 0 || nsegs != 1) { device_printf(sc->dev, "%s: Can't prep RX buf for DMA (nsegs=%d, error=%d)\n", __func__, nsegs, error); bus_dmamap_unload(sc->mbuf_dtag, slot->dmamap); m_freem(slot->mbuf); slot->mbuf = NULL; break; } bus_dmamap_sync(sc->mbuf_dtag, slot->dmamap, BUS_DMASYNC_PREREAD); /* Create and submit new rx descriptor*/ bd.next = 0; bd.bufptr = seg->ds_addr; bd.bufoff = 0; bd.buflen = MCLBYTES - 1; bd.pktlen = bd.buflen; bd.flags = CPDMA_BD_OWNER; cpsw_cpdma_write_bd(sc, slot, &bd); ++added; if (prev_slot != NULL) cpsw_cpdma_write_bd_next(sc, prev_slot, slot); prev_slot = slot; STAILQ_REMOVE_HEAD(&sc->rx.avail, next); sc->rx.avail_queue_len--; STAILQ_INSERT_TAIL(&tmpqueue, slot, next); } if (added == 0) return; CPSW_DEBUGF(sc, ("Adding %d buffers to RX queue", added)); /* Link new entries to hardware RX queue. */ last_old_slot = STAILQ_LAST(&sc->rx.active, cpsw_slot, next); first_new_slot = STAILQ_FIRST(&tmpqueue); STAILQ_CONCAT(&sc->rx.active, &tmpqueue); if (first_new_slot == NULL) { return; } else if (last_old_slot == NULL) { /* Start a fresh queue. */ cpsw_write_hdp_slot(sc, &sc->rx, first_new_slot); } else { /* Add buffers to end of current queue. */ cpsw_cpdma_write_bd_next(sc, last_old_slot, first_new_slot); /* If underrun, restart queue. */ if (cpsw_cpdma_read_bd_flags(sc, last_old_slot) & CPDMA_BD_EOQ) { cpsw_write_hdp_slot(sc, &sc->rx, first_new_slot); } } sc->rx.queue_adds += added; sc->rx.active_queue_len += added; if (sc->rx.active_queue_len > sc->rx.max_active_queue_len) { sc->rx.max_active_queue_len = sc->rx.active_queue_len; } } static void cpswp_start(struct ifnet *ifp) { struct cpswp_softc *sc = ifp->if_softc; CPSW_TX_LOCK(sc->swsc); if ((ifp->if_drv_flags & IFF_DRV_RUNNING) && sc->swsc->tx.running) { cpswp_tx_enqueue(sc); cpsw_tx_dequeue(sc->swsc); } CPSW_TX_UNLOCK(sc->swsc); } static void cpswp_tx_enqueue(struct cpswp_softc *sc) { bus_dma_segment_t segs[CPSW_TXFRAGS]; struct cpsw_cpdma_bd bd; struct cpsw_slots tmpqueue = STAILQ_HEAD_INITIALIZER(tmpqueue); struct cpsw_slot *slot, *prev_slot = NULL; struct cpsw_slot *last_old_slot, *first_new_slot; struct mbuf *m0; int error, flags, nsegs, seg, added = 0, padlen; flags = 0; if (sc->swsc->dualemac) { flags = CPDMA_BD_TO_PORT | ((sc->unit + 1) & CPDMA_BD_PORT_MASK); } /* Pull pending packets from IF queue and prep them for DMA. */ while ((slot = STAILQ_FIRST(&sc->swsc->tx.avail)) != NULL) { IF_DEQUEUE(&sc->ifp->if_snd, m0); if (m0 == NULL) break; slot->mbuf = m0; padlen = ETHER_MIN_LEN - slot->mbuf->m_pkthdr.len; if (padlen < 0) padlen = 0; /* Create mapping in DMA memory */ error = bus_dmamap_load_mbuf_sg(sc->swsc->mbuf_dtag, slot->dmamap, slot->mbuf, segs, &nsegs, BUS_DMA_NOWAIT); /* If the packet is too fragmented, try to simplify. */ if (error == EFBIG || (error == 0 && nsegs + (padlen > 0 ? 1 : 0) > sc->swsc->tx.avail_queue_len)) { bus_dmamap_unload(sc->swsc->mbuf_dtag, slot->dmamap); if (padlen > 0) /* May as well add padding. 
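	 * (the mbuf is about to be copied by m_defrag() anyway)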
*/ m_append(slot->mbuf, padlen, sc->swsc->null_mbuf->m_data); m0 = m_defrag(slot->mbuf, M_NOWAIT); if (m0 == NULL) { device_printf(sc->dev, "Can't defragment packet; dropping\n"); m_freem(slot->mbuf); } else { CPSWP_DEBUGF(sc, ("Requeueing defragmented packet")); IF_PREPEND(&sc->ifp->if_snd, m0); } slot->mbuf = NULL; continue; } if (error != 0) { device_printf(sc->dev, "%s: Can't setup DMA (error=%d), dropping packet\n", __func__, error); bus_dmamap_unload(sc->swsc->mbuf_dtag, slot->dmamap); m_freem(slot->mbuf); slot->mbuf = NULL; break; } bus_dmamap_sync(sc->swsc->mbuf_dtag, slot->dmamap, BUS_DMASYNC_PREWRITE); CPSWP_DEBUGF(sc, ("Queueing TX packet: %d segments + %d pad bytes", nsegs, padlen)); slot->ifp = sc->ifp; /* If there is only one segment, the for() loop * gets skipped and the single buffer gets set up * as both SOP and EOP. */ /* Start by setting up the first buffer */ bd.next = 0; bd.bufptr = segs[0].ds_addr; bd.bufoff = 0; bd.buflen = segs[0].ds_len; bd.pktlen = m_length(slot->mbuf, NULL) + padlen; bd.flags = CPDMA_BD_SOP | CPDMA_BD_OWNER | flags; for (seg = 1; seg < nsegs; ++seg) { /* Save the previous buffer (which isn't EOP) */ cpsw_cpdma_write_bd(sc->swsc, slot, &bd); if (prev_slot != NULL) { cpsw_cpdma_write_bd_next(sc->swsc, prev_slot, slot); } prev_slot = slot; STAILQ_REMOVE_HEAD(&sc->swsc->tx.avail, next); sc->swsc->tx.avail_queue_len--; STAILQ_INSERT_TAIL(&tmpqueue, slot, next); ++added; slot = STAILQ_FIRST(&sc->swsc->tx.avail); /* Setup next buffer (which isn't SOP) */ bd.next = 0; bd.bufptr = segs[seg].ds_addr; bd.bufoff = 0; bd.buflen = segs[seg].ds_len; bd.pktlen = 0; bd.flags = CPDMA_BD_OWNER | flags; } /* Save the final buffer. */ if (padlen <= 0) bd.flags |= CPDMA_BD_EOP; cpsw_cpdma_write_bd(sc->swsc, slot, &bd); if (prev_slot != NULL) cpsw_cpdma_write_bd_next(sc->swsc, prev_slot, slot); prev_slot = slot; STAILQ_REMOVE_HEAD(&sc->swsc->tx.avail, next); sc->swsc->tx.avail_queue_len--; STAILQ_INSERT_TAIL(&tmpqueue, slot, next); ++added; if (padlen > 0) { slot = STAILQ_FIRST(&sc->swsc->tx.avail); STAILQ_REMOVE_HEAD(&sc->swsc->tx.avail, next); sc->swsc->tx.avail_queue_len--; STAILQ_INSERT_TAIL(&tmpqueue, slot, next); ++added; /* Setup buffer of null pad bytes (definitely EOP) */ cpsw_cpdma_write_bd_next(sc->swsc, prev_slot, slot); prev_slot = slot; bd.next = 0; bd.bufptr = sc->swsc->null_mbuf_paddr; bd.bufoff = 0; bd.buflen = padlen; bd.pktlen = 0; bd.flags = CPDMA_BD_EOP | CPDMA_BD_OWNER | flags; cpsw_cpdma_write_bd(sc->swsc, slot, &bd); ++nsegs; } if (nsegs > sc->swsc->tx.longest_chain) sc->swsc->tx.longest_chain = nsegs; // TODO: Should we defer the BPF tap until // after all packets are queued? BPF_MTAP(sc->ifp, m0); } /* Attach the list of new buffers to the hardware TX queue. */ last_old_slot = STAILQ_LAST(&sc->swsc->tx.active, cpsw_slot, next); first_new_slot = STAILQ_FIRST(&tmpqueue); STAILQ_CONCAT(&sc->swsc->tx.active, &tmpqueue); if (first_new_slot == NULL) { return; } else if (last_old_slot == NULL) { /* Start a fresh queue. */ sc->swsc->last_hdp = cpsw_cpdma_bd_paddr(sc->swsc, first_new_slot); cpsw_write_hdp_slot(sc->swsc, &sc->swsc->tx, first_new_slot); } else { /* Add buffers to end of current queue. */ cpsw_cpdma_write_bd_next(sc->swsc, last_old_slot, first_new_slot); /* If underrun, restart queue. 
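		 * CPDMA_BD_EOQ on the last old descriptor means the DMA engine
		 * already ran off the end of the chain, so the head descriptor
		 * pointer has to be rewritten to pick up the new buffers.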
*/ if (cpsw_cpdma_read_bd_flags(sc->swsc, last_old_slot) & CPDMA_BD_EOQ) { sc->swsc->last_hdp = cpsw_cpdma_bd_paddr(sc->swsc, first_new_slot); cpsw_write_hdp_slot(sc->swsc, &sc->swsc->tx, first_new_slot); } } sc->swsc->tx.queue_adds += added; sc->swsc->tx.active_queue_len += added; if (sc->swsc->tx.active_queue_len > sc->swsc->tx.max_active_queue_len) { sc->swsc->tx.max_active_queue_len = sc->swsc->tx.active_queue_len; } } static int cpsw_tx_dequeue(struct cpsw_softc *sc) { struct cpsw_slot *slot, *last_removed_slot = NULL; struct cpsw_cpdma_bd bd; uint32_t flags, removed = 0; slot = STAILQ_FIRST(&sc->tx.active); if (slot == NULL && cpsw_read_cp(sc, &sc->tx) == 0xfffffffc) { CPSW_DEBUGF(sc, ("TX teardown of an empty queue")); cpsw_write_cp(sc, &sc->tx, 0xfffffffc); sc->tx.running = 0; return (0); } /* Pull completed buffers off the hardware TX queue. */ while (slot != NULL) { flags = cpsw_cpdma_read_bd_flags(sc, slot); if (flags & CPDMA_BD_OWNER) break; /* Hardware is still using this packet. */ CPSW_DEBUGF(sc, ("TX removing completed packet")); bus_dmamap_sync(sc->mbuf_dtag, slot->dmamap, BUS_DMASYNC_POSTWRITE); bus_dmamap_unload(sc->mbuf_dtag, slot->dmamap); m_freem(slot->mbuf); slot->mbuf = NULL; if (slot->ifp) if_inc_counter(slot->ifp, IFCOUNTER_OPACKETS, 1); /* Dequeue any additional buffers used by this packet. */ while (slot != NULL && slot->mbuf == NULL) { STAILQ_REMOVE_HEAD(&sc->tx.active, next); STAILQ_INSERT_TAIL(&sc->tx.avail, slot, next); ++removed; last_removed_slot = slot; slot = STAILQ_FIRST(&sc->tx.active); } /* TearDown complete is only marked on the SOP for the packet. */ if ((flags & (CPDMA_BD_SOP | CPDMA_BD_TDOWNCMPLT)) == (CPDMA_BD_EOP | CPDMA_BD_TDOWNCMPLT)) { CPSW_DEBUGF(sc, ("TX teardown in progress")); cpsw_write_cp(sc, &sc->tx, 0xfffffffc); // TODO: Increment a count of dropped TX packets sc->tx.running = 0; break; } if ((flags & CPDMA_BD_EOP) == 0) flags = cpsw_cpdma_read_bd_flags(sc, last_removed_slot); if ((flags & (CPDMA_BD_EOP | CPDMA_BD_EOQ)) == (CPDMA_BD_EOP | CPDMA_BD_EOQ)) { cpsw_cpdma_read_bd(sc, last_removed_slot, &bd); if (bd.next != 0 && bd.next != sc->last_hdp) { /* Restart the queue. */ sc->last_hdp = bd.next; cpsw_write_4(sc, sc->tx.hdp_offset, bd.next); } } } if (removed != 0) { cpsw_write_cp_slot(sc, &sc->tx, last_removed_slot); sc->tx.queue_removes += removed; sc->tx.active_queue_len -= removed; sc->tx.avail_queue_len += removed; if (sc->tx.avail_queue_len > sc->tx.max_avail_queue_len) sc->tx.max_avail_queue_len = sc->tx.avail_queue_len; } return (removed); } /* * * Miscellaneous interrupts. 
* */ static void cpsw_intr_rx_thresh(void *arg) { struct cpsw_softc *sc = arg; uint32_t stat = cpsw_read_4(sc, CPSW_WR_C_RX_THRESH_STAT(0)); CPSW_DEBUGF(sc, ("stat=%x", stat)); cpsw_write_4(sc, CPSW_CPDMA_CPDMA_EOI_VECTOR, 0); } static void cpsw_intr_misc_host_error(struct cpsw_softc *sc) { uint32_t intstat; uint32_t dmastat; int txerr, rxerr, txchan, rxchan; printf("\n\n"); device_printf(sc->dev, "HOST ERROR: PROGRAMMING ERROR DETECTED BY HARDWARE\n"); printf("\n\n"); intstat = cpsw_read_4(sc, CPSW_CPDMA_DMA_INTSTAT_MASKED); device_printf(sc->dev, "CPSW_CPDMA_DMA_INTSTAT_MASKED=0x%x\n", intstat); dmastat = cpsw_read_4(sc, CPSW_CPDMA_DMASTATUS); device_printf(sc->dev, "CPSW_CPDMA_DMASTATUS=0x%x\n", dmastat); txerr = (dmastat >> 20) & 15; txchan = (dmastat >> 16) & 7; rxerr = (dmastat >> 12) & 15; rxchan = (dmastat >> 8) & 7; switch (txerr) { case 0: break; case 1: printf("SOP error on TX channel %d\n", txchan); break; case 2: printf("Ownership bit not set on SOP buffer on TX channel %d\n", txchan); break; case 3: printf("Zero Next Buffer but not EOP on TX channel %d\n", txchan); break; case 4: printf("Zero Buffer Pointer on TX channel %d\n", txchan); break; case 5: printf("Zero Buffer Length on TX channel %d\n", txchan); break; case 6: printf("Packet length error on TX channel %d\n", txchan); break; default: printf("Unknown error on TX channel %d\n", txchan); break; } if (txerr != 0) { printf("CPSW_CPDMA_TX%d_HDP=0x%x\n", txchan, cpsw_read_4(sc, CPSW_CPDMA_TX_HDP(txchan))); printf("CPSW_CPDMA_TX%d_CP=0x%x\n", txchan, cpsw_read_4(sc, CPSW_CPDMA_TX_CP(txchan))); cpsw_dump_queue(sc, &sc->tx.active); } switch (rxerr) { case 0: break; case 2: printf("Ownership bit not set on RX channel %d\n", rxchan); break; case 4: printf("Zero Buffer Pointer on RX channel %d\n", rxchan); break; case 5: printf("Zero Buffer Length on RX channel %d\n", rxchan); break; case 6: printf("Buffer offset too big on RX channel %d\n", rxchan); break; default: printf("Unknown RX error on RX channel %d\n", rxchan); break; } if (rxerr != 0) { printf("CPSW_CPDMA_RX%d_HDP=0x%x\n", rxchan, cpsw_read_4(sc,CPSW_CPDMA_RX_HDP(rxchan))); printf("CPSW_CPDMA_RX%d_CP=0x%x\n", rxchan, cpsw_read_4(sc, CPSW_CPDMA_RX_CP(rxchan))); cpsw_dump_queue(sc, &sc->rx.active); } printf("\nALE Table\n"); cpsw_ale_dump_table(sc); // XXX do something useful here?? panic("CPSW HOST ERROR INTERRUPT"); // Suppress this interrupt in the future. cpsw_write_4(sc, CPSW_CPDMA_DMA_INTMASK_CLEAR, intstat); printf("XXX HOST ERROR INTERRUPT SUPPRESSED\n"); // The watchdog will probably reset the controller // in a little while. It will probably fail again. } static void cpsw_intr_misc(void *arg) { struct cpsw_softc *sc = arg; uint32_t stat = cpsw_read_4(sc, CPSW_WR_C_MISC_STAT(0)); if (stat & CPSW_WR_C_MISC_EVNT_PEND) CPSW_DEBUGF(sc, ("Time sync event interrupt unimplemented")); if (stat & CPSW_WR_C_MISC_STAT_PEND) cpsw_stats_collect(sc); if (stat & CPSW_WR_C_MISC_HOST_PEND) cpsw_intr_misc_host_error(sc); if (stat & CPSW_WR_C_MISC_MDIOLINK) { cpsw_write_4(sc, MDIOLINKINTMASKED, cpsw_read_4(sc, MDIOLINKINTMASKED)); } if (stat & CPSW_WR_C_MISC_MDIOUSER) { CPSW_DEBUGF(sc, ("MDIO operation completed interrupt unimplemented")); } cpsw_write_4(sc, CPSW_CPDMA_CPDMA_EOI_VECTOR, 3); } /* * * Periodic Checks and Watchdog. 
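 * (MII link polling via cpswp_tick() and the one-second TX watchdog)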
* */ static void cpswp_tick(void *msc) { struct cpswp_softc *sc = msc; /* Check for media type change */ mii_tick(sc->mii); if (sc->media_status != sc->mii->mii_media.ifm_media) { printf("%s: media type changed (ifm_media=%x)\n", __func__, sc->mii->mii_media.ifm_media); cpswp_ifmedia_upd(sc->ifp); } /* Schedule another timeout one second from now */ callout_reset(&sc->mii_callout, hz, cpswp_tick, sc); } static void cpswp_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr) { struct cpswp_softc *sc; struct mii_data *mii; sc = ifp->if_softc; CPSWP_DEBUGF(sc, ("")); CPSW_PORT_LOCK(sc); mii = sc->mii; mii_pollstat(mii); ifmr->ifm_active = mii->mii_media_active; ifmr->ifm_status = mii->mii_media_status; CPSW_PORT_UNLOCK(sc); } static int cpswp_ifmedia_upd(struct ifnet *ifp) { struct cpswp_softc *sc; sc = ifp->if_softc; CPSWP_DEBUGF(sc, ("")); CPSW_PORT_LOCK(sc); mii_mediachg(sc->mii); sc->media_status = sc->mii->mii_media.ifm_media; CPSW_PORT_UNLOCK(sc); return (0); } static void cpsw_tx_watchdog_full_reset(struct cpsw_softc *sc) { struct cpswp_softc *psc; int i; cpsw_debugf_head("CPSW watchdog"); device_printf(sc->dev, "watchdog timeout\n"); for (i = 0; i < CPSW_PORTS; i++) { if (!sc->dualemac && i != sc->active_slave) continue; psc = device_get_softc(sc->port[i].dev); CPSW_PORT_LOCK(psc); cpswp_stop_locked(psc); CPSW_PORT_UNLOCK(psc); } } static void cpsw_tx_watchdog(void *msc) { struct cpsw_softc *sc; sc = msc; CPSW_GLOBAL_LOCK(sc); if (sc->tx.active_queue_len == 0 || !sc->tx.running) { sc->watchdog.timer = 0; /* Nothing to do. */ } else if (sc->tx.queue_removes > sc->tx.queue_removes_at_last_tick) { sc->watchdog.timer = 0; /* Stuff done while we weren't looking. */ } else if (cpsw_tx_dequeue(sc) > 0) { sc->watchdog.timer = 0; /* We just did something. */ } else { /* There was something to do but it didn't get done. */ ++sc->watchdog.timer; if (sc->watchdog.timer > 5) { sc->watchdog.timer = 0; ++sc->watchdog.resets; cpsw_tx_watchdog_full_reset(sc); } } sc->tx.queue_removes_at_last_tick = sc->tx.queue_removes; CPSW_GLOBAL_UNLOCK(sc); /* Schedule another timeout one second from now */ callout_reset(&sc->watchdog.callout, hz, cpsw_tx_watchdog, sc); } /* * * ALE support routines. * */ static void cpsw_ale_read_entry(struct cpsw_softc *sc, uint16_t idx, uint32_t *ale_entry) { cpsw_write_4(sc, CPSW_ALE_TBLCTL, idx & 1023); ale_entry[0] = cpsw_read_4(sc, CPSW_ALE_TBLW0); ale_entry[1] = cpsw_read_4(sc, CPSW_ALE_TBLW1); ale_entry[2] = cpsw_read_4(sc, CPSW_ALE_TBLW2); } static void cpsw_ale_write_entry(struct cpsw_softc *sc, uint16_t idx, uint32_t *ale_entry) { cpsw_write_4(sc, CPSW_ALE_TBLW0, ale_entry[0]); cpsw_write_4(sc, CPSW_ALE_TBLW1, ale_entry[1]); cpsw_write_4(sc, CPSW_ALE_TBLW2, ale_entry[2]); cpsw_write_4(sc, CPSW_ALE_TBLCTL, 1 << 31 | (idx & 1023)); } static void cpsw_ale_remove_all_mc_entries(struct cpsw_softc *sc) { int i; uint32_t ale_entry[3]; /* First four entries are link address and broadcast. */ for (i = 10; i < CPSW_MAX_ALE_ENTRIES; i++) { cpsw_ale_read_entry(sc, i, ale_entry); if ((ALE_TYPE(ale_entry) == ALE_TYPE_ADDR || ALE_TYPE(ale_entry) == ALE_TYPE_VLAN_ADDR) && ALE_MCAST(ale_entry) == 1) { /* MCast link addr */ ale_entry[0] = ale_entry[1] = ale_entry[2] = 0; cpsw_ale_write_entry(sc, i, ale_entry); } } } static int cpsw_ale_mc_entry_set(struct cpsw_softc *sc, uint8_t portmap, int vlan, uint8_t *mac) { int free_index = -1, matching_index = -1, i; uint32_t ale_entry[3], ale_type; /* Find a matching entry or a free entry. 
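	 * A free slot has ALE entry type 0; a match is an existing entry that
	 * carries the same MAC address as the one being installed.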
*/ for (i = 10; i < CPSW_MAX_ALE_ENTRIES; i++) { cpsw_ale_read_entry(sc, i, ale_entry); /* Entry Type[61:60] is 0 for free entry */ if (free_index < 0 && ALE_TYPE(ale_entry) == 0) free_index = i; if ((((ale_entry[1] >> 8) & 0xFF) == mac[0]) && (((ale_entry[1] >> 0) & 0xFF) == mac[1]) && (((ale_entry[0] >>24) & 0xFF) == mac[2]) && (((ale_entry[0] >>16) & 0xFF) == mac[3]) && (((ale_entry[0] >> 8) & 0xFF) == mac[4]) && (((ale_entry[0] >> 0) & 0xFF) == mac[5])) { matching_index = i; break; } } if (matching_index < 0) { if (free_index < 0) return (ENOMEM); i = free_index; } if (vlan != -1) ale_type = ALE_TYPE_VLAN_ADDR << 28 | vlan << 16; else ale_type = ALE_TYPE_ADDR << 28; /* Set MAC address */ ale_entry[0] = mac[2] << 24 | mac[3] << 16 | mac[4] << 8 | mac[5]; ale_entry[1] = mac[0] << 8 | mac[1]; /* Entry type[61:60] and Mcast fwd state[63:62] is fw(3). */ ale_entry[1] |= ALE_MCAST_FWD | ale_type; /* Set portmask [68:66] */ ale_entry[2] = (portmap & 7) << 2; cpsw_ale_write_entry(sc, i, ale_entry); return 0; } static void cpsw_ale_dump_table(struct cpsw_softc *sc) { int i; uint32_t ale_entry[3]; for (i = 0; i < CPSW_MAX_ALE_ENTRIES; i++) { cpsw_ale_read_entry(sc, i, ale_entry); switch (ALE_TYPE(ale_entry)) { case ALE_TYPE_VLAN: printf("ALE[%4u] %08x %08x %08x ", i, ale_entry[2], ale_entry[1], ale_entry[0]); printf("type: %u ", ALE_TYPE(ale_entry)); printf("vlan: %u ", ALE_VLAN(ale_entry)); printf("untag: %u ", ALE_VLAN_UNTAG(ale_entry)); printf("reg flood: %u ", ALE_VLAN_REGFLOOD(ale_entry)); printf("unreg flood: %u ", ALE_VLAN_UNREGFLOOD(ale_entry)); printf("members: %u ", ALE_VLAN_MEMBERS(ale_entry)); printf("\n"); break; case ALE_TYPE_ADDR: case ALE_TYPE_VLAN_ADDR: printf("ALE[%4u] %08x %08x %08x ", i, ale_entry[2], ale_entry[1], ale_entry[0]); printf("type: %u ", ALE_TYPE(ale_entry)); printf("mac: %02x:%02x:%02x:%02x:%02x:%02x ", (ale_entry[1] >> 8) & 0xFF, (ale_entry[1] >> 0) & 0xFF, (ale_entry[0] >>24) & 0xFF, (ale_entry[0] >>16) & 0xFF, (ale_entry[0] >> 8) & 0xFF, (ale_entry[0] >> 0) & 0xFF); printf(ALE_MCAST(ale_entry) ? "mcast " : "ucast "); if (ALE_TYPE(ale_entry) == ALE_TYPE_VLAN_ADDR) printf("vlan: %u ", ALE_VLAN(ale_entry)); printf("port: %u ", ALE_PORTS(ale_entry)); printf("\n"); break; } } printf("\n"); } static int cpswp_ale_update_addresses(struct cpswp_softc *sc, int purge) { uint8_t *mac; uint32_t ale_entry[3], ale_type, portmask; struct ifmultiaddr *ifma; if (sc->swsc->dualemac) { ale_type = ALE_TYPE_VLAN_ADDR << 28 | sc->vlan << 16; portmask = 1 << (sc->unit + 1) | 1 << 0; } else { ale_type = ALE_TYPE_ADDR << 28; portmask = 7; } /* * Route incoming packets for our MAC address to Port 0 (host). * For simplicity, keep this entry at table index 0 for port 1 and * at index 2 for port 2 in the ALE. */ if_addr_rlock(sc->ifp); mac = LLADDR((struct sockaddr_dl *)sc->ifp->if_addr->ifa_addr); ale_entry[0] = mac[2] << 24 | mac[3] << 16 | mac[4] << 8 | mac[5]; ale_entry[1] = ale_type | mac[0] << 8 | mac[1]; /* addr entry + mac */ ale_entry[2] = 0; /* port = 0 */ cpsw_ale_write_entry(sc->swsc, 0 + 2 * sc->unit, ale_entry); /* Set outgoing MAC Address for slave port. */ cpsw_write_4(sc->swsc, CPSW_PORT_P_SA_HI(sc->unit + 1), mac[3] << 24 | mac[2] << 16 | mac[1] << 8 | mac[0]); cpsw_write_4(sc->swsc, CPSW_PORT_P_SA_LO(sc->unit + 1), mac[5] << 8 | mac[4]); if_addr_runlock(sc->ifp); /* Keep the broadcast address at table entry 1 (or 3). 
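	 * i.e. right after the unicast entry written above at index 0 (or 2)
	 * for this port.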
*/ ale_entry[0] = 0xffffffff; /* Lower 32 bits of MAC */ /* ALE_MCAST_FWD, Addr type, upper 16 bits of Mac */ ale_entry[1] = ALE_MCAST_FWD | ale_type | 0xffff; ale_entry[2] = portmask << 2; cpsw_ale_write_entry(sc->swsc, 1 + 2 * sc->unit, ale_entry); /* SIOCDELMULTI doesn't specify the particular address being removed, so we have to remove all and rebuild. */ if (purge) cpsw_ale_remove_all_mc_entries(sc->swsc); /* Set other multicast addrs desired. */ if_maddr_rlock(sc->ifp); TAILQ_FOREACH(ifma, &sc->ifp->if_multiaddrs, ifma_link) { if (ifma->ifma_addr->sa_family != AF_LINK) continue; cpsw_ale_mc_entry_set(sc->swsc, portmask, sc->vlan, LLADDR((struct sockaddr_dl *)ifma->ifma_addr)); } if_maddr_runlock(sc->ifp); return (0); } static int cpsw_ale_update_vlan_table(struct cpsw_softc *sc, int vlan, int ports, int untag, int mcregflood, int mcunregflood) { int free_index, i, matching_index; uint32_t ale_entry[3]; free_index = matching_index = -1; /* Find a matching entry or a free entry. */ for (i = 5; i < CPSW_MAX_ALE_ENTRIES; i++) { cpsw_ale_read_entry(sc, i, ale_entry); /* Entry Type[61:60] is 0 for free entry */ if (free_index < 0 && ALE_TYPE(ale_entry) == 0) free_index = i; if (ALE_VLAN(ale_entry) == vlan) { matching_index = i; break; } } if (matching_index < 0) { if (free_index < 0) return (-1); i = free_index; } ale_entry[0] = (untag & 7) << 24 | (mcregflood & 7) << 16 | (mcunregflood & 7) << 8 | (ports & 7); ale_entry[1] = ALE_TYPE_VLAN << 28 | vlan << 16; ale_entry[2] = 0; cpsw_ale_write_entry(sc, i, ale_entry); return (0); } /* * * Statistics and Sysctls. * */ #if 0 static void cpsw_stats_dump(struct cpsw_softc *sc) { int i; uint32_t r; for (i = 0; i < CPSW_SYSCTL_COUNT; ++i) { r = cpsw_read_4(sc, CPSW_STATS_OFFSET + cpsw_stat_sysctls[i].reg); CPSW_DEBUGF(sc, ("%s: %ju + %u = %ju", cpsw_stat_sysctls[i].oid, (intmax_t)sc->shadow_stats[i], r, (intmax_t)sc->shadow_stats[i] + r)); } } #endif static void cpsw_stats_collect(struct cpsw_softc *sc) { int i; uint32_t r; CPSW_DEBUGF(sc, ("Controller shadow statistics updated.")); for (i = 0; i < CPSW_SYSCTL_COUNT; ++i) { r = cpsw_read_4(sc, CPSW_STATS_OFFSET + cpsw_stat_sysctls[i].reg); sc->shadow_stats[i] += r; cpsw_write_4(sc, CPSW_STATS_OFFSET + cpsw_stat_sysctls[i].reg, r); } } static int cpsw_stats_sysctl(SYSCTL_HANDLER_ARGS) { struct cpsw_softc *sc; struct cpsw_stat *stat; uint64_t result; sc = (struct cpsw_softc *)arg1; stat = &cpsw_stat_sysctls[oidp->oid_number]; result = sc->shadow_stats[oidp->oid_number]; result += cpsw_read_4(sc, CPSW_STATS_OFFSET + stat->reg); return (sysctl_handle_64(oidp, &result, 0, req)); } static int cpsw_stat_attached(SYSCTL_HANDLER_ARGS) { struct cpsw_softc *sc; struct bintime t; unsigned result; sc = (struct cpsw_softc *)arg1; getbinuptime(&t); bintime_sub(&t, &sc->attach_uptime); result = t.sec; return (sysctl_handle_int(oidp, &result, 0, req)); } static int cpsw_stat_uptime(SYSCTL_HANDLER_ARGS) { struct cpsw_softc *swsc; struct cpswp_softc *sc; struct bintime t; unsigned result; swsc = arg1; sc = device_get_softc(swsc->port[arg2].dev); if (sc->ifp->if_drv_flags & IFF_DRV_RUNNING) { getbinuptime(&t); bintime_sub(&t, &sc->init_uptime); result = t.sec; } else result = 0; return (sysctl_handle_int(oidp, &result, 0, req)); } static void cpsw_add_queue_sysctls(struct sysctl_ctx_list *ctx, struct sysctl_oid *node, struct cpsw_queue *queue) { struct sysctl_oid_list *parent; parent = SYSCTL_CHILDREN(node); SYSCTL_ADD_INT(ctx, parent, OID_AUTO, "totalBuffers", CTLFLAG_RD, &queue->queue_slots, 0, "Total buffers 
currently assigned to this queue"); SYSCTL_ADD_INT(ctx, parent, OID_AUTO, "activeBuffers", CTLFLAG_RD, &queue->active_queue_len, 0, "Buffers currently registered with hardware controller"); SYSCTL_ADD_INT(ctx, parent, OID_AUTO, "maxActiveBuffers", CTLFLAG_RD, &queue->max_active_queue_len, 0, "Max value of activeBuffers since last driver reset"); SYSCTL_ADD_INT(ctx, parent, OID_AUTO, "availBuffers", CTLFLAG_RD, &queue->avail_queue_len, 0, "Buffers allocated to this queue but not currently " "registered with hardware controller"); SYSCTL_ADD_INT(ctx, parent, OID_AUTO, "maxAvailBuffers", CTLFLAG_RD, &queue->max_avail_queue_len, 0, "Max value of availBuffers since last driver reset"); SYSCTL_ADD_UINT(ctx, parent, OID_AUTO, "totalEnqueued", CTLFLAG_RD, &queue->queue_adds, 0, "Total buffers added to queue"); SYSCTL_ADD_UINT(ctx, parent, OID_AUTO, "totalDequeued", CTLFLAG_RD, &queue->queue_removes, 0, "Total buffers removed from queue"); SYSCTL_ADD_UINT(ctx, parent, OID_AUTO, "longestChain", CTLFLAG_RD, &queue->longest_chain, 0, "Max buffers used for a single packet"); } static void cpsw_add_watchdog_sysctls(struct sysctl_ctx_list *ctx, struct sysctl_oid *node, struct cpsw_softc *sc) { struct sysctl_oid_list *parent; parent = SYSCTL_CHILDREN(node); SYSCTL_ADD_INT(ctx, parent, OID_AUTO, "resets", CTLFLAG_RD, &sc->watchdog.resets, 0, "Total number of watchdog resets"); } static void cpsw_add_sysctls(struct cpsw_softc *sc) { struct sysctl_ctx_list *ctx; struct sysctl_oid *stats_node, *queue_node, *node; struct sysctl_oid_list *parent, *stats_parent, *queue_parent; struct sysctl_oid_list *ports_parent, *port_parent; char port[16]; int i; ctx = device_get_sysctl_ctx(sc->dev); parent = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->dev)); SYSCTL_ADD_INT(ctx, parent, OID_AUTO, "debug", CTLFLAG_RW, &sc->debug, 0, "Enable switch debug messages"); SYSCTL_ADD_PROC(ctx, parent, OID_AUTO, "attachedSecs", CTLTYPE_UINT | CTLFLAG_RD, sc, 0, cpsw_stat_attached, "IU", "Time since driver attach"); node = SYSCTL_ADD_NODE(ctx, parent, OID_AUTO, "ports", CTLFLAG_RD, NULL, "CPSW Ports Statistics"); ports_parent = SYSCTL_CHILDREN(node); for (i = 0; i < CPSW_PORTS; i++) { if (!sc->dualemac && i != sc->active_slave) continue; port[0] = '0' + i; port[1] = '\0'; node = SYSCTL_ADD_NODE(ctx, ports_parent, OID_AUTO, port, CTLFLAG_RD, NULL, "CPSW Port Statistics"); port_parent = SYSCTL_CHILDREN(node); SYSCTL_ADD_PROC(ctx, port_parent, OID_AUTO, "uptime", CTLTYPE_UINT | CTLFLAG_RD, sc, i, cpsw_stat_uptime, "IU", "Seconds since driver init"); } stats_node = SYSCTL_ADD_NODE(ctx, parent, OID_AUTO, "stats", CTLFLAG_RD, NULL, "CPSW Statistics"); stats_parent = SYSCTL_CHILDREN(stats_node); for (i = 0; i < CPSW_SYSCTL_COUNT; ++i) { SYSCTL_ADD_PROC(ctx, stats_parent, i, cpsw_stat_sysctls[i].oid, CTLTYPE_U64 | CTLFLAG_RD, sc, 0, cpsw_stats_sysctl, "IU", cpsw_stat_sysctls[i].oid); } queue_node = SYSCTL_ADD_NODE(ctx, parent, OID_AUTO, "queue", CTLFLAG_RD, NULL, "CPSW Queue Statistics"); queue_parent = SYSCTL_CHILDREN(queue_node); node = SYSCTL_ADD_NODE(ctx, queue_parent, OID_AUTO, "tx", CTLFLAG_RD, NULL, "TX Queue Statistics"); cpsw_add_queue_sysctls(ctx, node, &sc->tx); node = SYSCTL_ADD_NODE(ctx, queue_parent, OID_AUTO, "rx", CTLFLAG_RD, NULL, "RX Queue Statistics"); cpsw_add_queue_sysctls(ctx, node, &sc->rx); node = SYSCTL_ADD_NODE(ctx, parent, OID_AUTO, "watchdog", CTLFLAG_RD, NULL, "Watchdog Statistics"); cpsw_add_watchdog_sysctls(ctx, node, sc); } Index: stable/11 =================================================================== --- 
stable/11 (revision 310854) +++ stable/11 (revision 310855) Property changes on: stable/11 ___________________________________________________________________ Modified: svn:mergeinfo ## -0,0 +0,1 ## Merged /head:r305112-305113,305119,305141,305432